Last Piston view replaced using OAuthlib.
[wolnelektury.git] / src / search / views.py
index 5b65a30..39284c7 100644 (file)
@@ -3,20 +3,22 @@
 # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
 #
 from django.conf import settings
+from django.http.response import HttpResponseRedirect
 from django.shortcuts import render_to_response
 from django.template import RequestContext
 from django.views.decorators import cache
 from django.http import HttpResponse, JsonResponse
-from django.utils.translation import ugettext as _
 
-from catalogue.utils import split_tags
-from catalogue.models import Book
-from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
-from search.index import Search, SearchResult
+from catalogue.models import Book, Tag
+from pdcounter.models import Author
+from picture.models import Picture
+from search.index import Search, SearchResult, PictureResult
 from suggest.forms import PublishingSuggestForm
 import re
 import json
 
+from wolnelektury.utils import re_escape
+
 
 def match_word_re(word):
     if 'sqlite' in settings.DATABASES['default']['ENGINE']:
@@ -25,11 +27,11 @@ def match_word_re(word):
         return "[[:<:]]%s[[:>:]]" % word
 
 
-query_syntax_chars = re.compile(r"[\\/*:(){}]")
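+# Characters with special meaning in the index query syntax; replaced before querying.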
+query_syntax_chars = re.compile(r"[\\/*:(){}?.[\]+]")
 
 
 def remove_query_syntax_chars(query, replace=' '):
-    return query_syntax_chars.sub(' ', query)
+    return query_syntax_chars.sub(replace, query)
 
 
 def did_you_mean(query, tokens):
@@ -64,63 +66,36 @@ def hint(request):
     if len(prefix) < 2:
         return JsonResponse([], safe=False)
 
-    prefix = remove_query_syntax_chars(prefix)
-
-    search = Search()
-    # tags will do the restricting here
-    # but tags can be on the book and on fragments
-    # if the tags concern only the book, the new ones must be in the same book
-    # if they concern themes, they must be in the same fragment
-
-    def is_dupe(tag):
-        if isinstance(tag, PDCounterAuthor):
-            if filter(lambda t: t.slug == tag.slug and t != tag, tags):
-                return True
-        elif isinstance(tag, PDCounterBook):
-            if filter(lambda b: b.slug == tag.slug, tags):
-                return True
-        return False
-
-    def category_name(c):
-        if c.startswith('pd_'):
-            c = c[len('pd_'):]
-        return _(c)
+    prefix = re_escape(' '.join(remove_query_syntax_chars(prefix).split()))
 
     try:
         limit = int(request.GET.get('max', ''))
     except ValueError:
-        limit = -1
+        limit = 20
     else:
         if limit < 1:
-            limit = -1
-
-    data = []
-
-    tags = search.hint_tags(prefix, pdcounter=True)
-    tags = filter(lambda t: not is_dupe(t), tags)
-    for t in tags:
-        if not limit:
-            break
-        limit -= 1
-        data.append({
-            'label': t.name,
-            'category': category_name(t.category),
-            'id': t.id,
-            'url': t.get_absolute_url()
-            })
-    if limit:
-        books = search.hint_books(prefix)
-        for b in books:
-            if not limit:
-                break
-            limit -= 1
-            data.append({
+            limit = 20
+
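+    # Match author names first, on a case-insensitive, word-initial (\m) prefix.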
+    authors = Tag.objects.filter(
+        category='author', name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+    data = [
+        {
+            'label': author.name,
+            'id': author.id,
+            'url': author.get_absolute_url(),
+        }
+        for author in authors[:limit]
+    ]
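+    # Fill any remaining slots with books whose title matches the prefix.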
+    if len(data) < limit:
+        data += [
+            {
                 'label': b.title,
-                'category': _('book'),
+                'author': b.author_unicode(),
                 'id': b.id,
                 'url': b.get_absolute_url()
-                })
-
+            }
+            for b in Book.objects.filter(title__iregex=r'\m' + prefix)[:limit - len(data)]
+        ]
     callback = request.GET.get('callback', None)
     if callback:
         return HttpResponse("%s(%s);" % (callback, json.dumps(data)),
@@ -132,11 +107,6 @@ def hint(request):
 @cache.never_cache
 def main(request):
     query = request.GET.get('q', '')
-    query = ' '.join(query.split())
-    # filter out private use characters
-    import unicodedata
-    query = ''.join(ch for ch in query if unicodedata.category(ch) != 'Co')
-
     if len(query) < 2:
         return render_to_response(
             'catalogue/search_too_short.html', {'prefix': query},
@@ -145,120 +115,128 @@ def main(request):
         return render_to_response(
             'catalogue/search_too_long.html', {'prefix': query}, context_instance=RequestContext(request))
 
-    query = remove_query_syntax_chars(query)
-    
-    search = Search()
-
-    theme_terms = search.index.analyze(text=query, field="themes_pl") \
-        + search.index.analyze(text=query, field="themes")
-
-    # change hints
-    tags = search.hint_tags(query, pdcounter=True, prefix=False)
-    tags = split_tags(tags)
-
-    author_results = search.search_phrase(query, 'authors', book=True)
-    translator_results = search.search_phrase(query, 'translators', book=True)
-
-    title_results = search.search_phrase(query, 'title', book=True)
-
-    # Boost main author/title results with mixed search, and save some of its results for end of list.
-    # boost author, title results
-    author_title_mixed = search.search_some(query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
-    author_title_rest = []
-
-    for b in author_title_mixed:
-        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + translator_results + title_results)
-        for b2 in also_in_mixed:
-            b2.boost *= 1.1
-        if also_in_mixed is []:
-            author_title_rest.append(b)
-
-    # Do a phrase search but a term search as well - this can give us better snippets then search_everywhere,
-    # Because the query is using only one field.
-    text_phrase = SearchResult.aggregate(
-        search.search_phrase(query, 'text', snippets=True, book=False),
-        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))
-
-    everywhere = search.search_everywhere(query, query_terms=theme_terms)
-
-    def already_found(results):
-        def f(e):
-            for r in results:
-                if e.book_id == r.book_id:
-                    e.boost = 0.9
-                    results.append(e)
-                    return True
-            return False
-        return f
-    f = already_found(author_results + translator_results + title_results + text_phrase)
-    everywhere = filter(lambda x: not f(x), everywhere)
-
-    author_results = SearchResult.aggregate(author_results)
-    translator_results = SearchResult.aggregate(translator_results)
-    title_results = SearchResult.aggregate(title_results)
-
-    everywhere = SearchResult.aggregate(everywhere, author_title_rest)
-
-    for field, res in [('authors', author_results),
-                       ('translators', translator_results),
-                       ('title', title_results),
-                       ('text', text_phrase),
-                       ('text', everywhere)]:
-        res.sort(reverse=True)
-        for r in res:
-            search.get_snippets(r, query, field, 3)
-
+    query = prepare_query(query)
+    pd_authors = search_pd_authors(query)
+    books = search_books(query)
+    pictures = search_pictures(query)
     suggestion = u''
 
-    def ensure_exists(r):
-        try:
-            return r.book
-        except Book.DoesNotExist:
-            return False
-
-    author_results = filter(ensure_exists, author_results)
-    translator_results = filter(ensure_exists, translator_results)
-    title_results = filter(ensure_exists, title_results)
-    text_phrase = filter(ensure_exists, text_phrase)
-    everywhere = filter(ensure_exists, everywhere)
-
-    results = author_results + translator_results + title_results + text_phrase + everywhere
-    # ensure books do exists & sort them
-    for res in (author_results, translator_results, title_results, text_phrase, everywhere):
-        res.sort(reverse=True)
-
-    # We don't want to redirect to book text, but rather display result page even with one result.
-    # if len(results) == 1:
-    #     fragment_hits = filter(lambda h: 'fragment' in h, results[0].hits)
-    #     if len(fragment_hits) == 1:
-    #         #anchor = fragment_hits[0]['fragment']
-    #         #frag = Fragment.objects.get(anchor=anchor)
-    #         return HttpResponseRedirect(fragment_hits[0]['fragment'].get_absolute_url())
-    #     return HttpResponseRedirect(results[0].book.get_absolute_url())
-    if len(results) == 0:
+    if not (books or pictures or pd_authors):
         form = PublishingSuggestForm(initial={"books": query + ", "})
         return render_to_response(
             'catalogue/search_no_hits.html',
             {
-                'tags': tags,
-                'prefix': query,
                 'form': form,
                 'did_you_mean': suggestion
             },
             context_instance=RequestContext(request))
 
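+    # With no book or picture hits and exactly one public-domain author, go straight to that author.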
+    if not (books or pictures) and len(pd_authors) == 1:
+        return HttpResponseRedirect(pd_authors[0].get_absolute_url())
+
     return render_to_response(
         'catalogue/search_multiple_hits.html',
         {
-            'tags': tags,
-            'prefix': query,
-            'results': {
-                'author': author_results,
-                'translator': translator_results,
-                'title': title_results,
-                'content': text_phrase,
-                'other': everywhere
-            },
+            'pd_authors': pd_authors,
+            'books': books,
+            'pictures': pictures,
             'did_you_mean': suggestion
         },
         context_instance=RequestContext(request))
+
+
+def search_books(query):
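+    """Search indexed books for the query, scoring matches in authors, title, metadata and text/themes."""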
+    search = Search()
+    results_parts = []
+    search_fields = []
+    words = query.split()
+    fieldsets = (
+        (['authors', 'authors_nonstem'], True),
+        (['title', 'title_nonstem'], True),
+        (['metadata', 'metadata_nonstem'], True),
+        (['text', 'text_nonstem', 'themes_pl', 'themes_pl_nonstem'], False),
+    )
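+    # Each pass searches the accumulated field list but requires a match in its own fields.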
+    for fields, is_book in fieldsets:
+        search_fields += fields
+        results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book))
+    results = []
+    ids_results = {}
+    for results_part in results_parts:
+        for result in sorted(SearchResult.aggregate(results_part), reverse=True):
+            book_id = result.book_id
+            if book_id in ids_results:
+                ids_results[book_id].merge(result)
+            else:
+                results.append(result)
+                ids_results[book_id] = result
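+    # Drop books whose ancestor already appears in the results.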
+    descendant_ids = set(
+        Book.objects.filter(id__in=ids_results, ancestor__in=ids_results).values_list('id', flat=True))
+    results = [result for result in results if result.book_id not in descendant_ids]
+    for result in results:
+        search.get_snippets(result, query, num=3)
+
+    def ensure_exists(r):
+        try:
+            return r.book
+        except Book.DoesNotExist:
+            return False
+
+    results = filter(ensure_exists, results)
+    return results
+
+
+def search_pictures(query):
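+    """Search indexed pictures for the query, scoring matches in authors, title, metadata and themes."""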
+    search = Search()
+    results_parts = []
+    search_fields = []
+    words = query.split()
+    fieldsets = (
+        (['authors', 'authors_nonstem'], True),
+        (['title', 'title_nonstem'], True),
+        (['metadata', 'metadata_nonstem'], True),
+        (['themes_pl', 'themes_pl_nonstem'], False),
+    )
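+    # Same cumulative field strategy as search_books, with picture themes instead of full text.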
+    for fields, is_book in fieldsets:
+        search_fields += fields
+        results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book, picture=True))
+    results = []
+    ids_results = {}
+    for results_part in results_parts:
+        for result in sorted(PictureResult.aggregate(results_part), reverse=True):
+            picture_id = result.picture_id
+            if picture_id in ids_results:
+                ids_results[picture_id].merge(result)
+            else:
+                results.append(result)
+                ids_results[picture_id] = result
+
+    def ensure_exists(r):
+        try:
+            return r.picture
+        except Picture.DoesNotExist:
+            return False
+
+    results = filter(ensure_exists, results)
+    return results
+
+
+def search_pd_authors(query):
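+    """Return public-domain counter authors matching the query that lack a catalogue author tag."""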
+    pd_authors = Author.objects.filter(name__icontains=query)
+    existing_slugs = Tag.objects.filter(
+        category='author', slug__in=list(pd_authors.values_list('slug', flat=True))) \
+        .values_list('slug', flat=True)
+    pd_authors = pd_authors.exclude(slug__in=existing_slugs)
+    return pd_authors
+
+
+def prepare_query(query):
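+    """Normalize whitespace, drop private-use and query-syntax characters, and cap the query at 10 words."""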
+    query = ' '.join(query.split())
+    # filter out private use characters
+    import unicodedata
+    query = ''.join(ch for ch in query if unicodedata.category(ch) != 'Co')
+    query = remove_query_syntax_chars(query)
+
+    words = query.split()
+    if len(words) > 10:
+        query = ' '.join(words[:10])
+    return query