search: faceted book/picture search, simpler hint endpoint, render() instead of render_to_response()
[wolnelektury.git] / src / search / views.py
index 67c3fd4..b6e290b 100644 (file)
@@ -1,22 +1,22 @@
-# -*- coding: utf-8 -*-
 # This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
 # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
 #
 from django.conf import settings
-from django.shortcuts import render_to_response
-from django.template import RequestContext
+from django.http.response import HttpResponseRedirect
+from django.shortcuts import render
 from django.views.decorators import cache
 from django.http import HttpResponse, JsonResponse
-from django.utils.translation import ugettext as _
 
-from catalogue.utils import split_tags
-from catalogue.models import Book
-from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
-from search.index import Search, SearchResult
+from catalogue.models import Book, Tag
+from pdcounter.models import Author
+from picture.models import Picture
+from search.index import Search, SearchResult, PictureResult
 from suggest.forms import PublishingSuggestForm
 import re
 import json
 
+from wolnelektury.utils import re_escape
+
 
 def match_word_re(word):
     if 'sqlite' in settings.DATABASES['default']['ENGINE']:
@@ -25,11 +25,11 @@ def match_word_re(word):
         return "[[:<:]]%s[[:>:]]" % word
 
 
-query_syntax_chars = re.compile(r"[\\/*:(){}]")
+query_syntax_chars = re.compile(r"[\\/*:(){}?.[\]+]")
 
 
 def remove_query_syntax_chars(query, replace=' '):
-    return query_syntax_chars.sub(' ', query)
+    return query_syntax_chars.sub(replace, query)
 
 
 def did_you_mean(query, tokens):
@@ -59,67 +59,50 @@ def did_you_mean(query, tokens):
 
 
 @cache.never_cache
-def hint(request):
-    prefix = request.GET.get('term', '')
+def hint(request, mozhint=False, param='term'):
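+    """Return autocomplete hints: matching authors first, then book titles.
+
+    At most `max` items are returned (default 20). With mozhint=True the
+    response is the two-element [prefix, [labels]] list used by
+    OpenSearch-style browser search suggestions.
+    """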
+    prefix = request.GET.get(param, '')
     if len(prefix) < 2:
         return JsonResponse([], safe=False)
 
-    prefix = remove_query_syntax_chars(prefix)
-
-    search = Search()
-    # tags will narrow the results here
-    # but tags can apply to the book or to its fragments
-    # if the tags concern only the book, the new ones must be in the same book
-    # if they concern themes, they must be in the same fragment
-
-    def is_dupe(tag):
-        if isinstance(tag, PDCounterAuthor):
-            if filter(lambda t: t.slug == tag.slug and t != tag, tags):
-                return True
-        elif isinstance(tag, PDCounterBook):
-            if filter(lambda b: b.slug == tag.slug, tags):
-                return True
-        return False
-
-    def category_name(c):
-        if c.startswith('pd_'):
-            c = c[len('pd_'):]
-        return _(c)
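+    # Normalize the prefix: collapse whitespace, drop query syntax characters
+    # and escape it with re_escape before it is used in the iregex lookups below.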
+    prefix = re_escape(' '.join(remove_query_syntax_chars(prefix).split()))
 
     try:
         limit = int(request.GET.get('max', ''))
     except ValueError:
-        limit = -1
+        limit = 20
     else:
         if limit < 1:
-            limit = -1
-
-    data = []
-
-    tags = search.hint_tags(prefix, pdcounter=True)
-    tags = filter(lambda t: not is_dupe(t), tags)
-    for t in tags:
-        if not limit:
-            break
-        limit -= 1
-        data.append({
-            'label': t.name,
-            'category': category_name(t.category),
-            'id': t.id,
-            'url': t.get_absolute_url()
-            })
-    if limit:
-        books = search.hint_books(prefix)
-        for b in books:
-            if not limit:
-                break
-            limit -= 1
-            data.append({
+            limit = 20
+
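+    # '\m' is the PostgreSQL regex marker for the start of a word, so the
+    # prefix has to begin a word in the author name or book title.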
+    authors = Tag.objects.filter(
+        category='author', name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+    data = [
+        {
+            'label': author.name,
+            'id': author.id,
+            'url': author.get_absolute_url(),
+        }
+        for author in authors[:limit]
+    ]
+    if len(data) < limit:
+        data += [
+            {
                 'label': b.title,
-                'category': _('book'),
+                'author': b.author_unicode(),
                 'id': b.id,
                 'url': b.get_absolute_url()
-                })
+            }
+            for b in Book.objects.filter(findable=True, title__iregex=r'\m' + prefix)[:limit - len(data)]
+        ]
+
+    if mozhint:
+        data = [
+            prefix,
+            [
+                item['label']
+                for item in data
+            ]
+        ]
 
     callback = request.GET.get('callback', None)
     if callback:
@@ -133,128 +116,203 @@ def hint(request):
 def main(request):
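+    """Search results page: books, pictures and public-domain authors, with optional facet filters."""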
     query = request.GET.get('q', '')
 
+    format = request.GET.get('format')
+    lang = request.GET.get('lang')
+    epoch = request.GET.get('epoch')
+    kind = request.GET.get('kind')
+    genre = request.GET.get('genre')
+
     if len(query) < 2:
-        return render_to_response(
-            'catalogue/search_too_short.html', {'prefix': query},
-            context_instance=RequestContext(request))
+        return render(
+            request, 'catalogue/search_too_short.html',
+            {'prefix': query})
     elif len(query) > 256:
-        return render_to_response(
-            'catalogue/search_too_long.html', {'prefix': query}, context_instance=RequestContext(request))
+        return render(
+            request, 'catalogue/search_too_long.html',
+            {'prefix': query})
 
-    query = remove_query_syntax_chars(query)
+    query = prepare_query(query)
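+    # Facets narrow the search: public-domain authors are only looked up when
+    # no facet is active, format == 'obraz' switches from books to pictures,
+    # and pictures are skipped whenever a language filter is set.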
+    if not (format or lang or epoch or kind or genre):
+        pd_authors = search_pd_authors(query)
+    else:
+        pd_authors = []
+    if not format or format != 'obraz':
+        books = search_books(
+            query,
+            lang=lang,
+            only_audio=(format == 'audio'),
+            only_synchro=(format == 'synchro'),
+            epoch=epoch,
+            kind=kind,
+            genre=genre
+        )
+    else:
+        books = []
+    if (not format or format == 'obraz') and not lang:
+        pictures = search_pictures(
+            query,
+            epoch=epoch,
+            kind=kind,
+            genre=genre
+        )
+    else:
+        pictures = []
     
+    # No did-you-mean suggestion is computed here; the templates get an empty string.
+    suggestion = ''
+
+    if not (books or pictures or pd_authors):
+        form = PublishingSuggestForm(initial={"books": query + ", "})
+        return render(
+            request,
+            'catalogue/search_no_hits.html',
+            {
+                'form': form,
+                'did_you_mean': suggestion
+            })
+
+    if not (books or pictures) and len(pd_authors) == 1:
+        return HttpResponseRedirect(pd_authors[0].get_absolute_url())
+
+    return render(
+        request,
+        'catalogue/search_multiple_hits.html',
+        {
+            'pd_authors': pd_authors,
+            'books': books,
+            'pictures': pictures,
+            'did_you_mean': suggestion,
+            'set': {
+                'lang': lang,
+                'format': format,
+                'epoch': epoch,
+                'kind': kind,
+                'genre': genre,
+            },
+            'tags': {
+                'epoch': Tag.objects.filter(category='epoch', for_books=True),
+                'genre': Tag.objects.filter(category='genre', for_books=True),
+                'kind': Tag.objects.filter(category='kind', for_books=True),
+            },
+        })
+
+
+def search_books(query, lang=None, only_audio=False, only_synchro=False, epoch=None, kind=None, genre=None):
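+    """Full-text search for books.
+
+    Runs search_words over progressively wider field sets (authors, title,
+    metadata, text and themes), merges hits per book, keeps ancestors over
+    their descendants, attaches snippets and filters by language, audio/DAISY
+    availability and epoch/kind/genre tags.
+    """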
     search = Search()
+    results_parts = []
+    search_fields = []
+    words = query.split()
+    fieldsets = (
+        (['authors', 'authors_nonstem'], True),
+        (['title', 'title_nonstem'], True),
+        (['metadata', 'metadata_nonstem'], True),
+        (['text', 'text_nonstem', 'themes_pl', 'themes_pl_nonstem'], False),
+    )
+    for fields, is_book in fieldsets:
+        search_fields += fields
+        results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book))
+    results = []
+    ids_results = {}
+    for results_part in results_parts:
+        for result in sorted(SearchResult.aggregate(results_part), reverse=True):
+            book_id = result.book_id
+            if book_id in ids_results:
+                ids_results[book_id].merge(result)
+            else:
+                results.append(result)
+                ids_results[book_id] = result
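+    # If both an ancestor book and its descendant matched, keep only the ancestor.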
+    descendant_ids = set(
+        Book.objects.filter(id__in=ids_results, ancestor__in=ids_results).values_list('id', flat=True))
+    results = [result for result in results if result.book_id not in descendant_ids]
+    for result in results:
+        search.get_snippets(result, query, num=3)
+
+    def ensure_exists(r):
+        try:
+            if not r.book:
+                return False
+        except Book.DoesNotExist:
+            return False
 
-    theme_terms = search.index.analyze(text=query, field="themes_pl") \
-        + search.index.analyze(text=query, field="themes")
-
-    # change hints
-    tags = search.hint_tags(query, pdcounter=True, prefix=False)
-    tags = split_tags(tags)
-
-    author_results = search.search_phrase(query, 'authors', book=True)
-    translator_results = search.search_phrase(query, 'translators', book=True)
-
-    title_results = search.search_phrase(query, 'title', book=True)
-
-    # Boost main author/title results with mixed search, and save some of its results for end of list.
-    # boost author, title results
-    author_title_mixed = search.search_some(query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
-    author_title_rest = []
-
-    for b in author_title_mixed:
-        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + translator_results + title_results)
-        for b2 in also_in_mixed:
-            b2.boost *= 1.1
-        if also_in_mixed is []:
-            author_title_rest.append(b)
-
-    # Do a phrase search but a term search as well - this can give us better snippets then search_everywhere,
-    # Because the query is using only one field.
-    text_phrase = SearchResult.aggregate(
-        search.search_phrase(query, 'text', snippets=True, book=False),
-        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))
-
-    everywhere = search.search_everywhere(query, query_terms=theme_terms)
-
-    def already_found(results):
-        def f(e):
-            for r in results:
-                if e.book_id == r.book_id:
-                    e.boost = 0.9
-                    results.append(e)
-                    return True
+        if lang and r.book.language != lang:
+            return False
+        if only_audio and not r.book.has_mp3_file():
+            return False
+        if only_synchro and not r.book.has_daisy_file():
+            return False
+        if epoch and not r.book.tags.filter(category='epoch', slug=epoch).exists():
+            return False
+        if kind and not r.book.tags.filter(category='kind', slug=kind).exists():
+            return False
+        if genre and not r.book.tags.filter(category='genre', slug=genre).exists():
             return False
-        return f
-    f = already_found(author_results + translator_results + title_results + text_phrase)
-    everywhere = filter(lambda x: not f(x), everywhere)
 
-    author_results = SearchResult.aggregate(author_results)
-    translator_results = SearchResult.aggregate(translator_results)
-    title_results = SearchResult.aggregate(title_results)
+        return True
 
-    everywhere = SearchResult.aggregate(everywhere, author_title_rest)
+    results = [r for r in results if ensure_exists(r)]
+    return results
 
-    for field, res in [('authors', author_results),
-                       ('translators', translator_results),
-                       ('title', title_results),
-                       ('text', text_phrase),
-                       ('text', everywhere)]:
-        res.sort(reverse=True)
-        for r in res:
-            search.get_snippets(r, query, field, 3)
 
-    suggestion = u''
+def search_pictures(query, epoch=None, kind=None, genre=None):
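+    """Full-text search for pictures.
+
+    Same field-set cascade as search_books, minus the full text, merged per
+    picture and filtered by epoch, kind and genre tags.
+    """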
+    search = Search()
+    results_parts = []
+    search_fields = []
+    words = query.split()
+    fieldsets = (
+        (['authors', 'authors_nonstem'], True),
+        (['title', 'title_nonstem'], True),
+        (['metadata', 'metadata_nonstem'], True),
+        (['themes_pl', 'themes_pl_nonstem'], False),
+    )
+    for fields, is_book in fieldsets:
+        search_fields += fields
+        results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book, picture=True))
+    results = []
+    ids_results = {}
+    for results_part in results_parts:
+        for result in sorted(PictureResult.aggregate(results_part), reverse=True):
+            picture_id = result.picture_id
+            if picture_id in ids_results:
+                ids_results[picture_id].merge(result)
+            else:
+                results.append(result)
+                ids_results[picture_id] = result
 
     def ensure_exists(r):
         try:
-            return r.book
-        except Book.DoesNotExist:
+            if not r.picture:
+                return False
+        except Picture.DoesNotExist:
             return False
 
-    author_results = filter(ensure_exists, author_results)
-    translator_results = filter(ensure_exists, translator_results)
-    title_results = filter(ensure_exists, title_results)
-    text_phrase = filter(ensure_exists, text_phrase)
-    everywhere = filter(ensure_exists, everywhere)
-
-    results = author_results + translator_results + title_results + text_phrase + everywhere
-    # ensure books do exists & sort them
-    for res in (author_results, translator_results, title_results, text_phrase, everywhere):
-        res.sort(reverse=True)
-
-    # We don't want to redirect to book text, but rather display result page even with one result.
-    # if len(results) == 1:
-    #     fragment_hits = filter(lambda h: 'fragment' in h, results[0].hits)
-    #     if len(fragment_hits) == 1:
-    #         #anchor = fragment_hits[0]['fragment']
-    #         #frag = Fragment.objects.get(anchor=anchor)
-    #         return HttpResponseRedirect(fragment_hits[0]['fragment'].get_absolute_url())
-    #     return HttpResponseRedirect(results[0].book.get_absolute_url())
-    if len(results) == 0:
-        form = PublishingSuggestForm(initial={"books": query + ", "})
-        return render_to_response(
-            'catalogue/search_no_hits.html',
-            {
-                'tags': tags,
-                'prefix': query,
-                'form': form,
-                'did_you_mean': suggestion
-            },
-            context_instance=RequestContext(request))
+        if epoch and not r.picture.tags.filter(category='epoch', slug=epoch).exists():
+            return False
+        if kind and not r.picture.tags.filter(category='kind', slug=kind).exists():
+            return False
+        if genre and not r.picture.tags.filter(category='genre', slug=genre).exists():
+            return False
 
-    return render_to_response(
-        'catalogue/search_multiple_hits.html',
-        {
-            'tags': tags,
-            'prefix': query,
-            'results': {
-                'author': author_results,
-                'translator': translator_results,
-                'title': title_results,
-                'content': text_phrase,
-                'other': everywhere
-            },
-            'did_you_mean': suggestion
-        },
-        context_instance=RequestContext(request))
+        return True
+
+    results = [r for r in results if ensure_exists(r)]
+    return results
+
+
+def search_pd_authors(query):
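+    """Public-domain counter authors matching the query that are not in the catalogue yet."""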
+    pd_authors = Author.objects.filter(name__icontains=query)
+    existing_slugs = Tag.objects.filter(
+        category='author', slug__in=list(pd_authors.values_list('slug', flat=True))) \
+        .values_list('slug', flat=True)
+    pd_authors = pd_authors.exclude(slug__in=existing_slugs)
+    return pd_authors
+
+
+def prepare_query(query):
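+    """Normalize the query: collapse whitespace, drop private-use and query
+    syntax characters, and keep at most the first 10 words."""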
+    query = ' '.join(query.split())
+    import unicodedata
+    # filter out private use characters
+    query = ''.join(ch for ch in query if unicodedata.category(ch) != 'Co')
+    query = remove_query_syntax_chars(query)
+
+    words = query.split()
+    if len(words) > 10:
+        query = ' '.join(words[:10])
+    return query