Missing logos.
[wolnelektury.git] src/search/views.py
index 67c3fd4..2f9adab 100644
-# -*- coding: utf-8 -*-
-# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# This file is part of Wolne Lektury, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
 #
 from django.conf import settings
-from django.shortcuts import render_to_response
-from django.template import RequestContext
+from django.shortcuts import render
 from django.views.decorators import cache
 from django.http import HttpResponse, JsonResponse
-from django.utils.translation import ugettext as _
+from sorl.thumbnail import get_thumbnail
 
-from catalogue.utils import split_tags
-from catalogue.models import Book
-from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
-from search.index import Search, SearchResult
-from suggest.forms import PublishingSuggestForm
+import catalogue.models
+import infopages.models
+import picture.models
+from .forms import SearchFilters
 import re
 import json
 
+from wolnelektury.utils import re_escape
 
-def match_word_re(word):
-    if 'sqlite' in settings.DATABASES['default']['ENGINE']:
-        return r"\b%s\b" % word
-    elif 'mysql' in settings.DATABASES['default']['ENGINE']:
-        return "[[:<:]]%s[[:>:]]" % word
 
-
-query_syntax_chars = re.compile(r"[\\/*:(){}]")
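+# Characters that carry special meaning in the search query syntax;
+# remove_query_syntax_chars() below replaces them (with spaces by default).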
+query_syntax_chars = re.compile(r"[\\/*:(){}?.[\]+]")
 
 
 def remove_query_syntax_chars(query, replace=' '):
-    return query_syntax_chars.sub(' ', query)
-
-
-def did_you_mean(query, tokens):
-    return query
-    # change = {}
-    # for t in tokens:
-    #     authors = Tag.objects.filter(category='author', name__iregex=match_word_re(t))
-    #     if len(authors) > 0:
-    #         continue
-
-    #     if False:
-    #         if not dictionary.check(t):
-    #             try:
-    #                 change_to = dictionary.suggest(t)[0].lower()
-    #                 if change_to != t.lower():
-    #                     change[t] = change_to
-    #             except IndexError:
-    #                 pass
-
-    # if change == {}:
-    #     return None
-
-    # for frm, to in change.items():
-    #     query = query.replace(frm, to)
-
-    # return query
+    return query_syntax_chars.sub(replace, query)
 
 
 @cache.never_cache
-def hint(request):
-    prefix = request.GET.get('term', '')
+def hint(request, mozhint=False, param='term'):
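+    """
+    Serve autocomplete hints for the search box.
+
+    Returns a JSON list of suggestion dicts with 'type', 'label' and 'url'
+    keys (plus 'author'/'img' where available).  With mozhint=True the data
+    is repacked as [query, [labels], [descriptions], [urls]], matching the
+    OpenSearch suggestions format used by browser search plugins.
+    """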
+    prefix = request.GET.get(param, '')
     if len(prefix) < 2:
         return JsonResponse([], safe=False)
 
-    prefix = remove_query_syntax_chars(prefix)
-
-    search = Search()
-    # tags will do the narrowing here
-    # but tags may be attached to the book and to fragments
-    # if the tags concern only the book, the new ones must be in the same book
-    # if they concern themes, they must be in the same fragment.
-
-    def is_dupe(tag):
-        if isinstance(tag, PDCounterAuthor):
-            if filter(lambda t: t.slug == tag.slug and t != tag, tags):
-                return True
-        elif isinstance(tag, PDCounterBook):
-            if filter(lambda b: b.slug == tag.slug, tags):
-                return True
-        return False
-
-    def category_name(c):
-        if c.startswith('pd_'):
-            c = c[len('pd_'):]
-        return _(c)
+    prefix = re_escape(' '.join(remove_query_syntax_chars(prefix).split()))
 
     try:
         limit = int(request.GET.get('max', ''))
     except ValueError:
-        limit = -1
+        limit = 20
     else:
         if limit < 1:
-            limit = -1
+            limit = 20
 
     data = []
-
-    tags = search.hint_tags(prefix, pdcounter=True)
-    tags = filter(lambda t: not is_dupe(t), tags)
-    for t in tags:
-        if not limit:
-            break
-        limit -= 1
-        data.append({
-            'label': t.name,
-            'category': category_name(t.category),
-            'id': t.id,
-            'url': t.get_absolute_url()
-            })
-    if limit:
-        books = search.hint_books(prefix)
-        for b in books:
-            if not limit:
-                break
-            limit -= 1
-            data.append({
-                'label': b.title,
-                'category': _('book'),
-                'id': b.id,
-                'url': b.get_absolute_url()
-                })
+    if len(data) < limit:
+        authors = catalogue.models.Tag.objects.filter(
+            category='author', name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+        data.extend([
+            {
+                'type': 'author',
+                'label': author.name,
+                'url': author.get_absolute_gallery_url() if author.for_pictures else author.get_absolute_url(),
+                'img': get_thumbnail(author.photo, '72x72', crop='top').url if author.photo else '',
+            }
+            for author in authors[:limit - len(data)]
+        ])
+    if request.user.is_authenticated and len(data) < limit:
+        tags = catalogue.models.Tag.objects.filter(
+            category='set', user=request.user, name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+        data.extend([
+            {
+                'type': 'set',
+                'label': tag.name,
+                'url': tag.get_absolute_url(),
+            }
+            for tag in tags[:limit - len(data)]
+        ])
+    if len(data) < limit:
+        tags = catalogue.models.Tag.objects.filter(
+            category__in=('theme', 'genre', 'epoch', 'kind'), name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+        data.extend([
+            {
+                'type': tag.category,
+                'label': tag.name,
+                'url': tag.get_absolute_gallery_url() if tag.for_pictures else tag.get_absolute_url(),
+            }
+            for tag in tags[:limit - len(data)]
+        ])
+    if len(data) < limit:
+        collections = catalogue.models.Collection.objects.filter(
+            title_pl__iregex=r'\m' + prefix).only('title', 'slug')
+        data.extend([
+            {
+                'type': 'collection',
+                'label': collection.title,
+                'url': collection.get_absolute_url(),
+            }
+            for collection in collections[:limit - len(data)]
+        ])
+    if len(data) < limit:
+        for b in catalogue.models.Book.objects.filter(findable=True, title__iregex=r'\m' + prefix)[:limit - len(data)]:
+            author_str = b.author_unicode()
+            translator = b.translator()
+            if translator:
+                author_str += ' (tłum. ' + translator + ')'
+            data.append(
+                {
+                    'type': 'book',
+                    'label': b.title,
+                    'author': author_str,
+                    'url': b.get_absolute_url(),
+                    'img': get_thumbnail(b.cover_clean, '72x72').url if b.cover_clean else '',
+                }
+            )
+    if len(data) < limit:
+        arts = picture.models.Picture.objects.filter(
+            title__iregex=r'\m' + prefix).only('title', 'id', 'slug')  # img?
+        data.extend([
+            {
+                'type': 'art',
+                'label': art.title,
+                'author': art.author_unicode(),
+                'url': art.get_absolute_url(),
+                'img': get_thumbnail(art.image_file, '72x72').url if art.image_file else '',
+            }
+            for art in arts[:limit - len(data)]
+        ])
+    if len(data) < limit:
+        infos = infopages.models.InfoPage.objects.filter(
+            title_pl__iregex=r'\m' + prefix).only('title', 'id', 'slug')
+        data.extend([
+            {
+                'type': 'info',
+                'label': info.title,
+                'url': info.get_absolute_url(),
+            }
+            for info in infos[:limit - len(data)]
+        ])
+
+    if mozhint:
+        data = [
+            prefix,
+            [
+                item['label']
+                for item in data
+            ],
+            [
+                item.get('author', '')
+                for item in data
+            ],
+            [
+                item['url']
+                for item in data
+            ]
+        ]
 
     callback = request.GET.get('callback', None)
     if callback:
@@ -129,132 +151,19 @@ def hint(request):
         return JsonResponse(data, safe=False)
 
 
-@cache.never_cache
-def main(request):
-    query = request.GET.get('q', '')
-
-    if len(query) < 2:
-        return render_to_response(
-            'catalogue/search_too_short.html', {'prefix': query},
-            context_instance=RequestContext(request))
-    elif len(query) > 256:
-        return render_to_response(
-            'catalogue/search_too_long.html', {'prefix': query}, context_instance=RequestContext(request))
-
-    query = remove_query_syntax_chars(query)
-    
-    search = Search()
 
-    theme_terms = search.index.analyze(text=query, field="themes_pl") \
-        + search.index.analyze(text=query, field="themes")
-
-    # change hints
-    tags = search.hint_tags(query, pdcounter=True, prefix=False)
-    tags = split_tags(tags)
-
-    author_results = search.search_phrase(query, 'authors', book=True)
-    translator_results = search.search_phrase(query, 'translators', book=True)
-
-    title_results = search.search_phrase(query, 'title', book=True)
-
-    # Boost main author/title results with mixed search, and save some of its results for end of list.
-    # boost author, title results
-    author_title_mixed = search.search_some(query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
-    author_title_rest = []
-
-    for b in author_title_mixed:
-        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + translator_results + title_results)
-        for b2 in also_in_mixed:
-            b2.boost *= 1.1
-        if also_in_mixed is []:
-            author_title_rest.append(b)
-
-    # Do a phrase search but a term search as well - this can give us better snippets than search_everywhere,
-    # because the query is using only one field.
-    text_phrase = SearchResult.aggregate(
-        search.search_phrase(query, 'text', snippets=True, book=False),
-        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))
-
-    everywhere = search.search_everywhere(query, query_terms=theme_terms)
-
-    def already_found(results):
-        def f(e):
-            for r in results:
-                if e.book_id == r.book_id:
-                    e.boost = 0.9
-                    results.append(e)
-                    return True
-            return False
-        return f
-    f = already_found(author_results + translator_results + title_results + text_phrase)
-    everywhere = filter(lambda x: not f(x), everywhere)
-
-    author_results = SearchResult.aggregate(author_results)
-    translator_results = SearchResult.aggregate(translator_results)
-    title_results = SearchResult.aggregate(title_results)
-
-    everywhere = SearchResult.aggregate(everywhere, author_title_rest)
-
-    for field, res in [('authors', author_results),
-                       ('translators', translator_results),
-                       ('title', title_results),
-                       ('text', text_phrase),
-                       ('text', everywhere)]:
-        res.sort(reverse=True)
-        for r in res:
-            search.get_snippets(r, query, field, 3)
-
-    suggestion = u''
-
-    def ensure_exists(r):
-        try:
-            return r.book
-        except Book.DoesNotExist:
-            return False
-
-    author_results = filter(ensure_exists, author_results)
-    translator_results = filter(ensure_exists, translator_results)
-    title_results = filter(ensure_exists, title_results)
-    text_phrase = filter(ensure_exists, text_phrase)
-    everywhere = filter(ensure_exists, everywhere)
-
-    results = author_results + translator_results + title_results + text_phrase + everywhere
-    # ensure books do exist & sort them
-    for res in (author_results, translator_results, title_results, text_phrase, everywhere):
-        res.sort(reverse=True)
-
-    # We don't want to redirect to the book text, but rather display the result page even with one result.
-    # if len(results) == 1:
-    #     fragment_hits = filter(lambda h: 'fragment' in h, results[0].hits)
-    #     if len(fragment_hits) == 1:
-    #         #anchor = fragment_hits[0]['fragment']
-    #         #frag = Fragment.objects.get(anchor=anchor)
-    #         return HttpResponseRedirect(fragment_hits[0]['fragment'].get_absolute_url())
-    #     return HttpResponseRedirect(results[0].book.get_absolute_url())
-    if len(results) == 0:
-        form = PublishingSuggestForm(initial={"books": query + ", "})
-        return render_to_response(
-            'catalogue/search_no_hits.html',
-            {
-                'tags': tags,
-                'prefix': query,
-                'form': form,
-                'did_you_mean': suggestion
-            },
-            context_instance=RequestContext(request))
-
-    return render_to_response(
-        'catalogue/search_multiple_hits.html',
-        {
-            'tags': tags,
-            'prefix': query,
-            'results': {
-                'author': author_results,
-                'translator': translator_results,
-                'title': title_results,
-                'content': text_phrase,
-                'other': everywhere
-            },
-            'did_you_mean': suggestion
-        },
-        context_instance=RequestContext(request))
+@cache.never_cache
+def search(request):
+    filters = SearchFilters(request.GET)
+    ctx = {
+        'title': 'Wynik wyszukiwania',
+        'query': request.GET.get('q', ''),
+        'filters': filters,
+    }
+    if filters.is_valid():
+        ctx['results'] = filters.results()
+        for v in ctx['results'].values():
+            if v:
+                ctx['hasresults'] = True
+                break
+    return render(request, 'search/results.html', ctx)
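
Below, a minimal sketch of how the reworked views might be routed and exercised. The routes and names are assumptions for illustration only; the project's actual URL configuration is not part of this diff.

# Hypothetical wiring of the new views (illustrative; the real routes live in
# the project's URL configuration, which this change does not touch).
from django.urls import path
from django.test import Client

from search import views

urlpatterns = [
    path('szukaj/', views.search, name='wl_search'),
    path('szukaj/hint/', views.hint, name='search_hint'),
    # Browser-style suggestions: [query, [labels], [descriptions], [urls]].
    path('szukaj/mozhint/', views.hint,
         {'mozhint': True, 'param': 'q'}, name='search_mozhint'),
]

# Exercising the hint endpoint with Django's test client:
response = Client().get('/szukaj/hint/', {'term': 'mick', 'max': 5})
for item in response.json():
    print(item['type'], item['label'], item['url'])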