Merge branch 'master' into sunburnt
[wolnelektury.git] apps/search/views.py
index ec8275b..72852d0 100644
@@ -12,7 +12,7 @@ from catalogue.utils import split_tags
 from catalogue.models import Book, Tag, Fragment
 from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
 from catalogue.views import JSONResponse
-from search import Search, JVM, SearchResult
+from search import Search, SearchResult
 from lucene import StringReader
 from suggest.forms import PublishingSuggestForm
 from time import sleep
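
Note on this import hunk: with the JVM handle gone, two neighbouring imports appear to lose their last users in this file -- StringReader (its only use visible in this diff, toks = StringReader(query), is deleted below) and sleep (used only by the removed get_search() busy-wait). Assuming nothing outside the shown hunks still needs them, the block could eventually be trimmed to a sketch like this:

    # Hypothetical trimmed import block -- not part of this commit, and only
    # correct if StringReader and sleep really have no remaining callers here.
    from catalogue.utils import split_tags
    from catalogue.models import Book, Tag, Fragment
    from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
    from catalogue.views import JSONResponse
    from search import Search, SearchResult
    from suggest.forms import PublishingSuggestForm
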
@@ -20,8 +20,6 @@ import re
 #import enchant
 import json
 
-#dictionary = enchant.Dict('en_US')
-
 
 def match_word_re(word):
     if 'sqlite' in settings.DATABASES['default']['ENGINE']:
@@ -31,60 +29,37 @@ def match_word_re(word):
 
 
 def did_you_mean(query, tokens):
-    change = {}
-    for t in tokens:
-        authors = Tag.objects.filter(category='author', name__iregex=match_word_re(t))
-        if len(authors) > 0:
-            continue
-
-        if False:
-            if not dictionary.check(t):
-                try:
-                    change_to = dictionary.suggest(t)[0].lower()
-                    if change_to != t.lower():
-                        change[t] = change_to
-                except IndexError:
-                    pass
-
-    if change == {}:
-        return None
-
-    for frm, to in change.items():
-        query = query.replace(frm, to)
-
     return query
+    # change = {}
+    # for t in tokens:
+    #     authors = Tag.objects.filter(category='author', name__iregex=match_word_re(t))
+    #     if len(authors) > 0:
+    #         continue
 
+    #     if False:
+    #         if not dictionary.check(t):
+    #             try:
+    #                 change_to = dictionary.suggest(t)[0].lower()
+    #                 if change_to != t.lower():
+    #                     change[t] = change_to
+    #             except IndexError:
+    #                 pass
 
-JVM.attachCurrentThread()
-_search = None
-
+    # if change == {}:
+    #     return None
 
-def get_search():
-    global _search
+    # for frm, to in change.items():
+    #     query = query.replace(frm, to)
 
-    while _search is False:
-        sleep(1)
-
-    if _search is None:
-        _search = False
-        _search = Search()
-    return _search
+    # return query
 
 
 def hint(request):
     prefix = request.GET.get('term', '')
     if len(prefix) < 2:
         return JSONResponse([])
-    JVM.attachCurrentThread()
-
-    search = get_search()
-    hint = search.hint()
-    try:
-        tags = request.GET.get('tags', '')
-        hint.tags(Tag.get_tag_list(tags))
-    except:
-        pass
 
+    search = Search()
     # tags will act as a filter here
     # but tags can be attached to the book as well as to fragments
     # if the tags concern only the book, the important thing is that the new ones are within the same book
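
What disappears above is the PyLucene-era plumbing: every request had to call JVM.attachCurrentThread(), and get_search() lazily built one process-wide Search object while other threads busy-waited on the _search is False sentinel. The sunburnt-backed view simply constructs Search() per request, which suggests it is now a cheap wrapper around an HTTP connection to Solr. A minimal sketch of the new hint flow, using only the calls that appear in this diff (their exact signatures live in the search app and are assumed here):

    # Sketch only; Search(), hint_tags() and hint_books() are taken from the
    # calls visible in this diff, not from the search app's documentation.
    from search import Search

    def autocomplete(prefix):
        search = Search()          # no JVM attach, no lazy singleton, no sleep() loop
        tags = search.hint_tags(prefix, pdcounter=True)
        books = search.hint_books(prefix)
        return tags, books
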
@@ -93,7 +68,6 @@ def hint(request):
     tags = search.hint_tags(prefix, pdcounter=True)
     books = search.hint_books(prefix)
 
-    
     def is_dupe(tag):
         if isinstance(tag, PDCounterAuthor):
             if filter(lambda t: t.slug == tag.slug and t != tag, tags):
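
A side note on the is_dupe() helper kept as context here: the "if filter(...)" test relies on Python 2's filter() returning a plain list. Under Python 3 filter() returns a lazy iterator, which is always truthy whether or not anything matched, so any() is the portable way to express the same check:

    # Illustration of the pitfall (not project code).
    tags = []
    print(bool(filter(lambda t: t == 'x', tags)))  # Python 2: False; Python 3: True
    print(any(t == 'x' for t in tags))             # False on both
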
@@ -126,64 +100,51 @@ def hint(request):
                             content_type="application/json; charset=utf-8")
     else:
         return JSONResponse(data)
-            
 
 
 def main(request):
     results = {}
-    JVM.attachCurrentThread()  # where to put this?
 
     results = None
     query = None
-    fuzzy = False #0.8
-
-    query = request.GET.get('q','')
-    # book_id = request.GET.get('book', None)
-    # book = None
-    # if book_id is not None:
-    #     book = get_object_or_404(Book, id=book_id)
 
-    # hint = search.hint()
-    # try:
-    #     tag_list = Tag.get_tag_list(tags)
-    # except:
-    #     tag_list = []
+    query = request.GET.get('q', '')
 
     if len(query) < 2:
-        return render_to_response('catalogue/search_too_short.html', {'prefix': query},
-                                  context_instance=RequestContext(request))
-
-    search = get_search()
-    # hint.tags(tag_list)
-    # if book:
-    #     hint.books(book)
-    tags = search.hint_tags(query, pdcounter=True, prefix=False, fuzzy=fuzzy)
-    tags = split_tags(tags)
+        return render_to_response('catalogue/search_too_short.html',
+                                  {'prefix': query},
+            context_instance=RequestContext(request))
+    search = Search()
 
-    toks = StringReader(query)
-    tokens_cache = {}
+    theme_terms = search.index.analyze(text=query, field="themes_pl") \
+        + search.index.analyze(text=query, field="themes")
 
-    author_results = search.search_phrase(toks, 'authors', fuzzy=fuzzy, tokens_cache=tokens_cache)
-    title_results = search.search_phrase(toks, 'title', fuzzy=fuzzy, tokens_cache=tokens_cache)
+            # change hints
+    tags = search.hint_tags(query, pdcounter=True, prefix=False)
+    tags = split_tags(tags)
+
+    author_results = search.search_phrase(query, 'authors', book=True)
+    title_results = search.search_phrase(query, 'title', book=True)
 
     # Boost main author/title results with mixed search, and save some of its results for end of list.
     # boost author, title results
-    author_title_mixed = search.search_some(toks, ['authors', 'title', 'tags'], fuzzy=fuzzy, tokens_cache=tokens_cache)
+    author_title_mixed = search.search_some(query, ['authors', 'title', 'tags'], query_terms=theme_terms)
     author_title_rest = []
+
     for b in author_title_mixed:
-        bks = filter(lambda ba: ba.book_id == b.book_id, author_results + title_results)
-        for b2 in bks:
+        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + title_results)
+        for b2 in also_in_mixed:
             b2.boost *= 1.1
-        if bks is []:
+        if also_in_mixed is []:
             author_title_rest.append(b)
 
     # Do a phrase search but a term search as well - this can give us better snippets than search_everywhere,
     # because the query is using only one field.
     text_phrase = SearchResult.aggregate(
-        search.search_phrase(toks, 'content', fuzzy=fuzzy, tokens_cache=tokens_cache, snippets=True, book=False, slop=4),
-        search.search_some(toks, ['content'], tokens_cache=tokens_cache, snippets=True, book=False))
+        search.search_phrase(query, 'text', snippets=True, book=False),
+        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))
 
-    everywhere = search.search_everywhere(toks, fuzzy=fuzzy, tokens_cache=tokens_cache)
+    everywhere = search.search_everywhere(query, query_terms=theme_terms)
 
     def already_found(results):
         def f(e):
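
One wrinkle in the rewritten boost loop above: "also_in_mixed is []" (exactly like "bks is []" before it) compares identity against a freshly built empty list, so it can never be true and author_title_rest never receives anything. The rename keeps that pre-existing behaviour; the emptiness test presumably intended is a plain truth-value check:

    # Demonstration of why "x is []" never fires; not project code.
    also_in_mixed = []
    print(also_in_mixed is [])   # False: identity against a brand-new list object
    print(also_in_mixed == [])   # True:  equality
    print(not also_in_mixed)     # True:  the idiomatic emptiness test
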
@@ -202,15 +163,15 @@ def main(request):
 
     everywhere = SearchResult.aggregate(everywhere, author_title_rest)
 
-    for res in [author_results, title_results, text_phrase, everywhere]:
+    for field, res in [('authors', author_results),
+                       ('title', title_results),
+                       ('text', text_phrase),
+                       ('text', everywhere)]:
         res.sort(reverse=True)
         for r in res:
-            for h in r.hits:
-                h['snippets'] = map(lambda s:
-                                    re.subn(r"(^[ \t\n]+|[ \t\n]+$)", u"",
-                                            re.subn(r"[ \t\n]*\n[ \t\n]*", u"\n", s)[0])[0], h['snippets'])
+            search.get_snippets(r, query, field, 3)
 
-    suggestion = did_you_mean(query, search.get_tokens(toks, field="SIMPLE"))
+    suggestion = u''
 
     def ensure_exists(r):
         try:
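
The whitespace clean-up dropped above is presumably folded into search.get_snippets(r, query, field, 3), which now also knows which index field ('authors', 'title' or 'text') each result list came from. For reference, this is what the removed re.subn chain did to every snippet string:

    # Stand-alone replay of the removed snippet normalisation; runs as-is.
    import re

    def clean_snippet(s):
        s = re.subn(r"[ \t\n]*\n[ \t\n]*", u"\n", s)[0]       # collapse whitespace around newlines
        return re.subn(r"(^[ \t\n]+|[ \t\n]+$)", u"", s)[0]   # strip both ends

    print(repr(clean_snippet(u"  first line \n\t second line  \n")))
    # -> u'first line\nsecond line'
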
@@ -246,9 +207,9 @@ def main(request):
     return render_to_response('catalogue/search_multiple_hits.html',
                               {'tags': tags,
                                'prefix': query,
-                               'results': { 'author': author_results,
-                                            'title': title_results,
-                                            'content': text_phrase,
-                                            'other': everywhere},
+                               'results': {'author': author_results,
+                                           'title': title_results,
+                                           'content': text_phrase,
+                                           'other': everywhere},
                                'did_you_mean': suggestion},
         context_instance=RequestContext(request))