X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/ec93d5082ab137ec690f310eb6eba3c54d10b825..013ec8a416d4ff1ea3e1f2fa9ee2935623197ecd:/apps/search/views.py?ds=sidebyside

diff --git a/apps/search/views.py b/apps/search/views.py
index dc90841dd..f7aa77cd9 100644
--- a/apps/search/views.py
+++ b/apps/search/views.py
@@ -1,106 +1,255 @@
-
+# -*- coding: utf-8 -*-
+# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+#
+from django.conf import settings
 from django.shortcuts import render_to_response, get_object_or_404
 from django.template import RequestContext
-from django.contrib.auth.decorators import login_required
 from django.views.decorators import cache
+from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponsePermanentRedirect, JsonResponse
+from django.utils.translation import ugettext as _

-from catalogue.utils import get_random_hash
-from catalogue.models import Book, Tag, TAG_CATEGORIES
-from catalogue.fields import dumps
-from catalogue.views import JSONResponse
-from catalogue import forms
-from search import MultiSearch, JVM, SearchResult
-from lucene import StringReader
+from catalogue.utils import split_tags
+from catalogue.models import Book, Tag, Fragment
+from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
+from search.index import Search, SearchResult
+from suggest.forms import PublishingSuggestForm
+import re
+#import enchant
+import json

-import enchant
-dictionary = enchant.Dict('pl_PL')

+def match_word_re(word):
+    if 'sqlite' in settings.DATABASES['default']['ENGINE']:
+        return r"\b%s\b" % word
+    elif 'mysql' in settings.DATABASES['default']['ENGINE']:
+        return "[[:<:]]%s[[:>:]]" % word
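match_word_re() builds a whole-word pattern in whichever regex dialect the configured database understands: Python-style \b word boundaries for SQLite, and [[:<:]] / [[:>:]] word boundaries for MySQL. Below is a minimal sketch of the two return values, assuming only the function above and an arbitrary example word (nothing in it is part of the commit):

# Illustrative sketch only; 'slowacki' is an invented example word.
word = 'slowacki'
sqlite_pattern = r"\b%s\b" % word           # returned when ENGINE contains 'sqlite'
mysql_pattern = "[[:<:]]%s[[:>:]]" % word   # returned when ENGINE contains 'mysql'
# The commented-out did_you_mean() body further down feeds such a pattern to
# Tag.objects.filter(category='author', name__iregex=...).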
%s, sug: %s" %(t, dictionary.check(t), dictionary.suggest(t))) - if not dictionary.check(t): - try: - change[t] = dictionary.suggest(t)[0] - except IndexError: - pass +query_syntax_chars = re.compile(r"[\\/*:(){}]") - if change == {}: - return None - for frm, to in change.items(): - query = query.replace(frm, to) +def remove_query_syntax_chars(query, replace=' '): + return query_syntax_chars.sub(' ', query) + +def did_you_mean(query, tokens): return query + # change = {} + # for t in tokens: + # authors = Tag.objects.filter(category='author', name__iregex=match_word_re(t)) + # if len(authors) > 0: + # continue + # if False: + # if not dictionary.check(t): + # try: + # change_to = dictionary.suggest(t)[0].lower() + # if change_to != t.lower(): + # change[t] = change_to + # except IndexError: + # pass -def category_name(category): - try: - return filter(lambda c: c[0] == category, TAG_CATEGORIES)[0][1].encode('utf-8') - except IndexError: - raise KeyError("No category %s" % category) + # if change == {}: + # return None + + # for frm, to in change.items(): + # query = query.replace(frm, to) + # return query + +@cache.never_cache def hint(request): prefix = request.GET.get('term', '') if len(prefix) < 2: - return JSONResponse(dumps({})) - JVM.attachCurrentThread() - s = MultiSearch() - tags = s.hint_tags(prefix) - books = s.hint_books(prefix) - - return JSONResponse( - [{'label': t.name, - 'category': category_name(t.category), - 'id': t.id, - 'url': t.get_absolute_url()} - for t in tags] + \ - [{'label': b.title, - 'category': category_name('book'), - 'id': b.id, - 'url': b.get_absolute_url()} - for b in books]) - + return JsonResponse([], safe=False) + + prefix = remove_query_syntax_chars(prefix) + + search = Search() + # tagi beda ograniczac tutaj + # ale tagi moga byc na ksiazce i na fragmentach + # jezeli tagi dot tylko ksiazki, to wazne zeby te nowe byly w tej samej ksiazce + # jesli zas dotycza themes, to wazne, zeby byly w tym samym fragmencie. + + def is_dupe(tag): + if isinstance(tag, PDCounterAuthor): + if filter(lambda t: t.slug == tag.slug and t != tag, tags): + return True + elif isinstance(tag, PDCounterBook): + if filter(lambda b: b.slug == tag.slug, tags): + return True + return False + + def category_name(c): + if c.startswith('pd_'): + c = c[len('pd_'):] + return _(c) + try: + limit = int(request.GET.get('max', '')) + except ValueError: + limit = -1 + else: + if limit < 1: + limit = -1 + + data = [] + + tags = search.hint_tags(prefix, pdcounter=True) + tags = filter(lambda t: not is_dupe(t), tags) + for t in tags: + if not limit: + break + limit -= 1 + data.append({ + 'label': t.name, + 'category': category_name(t.category), + 'id': t.id, + 'url': t.get_absolute_url() + }) + if limit: + books = search.hint_books(prefix) + for b in books: + if not limit: + break + limit -= 1 + data.append({ + 'label': b.title, + 'category': _('book'), + 'id': b.id, + 'url': b.get_absolute_url() + }) + + callback = request.GET.get('callback', None) + if callback: + return HttpResponse("%s(%s);" % (callback, json.dumps(data)), + content_type="application/json; charset=utf-8") + else: + return JsonResponse(data, safe=False) + + +@cache.never_cache def main(request): results = {} - JVM.attachCurrentThread() # where to put this? - srch = MultiSearch() results = None query = None - fuzzy = False - - if 'q' in request.GET: - tags = request.GET.get('tags', '') - try: - tag_list = Tag.get_tag_list(tags) - except: - tag_list = [] - - # tag_filter = srch. 
+@cache.never_cache
 def main(request):
     results = {}
-    JVM.attachCurrentThread() # where to put this?
-    srch = MultiSearch()
     results = None
     query = None
-    fuzzy = False
-
-    if 'q' in request.GET:
-        tags = request.GET.get('tags', '')
-        try:
-            tag_list = Tag.get_tag_list(tags)
-        except:
-            tag_list = []
-
-        # tag_filter = srch.
-        query = request.GET['q']
-        toks = StringReader(query)
-        fuzzy = 'fuzzy' in request.GET
-        if fuzzy:
-            fuzzy = 0.7
+    query = request.GET.get('q', '')
+    if len(query) < 2:
+        return render_to_response('catalogue/search_too_short.html',
+                                   {'prefix': query},
+                                   context_instance=RequestContext(request))
-        results = SearchResult.aggregate(srch.search_perfect_book(toks, fuzzy=fuzzy),
-                                         srch.search_perfect_parts(toks, fuzzy=fuzzy),
-                                         srch.search_everywhere(toks, fuzzy=fuzzy))
-        results.sort(reverse=True)
-
-        for r in results:
-            print r.parts
-
-    return render_to_response('newsearch/search.html', {'results': results,
-                              'did_you_mean': (query is not None) and
-                                  did_you_mean(query, srch.get_tokens(query, field='SIMPLE')),
-                              'fuzzy': fuzzy},
-                              context_instance=RequestContext(request))
+    query = remove_query_syntax_chars(query)
+
+    search = Search()
+
+    theme_terms = search.index.analyze(text=query, field="themes_pl") \
+        + search.index.analyze(text=query, field="themes")
+
+    # change hints
+    tags = search.hint_tags(query, pdcounter=True, prefix=False)
+    tags = split_tags(tags)
+
+    author_results = search.search_phrase(query, 'authors', book=True)
+    translator_results = search.search_phrase(query, 'translators', book=True)
+
+    title_results = search.search_phrase(query, 'title', book=True)
+
+    # Boost main author/title results with mixed search, and save some of its results for end of list.
+    # boost author, title results
+    author_title_mixed = search.search_some(query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
+    author_title_rest = []
+
+    for b in author_title_mixed:
+        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + translator_results + title_results)
+        for b2 in also_in_mixed:
+            b2.boost *= 1.1
+        if not also_in_mixed:
+            author_title_rest.append(b)
+
+    # Do a phrase search but a term search as well - this can give us better snippets than search_everywhere,
+    # because the query is using only one field.
+    text_phrase = SearchResult.aggregate(
+        search.search_phrase(query, 'text', snippets=True, book=False),
+        search.search_some(query, ['text'], snippets=True, book=False, query_terms=theme_terms))
+
+    everywhere = search.search_everywhere(query, query_terms=theme_terms)
+
+    def already_found(results):
+        def f(e):
+            for r in results:
+                if e.book_id == r.book_id:
+                    e.boost = 0.9
+                    results.append(e)
+                    return True
+            return False
+        return f
+    f = already_found(author_results + translator_results + title_results + text_phrase)
+    everywhere = filter(lambda x: not f(x), everywhere)
+
+    author_results = SearchResult.aggregate(author_results)
+    translator_results = SearchResult.aggregate(translator_results)
+    title_results = SearchResult.aggregate(title_results)
+
+    everywhere = SearchResult.aggregate(everywhere, author_title_rest)
+
+    for field, res in [('authors', author_results),
+                       ('translators', translator_results),
+                       ('title', title_results),
+                       ('text', text_phrase),
+                       ('text', everywhere)]:
+        res.sort(reverse=True)
+        for r in res:
+            search.get_snippets(r, query, field, 3)
+
+    suggestion = u''
+
+    def ensure_exists(r):
+        try:
+            return r.book
+        except Book.DoesNotExist:
+            return False
+
+    author_results = filter(ensure_exists, author_results)
+    translator_results = filter(ensure_exists, translator_results)
+    title_results = filter(ensure_exists, title_results)
+    text_phrase = filter(ensure_exists, text_phrase)
+    everywhere = filter(ensure_exists, everywhere)
+
+    results = author_results + translator_results + title_results + text_phrase + everywhere
+    # ensure books do exist & sort them
+    for res in (author_results, translator_results, title_results, text_phrase, everywhere):
+        res.sort(reverse=True)
+
+    # We don't want to redirect to book text, but rather display result page even with one result.
+    # if len(results) == 1:
+    #     fragment_hits = filter(lambda h: 'fragment' in h, results[0].hits)
+    #     if len(fragment_hits) == 1:
+    #         #anchor = fragment_hits[0]['fragment']
+    #         #frag = Fragment.objects.get(anchor=anchor)
+    #         return HttpResponseRedirect(fragment_hits[0]['fragment'].get_absolute_url())
+    #     return HttpResponseRedirect(results[0].book.get_absolute_url())
+    if len(results) == 0:
+        form = PublishingSuggestForm(initial={"books": query + ", "})
+        return render_to_response('catalogue/search_no_hits.html',
+                                  {'tags': tags,
+                                   'prefix': query,
+                                   "form": form,
+                                   'did_you_mean': suggestion},
+                                  context_instance=RequestContext(request))
+
+    return render_to_response('catalogue/search_multiple_hits.html',
+                              {'tags': tags,
+                               'prefix': query,
+                               'results': {'author': author_results,
+                                           'translator': translator_results,
+                                           'title': title_results,
+                                           'content': text_phrase,
+                                           'other': everywhere},
+                               'did_you_mean': suggestion},
+                              context_instance=RequestContext(request))
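The least obvious step in the new main() is the already_found() closure: any full-text hit whose book already appears among the author, translator, title or text-phrase results has its boost cut to 0.9, is appended to that primary list, and is dropped from everywhere. Below is a standalone sketch of that pass, using a stand-in class with only the book_id and boost attributes the view relies on; everything else in it is hypothetical:

# Standalone illustration of the already_found() pass; FakeHit is a stand-in.
class FakeHit(object):
    def __init__(self, book_id, boost=1.0):
        self.book_id = book_id
        self.boost = boost

primary = [FakeHit(1), FakeHit(2)]      # e.g. author/translator/title/text-phrase hits
everywhere = [FakeHit(2), FakeHit(3)]   # broader full-text hits

def already_found(results):
    def f(e):
        for r in results:
            if e.book_id == r.book_id:
                e.boost = 0.9           # damp the duplicate's score
                results.append(e)       # fold it into the primary list
                return True
        return False
    return f

f = already_found(primary)
everywhere = [x for x in everywhere if not f(x)]

assert [h.book_id for h in everywhere] == [3]     # book 2 was removed here...
assert [h.book_id for h in primary] == [1, 2, 2]  # ...and kept with the primary hits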