-# -*- coding: utf-8 -*-
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django.conf import settings
-from django.shortcuts import render_to_response
-from django.template import RequestContext
+from django.http.response import HttpResponseRedirect
+from django.shortcuts import render
from django.views.decorators import cache
from django.http import HttpResponse, JsonResponse
-from django.utils.translation import ugettext as _
-from catalogue.utils import split_tags
-from catalogue.models import Book
-from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
-from search.index import Search, SearchResult
+from catalogue.models import Book, Tag
+from pdcounter.models import Author
+from picture.models import Picture
+from search.index import Search, SearchResult, PictureResult
from suggest.forms import PublishingSuggestForm
import re
import json
+from wolnelektury.utils import re_escape
+
def match_word_re(word):
if 'sqlite' in settings.DATABASES['default']['ENGINE']:
return "[[:<:]]%s[[:>:]]" % word
-query_syntax_chars = re.compile(r"[\\/*:(){}]")
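+# Characters with special meaning in the search query syntax; they are replaced before querying.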
+query_syntax_chars = re.compile(r"[\\/*:(){}?.[\]+]")
def remove_query_syntax_chars(query, replace=' '):
- return query_syntax_chars.sub(' ', query)
+ return query_syntax_chars.sub(replace, query)
def did_you_mean(query, tokens):
@cache.never_cache
-def hint(request):
- prefix = request.GET.get('term', '')
+def hint(request, mozhint=False, param='term'):
+ prefix = request.GET.get(param, '')
if len(prefix) < 2:
return JsonResponse([], safe=False)
- prefix = remove_query_syntax_chars(prefix)
-
- search = Search()
-    # tags will act as a constraint here
-    # but tags can be attached to the book and to fragments
-    # if the tags concern only the book, the new ones must be in the same book
-    # if they concern themes, they must be in the same fragment.
-
- def is_dupe(tag):
- if isinstance(tag, PDCounterAuthor):
- if filter(lambda t: t.slug == tag.slug and t != tag, tags):
- return True
- elif isinstance(tag, PDCounterBook):
- if filter(lambda b: b.slug == tag.slug, tags):
- return True
- return False
-
- def category_name(c):
- if c.startswith('pd_'):
- c = c[len('pd_'):]
- return _(c)
+ prefix = re_escape(' '.join(remove_query_syntax_chars(prefix).split()))
try:
limit = int(request.GET.get('max', ''))
except ValueError:
- limit = -1
+ limit = 20
else:
if limit < 1:
- limit = -1
-
- data = []
-
- tags = search.hint_tags(prefix, pdcounter=True)
- tags = filter(lambda t: not is_dupe(t), tags)
- for t in tags:
- if not limit:
- break
- limit -= 1
- data.append({
- 'label': t.name,
- 'category': category_name(t.category),
- 'id': t.id,
- 'url': t.get_absolute_url()
- })
- if limit:
- books = search.hint_books(prefix)
- for b in books:
- if not limit:
- break
- limit -= 1
- data.append({
- 'label': '<cite>%s</cite>, %s' % (b.title, b.author_unicode()),
- 'category': _('book'),
+ limit = 20
+
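+    # '\m' matches at the start of a word in PostgreSQL regular expressions (the backend assumed here).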
+ authors = Tag.objects.filter(
+        category='author', name_pl__iregex=r'\m' + prefix).only('name', 'id', 'slug', 'category')
+ data = [
+ {
+ 'label': author.name,
+ 'id': author.id,
+ 'url': author.get_absolute_url(),
+ }
+ for author in authors[:limit]
+ ]
+ if len(data) < limit:
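+        # Fill the remaining slots with books whose titles match the prefix.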
+ data += [
+ {
+ 'label': b.title,
+ 'author': b.author_unicode(),
'id': b.id,
'url': b.get_absolute_url()
- })
+ }
+            for b in Book.objects.filter(findable=True, title__iregex=r'\m' + prefix)[:limit - len(data)]
+ ]
+
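+    # mozhint: return the [query, [completions]] structure used for browser search suggestions.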
+ if mozhint:
+ data = [
+ prefix,
+ [
+ item['label']
+ for item in data
+ ]
+ ]
callback = request.GET.get('callback', None)
if callback:
@cache.never_cache
def main(request):
query = request.GET.get('q', '')
- query = ' '.join(query.split())
- # filter out private use characters
- import unicodedata
- query = ''.join(ch for ch in query if unicodedata.category(ch) != 'Co')
-
if len(query) < 2:
- return render_to_response(
- 'catalogue/search_too_short.html', {'prefix': query},
- context_instance=RequestContext(request))
+ return render(
+ request, 'catalogue/search_too_short.html',
+ {'prefix': query})
elif len(query) > 256:
- return render_to_response(
- 'catalogue/search_too_long.html', {'prefix': query}, context_instance=RequestContext(request))
+ return render(
+ request, 'catalogue/search_too_long.html',
+ {'prefix': query})
- query = remove_query_syntax_chars(query)
+ query = prepare_query(query)
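+    # pdcounter authors are public-domain-counter entries without a catalogue author tag yet.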
+ pd_authors = search_pd_authors(query)
+ books = search_books(query)
+ pictures = search_pictures(query)
+ suggestion = ''
- words = query.split()
- if len(words) > 10:
- query = ' '.join(words[:10])
-
- search = Search()
-
- # change hints
- tags = search.hint_tags(query, pdcounter=True, prefix=False)
- tags = split_tags(tags)
-
- author_results = search.search_words(words, ['authors'])
-
- title_results = search.search_words(words, ['title'])
-
- author_title_mixed = search.search_words(words, ['authors', 'title', 'metadata'])
- author_title_rest = []
+ if not (books or pictures or pd_authors):
+ form = PublishingSuggestForm(initial={"books": query + ", "})
+ return render(
+ request,
+ 'catalogue/search_no_hits.html',
+ {
+ 'form': form,
+ 'did_you_mean': suggestion
+ })
- for b in author_title_mixed:
- also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + title_results)
- for b2 in also_in_mixed:
- b2.boost *= 1.1
- if not also_in_mixed:
- author_title_rest.append(b)
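+    # With no book or picture hits and exactly one public-domain author, go straight to that author's page.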
+ if not (books or pictures) and len(pd_authors) == 1:
+ return HttpResponseRedirect(pd_authors[0].get_absolute_url())
- text_phrase = SearchResult.aggregate(search.search_words(words, ['text'], book=False))
+ return render(
+ request,
+ 'catalogue/search_multiple_hits.html',
+ {
+ 'pd_authors': pd_authors,
+ 'books': books,
+ 'pictures': pictures,
+ 'did_you_mean': suggestion
+ })
- everywhere = search.search_words(words, ['metadata', 'text', 'themes_pl'], book=False)
+def search_books(query):
+ search = Search()
+ results_parts = []
+ search_fields = []
+ words = query.split()
+ fieldsets = (
+ (['authors', 'authors_nonstem'], True),
+ (['title', 'title_nonstem'], True),
+ (['metadata', 'metadata_nonstem'], True),
+ (['text', 'text_nonstem', 'themes_pl', 'themes_pl_nonstem'], False),
+ )
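+    # Each pass searches all fields accumulated so far, but requires a match in the newly added ones.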
+ for fields, is_book in fieldsets:
+ search_fields += fields
+ results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book))
+ results = []
+ ids_results = {}
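+    # Merge hits that refer to the same book, keeping one result per book id.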
+ for results_part in results_parts:
+ for result in sorted(SearchResult.aggregate(results_part), reverse=True):
+ book_id = result.book_id
+ if book_id in ids_results:
+ ids_results[book_id].merge(result)
+ else:
+ results.append(result)
+ ids_results[book_id] = result
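+    # Drop results for books whose ancestor already appears among the results.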
+ descendant_ids = set(
+ Book.objects.filter(id__in=ids_results, ancestor__in=ids_results).values_list('id', flat=True))
+ results = [result for result in results if result.book_id not in descendant_ids]
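+    # Attach up to three text snippets to each remaining result.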
+ for result in results:
+ search.get_snippets(result, query, num=3)
- def already_found(results):
- def f(e):
- for r in results:
- if e.book_id == r.book_id:
- e.boost = 0.9
- results.append(e)
- return True
+ def ensure_exists(r):
+ try:
+ return r.book
+ except Book.DoesNotExist:
return False
- return f
- f = already_found(author_results + title_results + text_phrase)
- everywhere = filter(lambda x: not f(x), everywhere)
- author_results = SearchResult.aggregate(author_results, author_title_rest)
- title_results = SearchResult.aggregate(title_results)
+ results = [r for r in results if ensure_exists(r)]
+ return results
- everywhere = SearchResult.aggregate(everywhere, author_title_rest)
- for field, res in [('authors', author_results),
- ('title', title_results),
- ('text', text_phrase),
- ('text', everywhere)]:
- res.sort(reverse=True)
- for r in res:
- search.get_snippets(r, query, field, 3)
-
- suggestion = u''
+def search_pictures(query):
+ search = Search()
+ results_parts = []
+ search_fields = []
+ words = query.split()
+ fieldsets = (
+ (['authors', 'authors_nonstem'], True),
+ (['title', 'title_nonstem'], True),
+ (['metadata', 'metadata_nonstem'], True),
+ (['themes_pl', 'themes_pl_nonstem'], False),
+ )
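+    # Same cascading field search as in search_books, but against the picture index.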
+ for fields, is_book in fieldsets:
+ search_fields += fields
+ results_parts.append(search.search_words(words, search_fields, required=fields, book=is_book, picture=True))
+ results = []
+ ids_results = {}
+ for results_part in results_parts:
+ for result in sorted(PictureResult.aggregate(results_part), reverse=True):
+ picture_id = result.picture_id
+ if picture_id in ids_results:
+ ids_results[picture_id].merge(result)
+ else:
+ results.append(result)
+ ids_results[picture_id] = result
def ensure_exists(r):
try:
- return r.book
- except Book.DoesNotExist:
+ return r.picture
+ except Picture.DoesNotExist:
return False
- author_results = filter(ensure_exists, author_results)
- title_results = filter(ensure_exists, title_results)
- text_phrase = filter(ensure_exists, text_phrase)
- everywhere = filter(ensure_exists, everywhere)
+ results = [r for r in results if ensure_exists(r)]
+ return results
- # ensure books do exists & sort them
- for res in (author_results, title_results, text_phrase):
- res.sort(reverse=True)
- if not (author_results or title_results or text_phrase or everywhere):
- form = PublishingSuggestForm(initial={"books": query + ", "})
- return render_to_response(
- 'catalogue/search_no_hits.html',
- {
- 'tags': tags,
- 'prefix': query,
- 'form': form,
- 'did_you_mean': suggestion
- },
- context_instance=RequestContext(request))
+def search_pd_authors(query):
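+    # Public-domain-counter authors matching the query that are not already represented by a catalogue tag.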
+ pd_authors = Author.objects.filter(name__icontains=query)
+ existing_slugs = Tag.objects.filter(
+ category='author', slug__in=list(pd_authors.values_list('slug', flat=True))) \
+ .values_list('slug', flat=True)
+ pd_authors = pd_authors.exclude(slug__in=existing_slugs)
+ return pd_authors
- return render_to_response(
- 'catalogue/search_multiple_hits.html',
- {
- 'tags': tags,
- 'prefix': query,
- 'results': {
- 'author': author_results,
- 'title': title_results,
- 'content': text_phrase,
- 'other': everywhere
- },
- 'did_you_mean': suggestion
- },
- context_instance=RequestContext(request))
+
+def prepare_query(query):
+ query = ' '.join(query.split())
+ # filter out private use characters
+ import unicodedata
+ query = ''.join(ch for ch in query if unicodedata.category(ch) != 'Co')
+ query = remove_query_syntax_chars(query)
+
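+    # Limit overly long queries to their first ten words.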
+ words = query.split()
+ if len(words) > 10:
+ query = ' '.join(words[:10])
+ return query