PayU payments working.
diff --git a/src/search/index.py b/src/search/index.py
index f9fb4b2..9f87b99 100644
--- a/src/search/index.py
+++ b/src/search/index.py
@@ -1,26 +1,34 @@
-# -*- coding: utf-8 -*-
 # This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
 # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
 #
-from django.conf import settings
-
+from functools import reduce, total_ordering
+from itertools import chain
+import logging
+import operator
 import os
 import re
+from django.conf import settings
 from librarian import dcparser
 from librarian.parser import WLDocument
 from lxml import etree
+import scorched
 import catalogue.models
+import picture.models
 from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
-from itertools import chain
-import sunburnt
-import custom
-import operator
-import logging
 from wolnelektury.utils import makedirs
+from . import custom
 
 log = logging.getLogger('search')
 
 
+if os.path.isfile(settings.SOLR_STOPWORDS):
+    stopwords = set(
+        line.strip()
+        for line in open(settings.SOLR_STOPWORDS) if not line.startswith('#'))
+else:
+    stopwords = set()
+
+
 class SolrIndex(object):
     def __init__(self, mode=None):
         self.index = custom.CustomSolrInterface(settings.SOLR, mode=mode)
@@ -121,7 +129,7 @@ class Index(SolrIndex):
         """
         uids = set()
         for q in queries:
-            if isinstance(q, sunburnt.search.LuceneQuery):
+            if isinstance(q, scorched.search.LuceneQuery):
                 q = self.index.query(q)
             q.field_limiter.update(['uid'])
             st = 0
@@ -240,7 +248,8 @@ class Index(SolrIndex):
             self.remove_book(book, remove_snippets=False)
 
         book_doc = self.create_book_doc(book)
-        meta_fields = self.extract_metadata(book, book_info, dc_only=['source_name', 'authors', 'translators', 'title'])
+        meta_fields = self.extract_metadata(book, book_info, dc_only=[
+            'source_name', 'authors', 'translators', 'title', 'epochs', 'kinds', 'genres'])
         # let's not index it - it's only used for extracting publish date
         if 'source_name' in meta_fields:
             del meta_fields['source_name']
@@ -257,8 +266,9 @@ class Index(SolrIndex):
             'published_date': meta_fields['published_date']
             }
 
-        if 'translators' in meta_fields:
-            book_fields['translators'] = meta_fields['translators']
+        for tag_name in ('translators', 'epochs', 'kinds', 'genres'):
+            if tag_name in meta_fields:
+                book_fields[tag_name] = meta_fields[tag_name]
 
         self.index_content(book, book_fields=book_fields)
 
@@ -269,14 +279,14 @@ class Index(SolrIndex):
         'dramat_wierszowany_lp',
         'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
         'wywiad',
-        ]
+    ]
 
     ignore_content_tags = [
-        'uwaga', 'extra', 'nota_red',
+        'uwaga', 'extra', 'nota_red', 'abstrakt',
         'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu',
         'didaskalia',
         'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc',
-        ]
+    ]
 
     footnote_tags = ['pa', 'pt', 'pr', 'pe']
 
@@ -295,7 +305,6 @@ class Index(SolrIndex):
             book_info = dcparser.parse(open(book.xml_file.path))
 
         fields['slug'] = book.slug
-        fields['tags'] = [t.name for t in book.tags]
         fields['is_book'] = True
 
         # validator, name
@@ -315,9 +324,9 @@ class Index(SolrIndex):
                 elif type_indicator == dcparser.as_person:
                     p = getattr(book_info, field.name)
                     if isinstance(p, dcparser.Person):
-                        persons = unicode(p)
+                        persons = str(p)
                     else:
-                        persons = ', '.join(map(unicode, p))
+                        persons = ', '.join(map(str, p))
                     fields[field.name] = persons
                 elif type_indicator == dcparser.as_date:
                     dt = getattr(book_info, field.name)
@@ -469,7 +478,7 @@ class Index(SolrIndex):
                         fid = start.attrib['id'][1:]
                         handle_text.append(lambda text: None)
                         if start.text is not None:
-                            fragments[fid]['themes'] += map(unicode.strip, map(unicode, (start.text.split(','))))
+                            fragments[fid]['themes'] += map(str.strip, map(str, (start.text.split(','))))
                     elif end is not None and end.tag == 'motyw':
                         handle_text.pop()
 
@@ -506,7 +515,49 @@ class Index(SolrIndex):
         finally:
             snippets.close()
 
+    def remove_picture(self, picture_or_id):
+        """Removes a picture from search index."""
+        if isinstance(picture_or_id, picture.models.Picture):
+            picture_id = picture_or_id.id
+        else:
+            picture_id = picture_or_id
+        self.delete_query(self.index.Q(picture_id=picture_id))
+
+    def index_picture(self, picture, picture_info=None, overwrite=True):
+        """
+        Indexes the picture.
+        Creates a lucene document for extracted metadata
+        and calls self.index_area() to index the contents of the picture.
+        """
+        if overwrite:
+            # we don't remove snippets, since they might be still needed by
+            # threads using not reopened index
+            self.remove_picture(picture)
 
+        picture_doc = {'picture_id': int(picture.id)}
+        meta_fields = self.extract_metadata(picture, picture_info, dc_only=[
+            'authors', 'title', 'epochs', 'kinds', 'genres'])
+
+        picture_doc.update(meta_fields)
+
+        picture_doc['uid'] = "picture%s" % picture_doc['picture_id']
+        self.index.add(picture_doc)
+        del picture_doc['is_book']
+        for area in picture.areas.all():
+            self.index_area(area, picture_fields=picture_doc)
+
+    def index_area(self, area, picture_fields):
+        """
+        Indexes themes and objects on the area.
+        """
+        doc = dict(picture_fields)
+        doc['area_id'] = area.id
+        doc['themes'] = list(area.tags.filter(category__in=('theme', 'thing')).values_list('name', flat=True))
+        doc['uid'] = 'area%s' % area.id
+        self.index.add(doc)
+
+
+@total_ordering
 class SearchResult(object):
     def __init__(self, doc, how_found=None, query_terms=None):
         self.boost = 1.0
@@ -549,14 +600,25 @@ class SearchResult(object):
 
             self._hits.append(hit)
 
-    def __unicode__(self):
+    @classmethod
+    def from_book(cls, book, how_found=None, query_terms=None):
+        doc = {
+            'score': book.popularity.count,
+            'book_id': book.id,
+            'published_date': 0,
+        }
+        result = cls(doc, how_found=how_found, query_terms=query_terms)
+        result._book = book
+        return result
+
+    def __str__(self):
         return u"<SR id=%d %d(%d) hits score=%f %d snippets>" % \
             (self.book_id, len(self._hits),
              len(self._processed_hits) if self._processed_hits else -1,
              self._score, len(self.snippets))
 
-    def __str__(self):
-        return unicode(self).encode('utf-8')
+    def __bytes__(self):
+        return str(self).encode('utf-8')
 
     @property
     def score(self):
@@ -564,16 +626,18 @@ class SearchResult(object):
 
     def merge(self, other):
         if self.book_id != other.book_id:
-            raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id))
+            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
         self._hits += other._hits
-        if other.score > self.score:
-            self._score = other._score
+        self._score += max(other._score, 0)
         return self
 
     def get_book(self):
         if self._book is not None:
             return self._book
-        self._book = catalogue.models.Book.objects.get(id=self.book_id)
+        try:
+            self._book = catalogue.models.Book.objects.get(id=self.book_id)
+        except catalogue.models.Book.DoesNotExist:
+            self._book = None
         return self._book
 
     book = property(get_book)
@@ -593,12 +657,12 @@ class SearchResult(object):
         # to sections and fragments
         frags = filter(lambda r: r[self.FRAGMENT] is not None, self._hits)
 
-        sect = filter(lambda r: r[self.FRAGMENT] is None, self._hits)
+        sect = [hit for hit in self._hits if hit[self.FRAGMENT] is None]
 
         # sections not covered by fragments
-        sect = filter(lambda s: 0 == len(filter(
+        sect = filter(lambda s: 0 == len(list(filter(
             lambda f: f[self.POSITION][self.POSITION_INDEX] <= s[self.POSITION][self.POSITION_INDEX] <
-                      f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN], frags)), sect)
+                      f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN], frags))), sect)
 
         def remove_duplicates(lst, keyfn, compare):
             els = {}
@@ -631,7 +695,7 @@ class SearchResult(object):
             m.update(s[self.OTHER])
             sections[si] = m
 
-        hits = sections.values()
+        hits = list(sections.values())
 
         for f in frags:
             try:
@@ -645,19 +709,19 @@ class SearchResult(object):
             if self.query_terms is not None:
                 for i in range(0, len(f[self.OTHER]['themes'])):
                     tms = f[self.OTHER]['themes'][i].split(r' +') + f[self.OTHER]['themes_pl'][i].split(' ')
-                    tms = map(unicode.lower, tms)
+                    tms = map(str.lower, tms)
                     for qt in self.query_terms:
                         if qt in tms:
                             themes_hit.add(f[self.OTHER]['themes'][i])
                             break
 
             def theme_by_name(n):
-                th = filter(lambda t: t.name == n, themes)
+                th = list(filter(lambda t: t.name == n, themes))
                 if th:
                     return th[0]
                 else:
                     return None
-            themes_hit = filter(lambda a: a is not None, map(theme_by_name, themes_hit))
+            themes_hit = list(filter(lambda a: a is not None, map(theme_by_name, themes_hit)))
 
             m = {'score': f[self.SCORE],
                  'fragment': frag,
@@ -668,7 +732,7 @@ class SearchResult(object):
             m.update(f[self.OTHER])
             hits.append(m)
 
-        hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
+        hits.sort(key=lambda h: h['score'], reverse=True)
 
         self._processed_hits = hits
 
@@ -685,13 +749,17 @@ class SearchResult(object):
                     books[r.book_id] = r
         return books.values()
 
-    def __cmp__(self, other):
-        c = cmp(self.score, other.score)
-        if c == 0:
-            # this is inverted, because earlier date is better
-            return cmp(other.published_date, self.published_date)
-        else:
-            return c
+    def get_sort_key(self):
+        return (-self.score,
+                self.published_date,
+                self.book.sort_key_author if self.book else '',
+                self.book.sort_key if self.book else '')
+
+    def __lt__(self, other):
+        return self.get_sort_key() > other.get_sort_key()
+
+    def __eq__(self, other):
+        return self.get_sort_key() == other.get_sort_key()
 
     def __len__(self):
         return len(self.hits)
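
For reference, the get_sort_key()/__lt__ pair introduced above inverts twice: the key negates the score, and __lt__ compares keys with ">". The net effect is that sorting results with reverse=True still puts the highest score first and breaks ties by the earlier publication date, which is what the removed __cmp__ did. A minimal standalone sketch of the same trick (a toy Result class, not the project's code):

from functools import total_ordering

@total_ordering
class Result:
    # toy stand-in, for illustration only
    def __init__(self, score, published_date):
        self.score = score
        self.published_date = published_date

    def get_sort_key(self):
        return (-self.score, self.published_date)

    def __lt__(self, other):
        # inverted on purpose, as in SearchResult.__lt__ above
        return self.get_sort_key() > other.get_sort_key()

    def __eq__(self, other):
        return self.get_sort_key() == other.get_sort_key()

results = [Result(0.5, 1900), Result(2.0, 1950), Result(2.0, 1834)]
print([(r.score, r.published_date) for r in sorted(results, reverse=True)])
# [(2.0, 1834), (2.0, 1950), (0.5, 1900)]
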
@@ -706,6 +774,114 @@ class SearchResult(object):
             return None
 
 
+@total_ordering
+class PictureResult(object):
+    def __init__(self, doc, how_found=None, query_terms=None):
+        self.boost = 1.0
+        self.query_terms = query_terms
+        self._picture = None
+        self._hits = []
+        self._processed_hits = None
+
+        if 'score' in doc:
+            self._score = doc['score']
+        else:
+            self._score = 0
+
+        self.picture_id = int(doc["picture_id"])
+
+        if doc.get('area_id'):
+            hit = (self._score, {
+                'how_found': how_found,
+                'area_id': doc['area_id'],
+                'themes': doc.get('themes', []),
+                'themes_pl': doc.get('themes_pl', []),
+            })
+
+            self._hits.append(hit)
+
+    def __str__(self):
+        return u"<PR id=%d score=%f >" % (self.picture_id, self._score)
+
+    def __repr__(self):
+        return str(self)
+
+    @property
+    def score(self):
+        return self._score * self.boost
+
+    def merge(self, other):
+        if self.picture_id != other.picture_id:
+            raise ValueError(
+                "this search result is for picture %d; tried to merge with %d" % (self.picture_id, other.picture_id))
+        self._hits += other._hits
+        self._score += max(other._score, 0)
+        return self
+
+    SCORE = 0
+    OTHER = 1
+
+    @property
+    def hits(self):
+        if self._processed_hits is not None:
+            return self._processed_hits
+
+        hits = []
+        for hit in self._hits:
+            try:
+                area = picture.models.PictureArea.objects.get(id=hit[self.OTHER]['area_id'])
+            except picture.models.PictureArea.DoesNotExist:
+                # stale index
+                continue
+            # Figure out if we were searching for a token matching some word in theme name.
+            themes_hit = set()
+            if self.query_terms is not None:
+                for i in range(0, len(hit[self.OTHER]['themes'])):
+                    tms = hit[self.OTHER]['themes'][i].split(r' +') + hit[self.OTHER]['themes_pl'][i].split(' ')
+                    tms = map(str.lower, tms)
+                    for qt in self.query_terms:
+                        if qt in tms:
+                            themes_hit.add(hit[self.OTHER]['themes'][i])
+                            break
+
+            m = {
+                'score': hit[self.SCORE],
+                'area': area,
+                'themes_hit': themes_hit,
+            }
+            m.update(hit[self.OTHER])
+            hits.append(m)
+
+        hits.sort(key=lambda h: h['score'], reverse=True)
+        hits = hits[:1]
+        self._processed_hits = hits
+        return hits
+
+    def get_picture(self):
+        if self._picture is None:
+            self._picture = picture.models.Picture.objects.get(id=self.picture_id)
+        return self._picture
+
+    picture = property(get_picture)
+
+    @staticmethod
+    def aggregate(*result_lists):
+        books = {}
+        for rl in result_lists:
+            for r in rl:
+                if r.picture_id in books:
+                    books[r.picture_id].merge(r)
+                else:
+                    books[r.picture_id] = r
+        return books.values()
+
+    def __lt__(self, other):
+        return self.score < other.score
+
+    def __eq__(self, other):
+        return self.score == other.score
+
+
 class Search(SolrIndex):
     """
     Search facilities.
@@ -726,71 +902,51 @@ class Search(SolrIndex):
 
         return q
 
-    def search_phrase(self, searched, field='text', book=False,
-                      filters=None,
-                      snippets=False):
-        if filters is None:
-            filters = []
-        if book:
-            filters.append(self.index.Q(is_book=True))
-
-        q = self.index.query(**{field: searched})
-        q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True)
-        res = q.paginate(rows=100).execute()
-        return [SearchResult(found, how_found=u'search_phrase') for found in res]
-
-    def search_some(self, searched, fields, book=True,
-                    filters=None, snippets=True, query_terms=None):
-        assert isinstance(fields, list)
-        if filters is None:
-            filters = []
+    def search_by_author(self, words):
+        from catalogue.models import Book
+        books = Book.objects.filter(parent=None).order_by('-popularity__count')
+        for word in words:
+            books = books.filter(cached_author__iregex='\m%s\M' % word).select_related('popularity__count')
+        return [SearchResult.from_book(book, how_found='search_by_author', query_terms=words) for book in books[:30]]
+
+    def search_words(self, words, fields, required=None, book=True, picture=False):
+        if book and not picture and fields == ['authors']:
+            return self.search_by_author(words)
+        filters = []
+        for word in words:
+            if book or picture or (word not in stopwords):
+                word_filter = None
+                for field in fields:
+                    q = self.index.Q(**{field: word})
+                    if word_filter is None:
+                        word_filter = q
+                    else:
+                        word_filter |= q
+                filters.append(word_filter)
+        if required:
+            required_filter = None
+            for field in required:
+                for word in words:
+                    if book or picture or (word not in stopwords):
+                        q = self.index.Q(**{field: word})
+                        if required_filter is None:
+                            required_filter = q
+                        else:
+                            required_filter |= q
+            filters.append(required_filter)
+        if not filters:
+            return []
+        params = {}
         if book:
-            filters.append(self.index.Q(is_book=True))
-
-        query = self.index.Q()
-
-        for fld in fields:
-            query = self.index.Q(query | self.make_term_query(searched, fld))
-
-        query = self.index.query(query)
+            params['is_book'] = True
+        if picture:
+            params['picture_id__gt'] = 0
+        else:
+            params['book_id__gt'] = 0
+        query = self.index.query(**params)
         query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True)
-        res = query.execute()
-        return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res]
-
-    def search_everywhere(self, searched, query_terms=None):
-        """
-        Tries to use search terms to match different fields of book (or its parts).
-        E.g. one word can be an author survey, another be a part of the title, and the rest
-        are some words from third chapter.
-        """
-        books = []
-        # content only query : themes x content
-        q = self.make_term_query(searched, 'text')
-        q_themes = self.make_term_query(searched, 'themes_pl')
-
-        query = self.index.query(q).query(q_themes).field_limit(score=True, all_fields=True)
-        res = query.execute()
-
-        for found in res:
-            books.append(SearchResult(found, how_found='search_everywhere_themesXcontent', query_terms=query_terms))
-
-        # query themes/content x author/title/tags
-        in_content = self.index.Q()
-        in_meta = self.index.Q()
-
-        for fld in ['themes_pl', 'text']:
-            in_content |= self.make_term_query(searched, field=fld)
-
-        for fld in ['tags', 'authors', 'title']:
-            in_meta |= self.make_term_query(searched, field=fld)
-
-        q = in_content & in_meta
-        res = self.index.query(q).field_limit(score=True, all_fields=True).execute()
-
-        for found in res:
-            books.append(SearchResult(found, how_found='search_everywhere', query_terms=query_terms))
-
-        return books
+        result_class = PictureResult if picture else SearchResult
+        return [result_class(found, how_found='search_words', query_terms=words) for found in query.execute()]
 
     def get_snippets(self, searchresult, query, field='text', num=1):
         """
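
For reference, the new search_words above builds one filter per query word (an OR over the searched fields) and applies each filter as an additional constraint through apply_filters, so every non-stopword has to match in at least one field; stopwords are only skipped when neither book nor picture results are requested. A plain-Python sketch of that match semantics on toy dicts (illustration only, not the scorched/Solr API):

# Illustration only: models the AND-of-ORs semantics of search_words on dicts.
def matches(doc, words, fields, stopwords=frozenset()):
    for word in words:
        if word in stopwords:
            continue  # the word contributes no filter, as in the stopword branch
        # every remaining word must appear in at least one searched field
        if not any(word in str(doc.get(field, '')).lower() for field in fields):
            return False
    return True

docs = [
    {'title': 'pan tadeusz', 'authors': 'adam mickiewicz'},
    {'title': 'ballady i romanse', 'authors': 'adam mickiewicz'},
]
print([d['title'] for d in docs if matches(d, ['adam', 'tadeusz'], ['title', 'authors'])])
# ['pan tadeusz']
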
@@ -813,13 +969,15 @@ class Search(SolrIndex):
                 text = snippets.get((int(position),
                                      int(length)))
                 snip = self.index.highlight(text=text, field=field, q=query)
+                if not snip and field == 'text':
+                    snip = self.index.highlight(text=text, field='text_nonstem', q=query)
                 if snip not in snips:
                     snips[idx] = snip
                     if snip:
                         num -= 1
                 idx += 1
 
-        except IOError, e:
+        except IOError as e:
             book = catalogue.models.Book.objects.filter(id=book_id)
             if not book:
                 log.error("Book does not exist for book id = %d" % book_id)
@@ -829,111 +987,13 @@ class Search(SolrIndex):
         finally:
             snippets.close()
 
-            # remove verse end markers..
-        snips = map(lambda s: s and s.replace("/\n", "\n"), snips)
+        # remove verse end markers..
+        snips = [s.replace("/\n", "\n") if s else s for s in snips]
 
         searchresult.snippets = snips
 
         return snips
 
-
-        """
-        Return auto-complete hints for tags
-        using prefix search.
-        """
-        q = self.index.Q()
-        query = query.strip()
-        for field in ['tag_name', 'tag_name_pl']:
-            if prefix:
-                q |= self.index.Q(**{field: query + "*"})
-            else:
-                q |= self.make_term_query(query, field=field)
-        qu = self.index.query(q)
-
-        return self.search_tags(qu, pdcounter=pdcounter)
-
-    def search_tags(self, query, filters=None, pdcounter=False):
-        """
-        Search for Tag objects using query.
-        """
-        if not filters:
-            filters = []
-        if not pdcounter:
-            filters.append(~self.index.Q(is_pdcounter=True))
-        res = self.apply_filters(query, filters).execute()
-
-        tags = []
-        pd_tags = []
-
-        for doc in res:
-            is_pdcounter = doc.get('is_pdcounter', False)
-            category = doc.get('tag_category')
-            try:
-                if is_pdcounter:
-                    if category == 'pd_author':
-                        tag = PDCounterAuthor.objects.get(id=doc.get('tag_id'))
-                    elif category == 'pd_book':
-                        tag = PDCounterBook.objects.get(id=doc.get('tag_id'))
-                        tag.category = 'pd_book'  # make it look more lik a tag.
-                    else:
-                        # WTF
-                        print ("Warning. cannot get pdcounter tag_id=%d from db; cat=%s" % (
-                            int(doc.get('tag_id')), category)).encode('utf-8')
-                    pd_tags.append(tag)
-                else:
-                    tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
-                    tags.append(tag)
-
-            except catalogue.models.Tag.DoesNotExist:
-                pass
-            except PDCounterAuthor.DoesNotExist:
-                pass
-            except PDCounterBook.DoesNotExist:
-                pass
-
-        tags_slugs = set(map(lambda t: t.slug, tags))
-        tags = tags + filter(lambda t: t.slug not in tags_slugs, pd_tags)
-
-        log.debug('search_tags: %s' % tags)
-
-        return tags
-
-    def hint_books(self, query, prefix=True):
-        """
-        Returns auto-complete hints for book titles
-        Because we do not index 'pseudo' title-tags.
-        Prefix search.
-        """
-        q = self.index.Q()
-        query = query.strip()
-        if prefix:
-            q |= self.index.Q(title=query + "*")
-            q |= self.index.Q(title_orig=query + "*")
-        else:
-            q |= self.make_term_query(query, field='title')
-            q |= self.make_term_query(query, field='title_orig')
-        qu = self.index.query(q)
-        only_books = self.index.Q(is_book=True)
-        return self.search_books(qu, [only_books])
-
-    def search_books(self, query, filters=None, max_results=10):
-        """
-        Searches for Book objects using query
-        """
-        bks = []
-        bks_found = set()
-        query = query.query(is_book=True)
-        res = self.apply_filters(query, filters).field_limit(['book_id'])
-        for r in res:
-            try:
-                bid = r['book_id']
-                if bid not in bks_found:
-                    bks.append(catalogue.models.Book.objects.get(id=bid))
-                    bks_found.add(bid)
-            except catalogue.models.Book.DoesNotExist:
-                pass
-        return bks
-
     @staticmethod
     def apply_filters(query, filters):
         """