Cite backdrop with books
diff --git a/apps/search/index.py b/apps/search/index.py
index cc478ee..dc33575 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 
 from django.conf import settings
-from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
+from lucene import SimpleFSDirectory, IndexWriter, IndexWriterConfig, CheckIndex, \
     File, Field, Integer, \
     NumericField, Version, Document, JavaError, IndexSearcher, \
     QueryParser, PerFieldAnalyzerWrapper, \
@@ -25,7 +25,9 @@ import re
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
+from lxml import etree
 import catalogue.models
+from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
 from multiprocessing.pool import ThreadPool
 from threading import current_thread
 import atexit
@@ -167,11 +169,13 @@ class BaseIndex(IndexStore):
             analyzer = WLAnalyzer()
         self.analyzer = analyzer
 
-    def open(self, analyzer=None):
+    def open(self, timeout=None):
         if self.index:
             raise Exception("Index is already opened")
-        self.index = IndexWriter(self.store, self.analyzer,\
-                                 IndexWriter.MaxFieldLength.LIMITED)
+        conf = IndexWriterConfig(Version.LUCENE_34, self.analyzer)
+        if timeout:
+            conf.setWriteLockTimeout(long(timeout))
+        self.index = IndexWriter(self.store, conf)
         return self.index
 
     def optimize(self):
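
Note: the rewritten open() follows the Lucene 3.x API, where per-writer options moved out of the IndexWriter constructor into IndexWriterConfig. A minimal sketch of the new call pattern (the 10000 ms value is illustrative, not from this commit):

    conf = IndexWriterConfig(Version.LUCENE_34, analyzer)
    conf.setWriteLockTimeout(long(10000))  # wait up to 10 s for the write lock
    writer = IndexWriter(store, conf)

The configurable timeout matters because several processes (web workers, management commands) may contend for the same index directory's write lock.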
@@ -212,20 +216,38 @@ class Index(BaseIndex):
 
         for tag in catalogue.models.Tag.objects.all():
             doc = Document()
-            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(tag.id))
+            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(int(tag.id)))
             doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
             doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
             doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
             self.index.addDocument(doc)
 
+        for pdtag in PDCounterAuthor.objects.all():
+            doc = Document()
+            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(int(pdtag.id)))
+            doc.add(Field("tag_name", pdtag.name, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_name_pl", pdtag.name, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_category", 'pd_author', Field.Store.YES, Field.Index.NOT_ANALYZED))
+            doc.add(Field("is_pdcounter", 'true', Field.Store.YES, Field.Index.NOT_ANALYZED))
+            self.index.addDocument(doc)
+
+        for pdtag in PDCounterBook.objects.all():
+            doc = Document()
+            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(int(pdtag.id)))
+            doc.add(Field("tag_name", pdtag.title, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_name_pl", pdtag.title, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_category", 'pd_book', Field.Store.YES, Field.Index.NOT_ANALYZED))
+            doc.add(Field("is_pdcounter", 'true', Field.Store.YES, Field.Index.NOT_ANALYZED))
+            self.index.addDocument(doc)
+
     def create_book_doc(self, book):
         """
         Create a lucene document referring book id.
         """
         doc = Document()
-        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
+        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(int(book.id)))
         if book.parent is not None:
-            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
+            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(int(book.parent.id)))
         return doc
 
     def remove_book(self, book):
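
Note: PDCounterAuthor and PDCounterBook entries (from the pdcounter app, which tracks works counting down to the public domain) are indexed as pseudo-tags next to real catalogue tags: the same tag_id/tag_name fields, a tag_category of 'pd_author' or 'pd_book', plus an is_pdcounter flag so queries can include or exclude them. A sketch of the exclusion filter applied later in search_tags (illustrative):

    no_pdcounter = self.term_filter(Term('is_pdcounter', 'true'), inverse=True)

The int() coercions added around tag.id and book.id presumably guard against Django handing back Python longs, which NumericField.setIntValue will not accept.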
@@ -251,11 +273,10 @@ class Index(BaseIndex):
                     book_doc.add(elem)
             else:
                 book_doc.add(f)
-
         self.index.addDocument(book_doc)
         del book_doc
 
-        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['authors']])
+        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['authors'], meta_fields['published_date']])
 
     master_tags = [
         'opowiadanie',
@@ -263,10 +284,21 @@ class Index(BaseIndex):
         'dramat_wierszowany_l',
         'dramat_wierszowany_lp',
         'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
-        'wywiad'
+        'wywiad',
+        ]
+
+    ignore_content_tags = [
+        'uwaga', 'extra',
+        'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu',
+        'didaskalia',
+        'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc',
         ]
 
-    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
+    footnote_tags = ['pa', 'pt', 'pr', 'pe']
+
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
+
+    published_date_re = re.compile("([0-9]+)[\]. ]*$")
 
     def extract_metadata(self, book, book_info=None):
         """
@@ -308,6 +340,15 @@ class Index(BaseIndex):
                     fields[field.name] = Field(field.name, "%04d%02d%02d" %\
                                                (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)
 
+        # get published date
+        pd = None
+        if hasattr(book_info, 'source_name') and book_info.source_name:
+            match = self.published_date_re.search(book_info.source_name)
+            if match is not None:
+                pd = str(match.groups()[0])
+        if not pd: pd = ""
+        fields["published_date"] = Field("published_date", pd, Field.Store.YES, Field.Index.NOT_ANALYZED)
+
         return fields
 
     def add_gaps(self, fields, fieldname):
@@ -340,15 +381,33 @@ class Index(BaseIndex):
         if master is None:
             return []
 
-        def walker(node):
-            yield node, None
-            for child in list(node):
-                for b, e in walker(child):
-                    yield b, e
-            yield None, node
+        def walker(node, ignore_tags=[]):
+
+            if node.tag not in ignore_tags:
+                yield node, None, None
+                if node.text is not None:
+                    yield None, node.text, None
+                for child in list(node):
+                    for b, t, e in walker(child):
+                        yield b, t, e
+                yield None, None, node
+
+            if node.tail is not None:
+                yield None, node.tail, None
             return
 
         def fix_format(text):
+            #            separator = [u" ", u"\t", u".", u";", u","]
+            if isinstance(text, list):
+                # need to join it first
+                text = filter(lambda s: s is not None, content)
+                text = u' '.join(text)
+                # for i in range(len(text)):
+                #     if i > 0:
+                #         if text[i][0] not in separator\
+                #             and text[i - 1][-1] not in separator:
+                #          text.insert(i, u" ")
+
             return re.sub("(?m)/$", "", text)
 
         def add_part(snippets, **fields):
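
Note: the rewritten walker flattens the element tree into a stream of (opening element, text, closing element) triples, so the indexing loop can react to tag boundaries and collect character data in a single pass; subtrees whose tag is in ignore_tags are skipped, though their tails still surface. A sketch of the event order for a small fragment, assuming lxml semantics (illustrative):

    elem = etree.fromstring("<strofa>Ala <motyw>ma</motyw> kota</strofa>")
    # walker(elem) yields, in order:
    #   (<strofa>, None, None)  (None, u'Ala ', None)
    #   (<motyw>, None, None)   (None, u'ma', None)   (None, None, <motyw>)
    #   (None, u' kota', None)  (None, None, <strofa>)

Two details visible in the diff: the recursive walker(child) call does not forward ignore_tags, so pruning only applies at the level the list was passed in; and fix_format's new list branch filters the enclosing content variable rather than its own text argument before joining.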
@@ -401,24 +460,51 @@ class Index(BaseIndex):
 
                 if header.tag in self.skip_header_tags:
                     continue
+                if header.tag is etree.Comment:
+                    continue
 
-                content = u' '.join([t for t in header.itertext()])
-                content = fix_format(content)
-
-                doc = add_part(snippets, header_index=position, header_type=header.tag, content=content)
-
-                self.index.addDocument(doc)
-
-                for start, end in walker(header):
+                # section content
+                content = []
+                footnote = []
+
+                def all_content(text):
+                    for frag in fragments.values():
+                        frag['content'].append(text)
+                    content.append(text)
+                handle_text = [all_content]
+
+
+                for start, text, end in walker(header, ignore_tags=self.ignore_content_tags):
+                    # handle footnotes
+                    if start is not None and start.tag in self.footnote_tags:
+                        footnote = []
+                        def collect_footnote(t):
+                            footnote.append(t)
+                        handle_text.append(collect_footnote)
+                    elif end is not None and footnote is not [] and end.tag in self.footnote_tags:
+                        handle_text.pop()
+                        doc = add_part(snippets, header_index=position, header_type=header.tag,
+                                       content=u''.join(footnote),
+                                       is_footnote=Field("is_footnote", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))
+                
+                        self.index.addDocument(doc)
+                        #print "@ footnote text: %s" % footnote
+                        footnote = []
+                    
+                    # handle fragments and themes.
                     if start is not None and start.tag == 'begin':
                         fid = start.attrib['id'][1:]
                         fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
-                        fragments[fid]['content'].append(start.tail)
+
+                    # themes for this fragment
                     elif start is not None and start.tag == 'motyw':
                         fid = start.attrib['id'][1:]
+                        handle_text.append(None)
                         if start.text is not None:
                             fragments[fid]['themes'] += map(str.strip, map(give_me_utf8, start.text.split(',')))
-                        fragments[fid]['content'].append(start.tail)
+                    elif end is not None and end.tag == 'motyw':
+                        handle_text.pop()
+
                     elif start is not None and start.tag == 'end':
                         fid = start.attrib['id'][1:]
                         if fid not in fragments:
@@ -428,26 +514,30 @@ class Index(BaseIndex):
                             continue  # empty themes list.
                         del fragments[fid]
 
-                        def jstr(l):
-                            return u' '.join(map(
-                                lambda x: x == None and u'(none)' or unicode(x),
-                                l))
-
                         doc = add_part(snippets,
                                        header_type=frag['start_header'],
                                        header_index=frag['start_section'],
                                        header_span=position - frag['start_section'] + 1,
                                        fragment_anchor=fid,
-                                       content=u' '.join(filter(lambda s: s is not None, frag['content'])),
+                                       content=fix_format(frag['content']),
                                        themes=frag['themes'])
-
+                        #print '@ FRAG %s' % frag['content']
                         self.index.addDocument(doc)
-                    elif start is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(start.text)
-                    elif end is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(end.tail)
+
+                        # Collect content.
+
+                    if text is not None and handle_text is not []:
+                        hdl = handle_text[-1]
+                        if hdl is not None:
+                            hdl(text)
+
+                        # in the end, add a section text.
+                doc = add_part(snippets, header_index=position, header_type=header.tag,
+                               content=fix_format(content))
+                #print '@ CONTENT: %s' % fix_format(content)
+
+                self.index.addDocument(doc)
+
         finally:
             snippets.close()
 
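
Note: text collection now goes through handle_text, a stack of sinks; every text event from the walker is handed to the top of the stack, and a None entry silences collection. Condensed to its skeleton (illustrative):

    handle_text = [all_content]           # default: section + all open fragments
    handle_text.append(collect_footnote)  # entering <pa>/<pt>/<pr>/<pe>
    handle_text.append(None)              # entering <motyw>: drop theme names
    ...
    if text is not None and handle_text:
        hdl = handle_text[-1]
        if hdl is not None:
            hdl(text)

This is what lets footnotes become separate documents flagged is_footnote and keeps theme names out of the indexed section text. (The committed guards footnote is not [] and handle_text is not [] are identity tests against fresh lists, hence always true; the surrounding tag checks do the real work.)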
@@ -473,12 +563,12 @@ class ReusableIndex(Index):
     """
     index = None
 
     """
     index = None
 
-    def open(self, analyzer=None, threads=4):
-        if ReusableIndex.index is not None:
+    def open(self, analyzer=None, **kw):
+        if ReusableIndex.index:
             self.index = ReusableIndex.index
         else:
             print("opening index")
-            Index.open(self, analyzer)
+            Index.open(self, analyzer, **kw)
             ReusableIndex.index = self.index
             atexit.register(ReusableIndex.close_reusable)
 
@@ -488,13 +578,15 @@ class ReusableIndex(Index):
 
     @staticmethod
     def close_reusable():
-        if ReusableIndex.index is not None:
+        if ReusableIndex.index:
+            print("closing index")
             ReusableIndex.index.optimize()
             ReusableIndex.index.close()
             ReusableIndex.index = None
 
     def close(self):
-        pass
+        if ReusableIndex.index:
+            ReusableIndex.index.commit()
 
 
 class JoinSearch(object):
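
Note: ReusableIndex.close() no longer discards the shared writer: it commits pending documents and leaves the writer open, while close_reusable(), registered via atexit, performs the final optimize/close at process exit. Usage sketch (illustrative):

    idx = ReusableIndex()
    idx.open()
    try:
        ...  # add documents through idx.index
    finally:
        idx.close()  # commit() only; the writer survives for the next caller

One wrinkle: BaseIndex.open now takes timeout rather than analyzer, so the analyzer argument that ReusableIndex.open forwards positionally lands in the timeout slot (harmless while it stays None).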
@@ -541,38 +633,57 @@ class JoinSearch(object):
 
 
 class SearchResult(object):
-    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets=None):
+    def __init__(self, search, scoreDocs, score=None, how_found=None, snippets=None, searched=None, tokens_cache=None):
+        if tokens_cache is None: tokens_cache = {}
+
         if score:
-            self.score = score
+            self._score = score
         else:
-            self.score = scoreDocs.score
+            self._score = scoreDocs.score
+
+        self.boost = 1.0
 
         self._hits = []
-        self.hits = None  # processed hits
+        self._processed_hits = None  # processed hits
 
-        stored = searcher.doc(scoreDocs.doc)
+        stored = search.searcher.doc(scoreDocs.doc)
         self.book_id = int(stored.get("book_id"))
 
+        pd = stored.get("published_date")
+        try:
+            self.published_date = int(pd)
+        except ValueError:
+            self.published_date = 0
+
         header_type = stored.get("header_type")
-        if not header_type:
-            return
+        # we have a content hit in some header of fragment
+        if header_type is not None:
+            sec = (header_type, int(stored.get("header_index")))
+            header_span = stored.get('header_span')
+            header_span = header_span is not None and int(header_span) or 1
+
+            fragment = stored.get("fragment_anchor")
 
-        sec = (header_type, int(stored.get("header_index")))
-        header_span = stored.get('header_span')
-        header_span = header_span is not None and int(header_span) or 1
+            if snippets:
+                snippets = snippets.replace("/\n", "\n")
+            hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets and [snippets] or []})
 
-        fragment = stored.get("fragment_anchor")
+            self._hits.append(hit)
 
-        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets and [snippets] or []})
+        self.search = search
+        self.searched = searched
+        self.tokens_cache = tokens_cache
 
-        self._hits.append(hit)
+    @property
+    def score(self):
+        return self._score * self.boost
 
     def merge(self, other):
         if self.book_id != other.book_id:
             raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id))
         self._hits += other._hits
         if other.score > self.score:
-            self.score = other.score
+            self._score = other._score
         return self
 
     def get_book(self):
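
Note: score becomes a derived property, the raw Lucene score times a mutable boost, so callers can re-weight a result after construction without losing the original value; merge() accordingly copies the raw _score. Sketch (illustrative):

    result.boost = 1.5   # e.g. promote title matches
    result.score         # == result._score * 1.5

The constructor now also keeps the originating Search object, the query string (searched) and a shared tokens_cache for the hits property below, and reads back published_date for tie-breaking in __cmp__.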
@@ -580,7 +691,11 @@ class SearchResult(object):
 
     book = property(get_book)
 
-    def process_hits(self):
+    @property
+    def hits(self):
+        if self._processed_hits is not None:
+            return self._processed_hits
+
         POSITION = 0
         FRAGMENT = 1
         POSITION_INDEX = 1
@@ -627,19 +742,39 @@ class SearchResult(object):
         hits = sections.values()
 
         for f in frags:
-            frag = catalogue.models.Fragment.objects.get(anchor=f[FRAGMENT])
+            try:
+                frag = catalogue.models.Fragment.objects.get(anchor=f[FRAGMENT], book__id=self.book_id)
+            except catalogue.models.Fragment.DoesNotExist:
+                # stale index
+                continue
+
+            # Figure out if we were searching for a token matching some word in theme name.
+            themes = frag.tags.filter(category='theme')
+            themes_hit = []
+            if self.searched is not None:
+                tokens = self.search.get_tokens(self.searched, 'POLISH', cached=self.tokens_cache)
+                for theme in themes:
+                    name_tokens = self.search.get_tokens(theme.name, 'POLISH')
+                    for t in tokens:
+                        if t in name_tokens:
+                            if not theme in themes_hit:
+                                themes_hit.append(theme)
+                            break
+
             m = {'score': f[SCORE],
                  'fragment': frag,
-                 'themes': frag.tags.filter(category='theme')
+                 'section_number': f[POSITION][POSITION_INDEX] + 1,
+                 'themes': themes,
+                 'themes_hit': themes_hit
                  }
             m.update(f[OTHER])
             hits.append(m)
 
         hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
 
-        self.hits = hits
+        self._processed_hits = hits
 
-        return self
+        return hits
 
     def __unicode__(self):
         return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
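
Note: process_hits() becomes a lazy, memoized hits property: the first access does the work and caches the list in _processed_hits. The pattern, stripped to its skeleton (illustrative):

    @property
    def hits(self):
        if self._processed_hits is not None:
            return self._processed_hits
        hits = ...  # group raw _hits, resolve fragments, match themes
        self._processed_hits = hits
        return hits

The body is also defensive about a stale index (a fragment anchor with no matching database row is skipped) and computes themes_hit by tokenizing each theme name and intersecting it with the query tokens via tokens_cache.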
@@ -657,7 +792,12 @@ class SearchResult(object):
         return books.values()
 
     def __cmp__(self, other):
-        return cmp(self.score, other.score)
+        c = cmp(self.score, other.score)
+        if c == 0:
+            # this is inverted, because earlier date is better
+            return cmp(other.published_date, self.published_date)
+        else:
+            return c
 
 
 class Hint(object):
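
Note: __cmp__ now breaks score ties by publication date, inverted so that the earlier date compares as the greater, i.e. better, result:

    # equal scores, a.published_date=1860, b.published_date=1950:
    cmp(a, b)  # == cmp(1950, 1860) == 1, so a outranks b in best-first orderings

Results whose source note yielded no year were stored with published_date = 0 and therefore compare as earliest, winning ties.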
@@ -787,11 +927,14 @@ class Search(IndexStore):
             bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
-    def get_tokens(self, searched, field='content'):
+    def get_tokens(self, searched, field='content', cached=None):
         """returns tokens analyzed by a proper (for a field) analyzer
         argument can be: StringReader, string/unicode, or tokens. In the last case
         they will just be returned (so we can reuse tokens, if we don't change the analyzer)
         """
         """returns tokens analyzed by a proper (for a field) analyzer
         argument can be: StringReader, string/unicode, or tokens. In the last case
         they will just be returned (so we can reuse tokens, if we don't change the analyzer)
         """
+        if cached is not None and field in cached:
+            return cached[field]
+
         if isinstance(searched, str) or isinstance(searched, unicode):
             searched = StringReader(searched)
         elif isinstance(searched, list):
@@ -803,6 +946,10 @@ class Search(IndexStore):
         while tokens.incrementToken():
             cta = tokens.getAttribute(CharTermAttribute.class_)
             toks.append(cta.toString())
+
+        if cached is not None:
+            cached[field] = toks
+
         return toks
 
     def fuzziness(self, fuzzy):
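
Note: get_tokens can now memoize its output in a caller-supplied dict keyed by field name (analysis differs per field, e.g. stemmed 'content' vs 'SIMPLE'). Sketch (illustrative):

    cache = {}
    toks_pl = search.get_tokens(u'dusza ze snu', field='content', cached=cache)
    toks = search.get_tokens(u'dusza ze snu', field='SIMPLE', cached=cache)
    again = search.get_tokens(u'dusza ze snu', field='content', cached=cache)  # dict hit

Since the key is the field alone, one cache dict is only valid for a single query string; the search_* methods below create or accept one per search.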
@@ -859,9 +1006,40 @@ class Search(IndexStore):
             q.add(BooleanClause(term, modal))
         return q
 
-    # def content_query(self, query):
-    #     return BlockJoinQuery(query, self.parent_filter,
-    #                           BlockJoinQuery.ScoreMode.Total)
+    def search_phrase(self, searched, field, book=True, max_results=20, fuzzy=False,
+                      filters=None, tokens_cache=None, boost=None, snippets=False, slop=2):
+        if filters is None: filters = []
+        if tokens_cache is None: tokens_cache = {}
+
+        tokens = self.get_tokens(searched, field, cached=tokens_cache)
+
+        query = self.make_phrase(tokens, field=field, fuzzy=fuzzy, slop=slop)
+        if book:
+            filters.append(self.term_filter(Term('is_book', 'true')))
+        top = self.searcher.search(query, self.chain_filters(filters), max_results)
+
+        return [SearchResult(self, found, snippets=(snippets and self.get_snippets(found, query) or None), searched=searched) for found in top.scoreDocs]
+
+    def search_some(self, searched, fields, book=True, max_results=20, fuzzy=False,
+                    filters=None, tokens_cache=None, boost=None, snippets=True):
+        if filters is None: filters = []
+        if tokens_cache is None: tokens_cache = {}
+
+        if book:
+            filters.append(self.term_filter(Term('is_book', 'true')))
+
+        query = BooleanQuery()
+
+        for fld in fields:
+            tokens = self.get_tokens(searched, fld, cached=tokens_cache)
+
+            query.add(BooleanClause(self.make_term_query(tokens, field=fld,
+                                fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        top = self.searcher.search(query, self.chain_filters(filters), max_results)
+
+        return [SearchResult(self, found, searched=searched, tokens_cache=tokens_cache,
+                             snippets=(snippets and self.get_snippets(found, query) or None)) for found in top.scoreDocs]
 
     def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
         """
@@ -884,7 +1062,7 @@ class Search(IndexStore):
                 self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
                 max_results)
             for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found))
+                books.append(SearchResult(self, found, how_found="search_perfect_book"))
         return books
 
     def search_book(self, searched, max_results=20, fuzzy=False, hint=None):
@@ -910,13 +1088,13 @@ class Search(IndexStore):
                                    self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
             max_results)
         for found in top.scoreDocs:
-            books.append(SearchResult(self.searcher, found))
+            books.append(SearchResult(self, found, how_found="search_book"))
 
         return books
 
     def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
         """
-        Search for book parts which containt a phrase perfectly matching (with a slop of 2, default for make_phrase())
+        Search for book parts which contains a phrase perfectly matching (with a slop of 2, default for make_phrase())
         some part/fragment of the book.
         """
         qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]
@@ -932,16 +1110,17 @@ class Search(IndexStore):
                                                            flt]),
                                        max_results)
             for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found, snippets=self.get_snippets(found, q)))
+                books.append(SearchResult(self, found, snippets=self.get_snippets(found, q), how_found='search_perfect_parts'))
 
         return books
 
-    def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None):
+    def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None, tokens_cache=None):
         """
         Tries to use search terms to match different fields of book (or its parts).
         E.g. one word can be an author survey, another be a part of the title, and the rest
         are some words from third chapter.
         """
         """
         Tries to use search terms to match different fields of book (or its parts).
         E.g. one word can be an author survey, another be a part of the title, and the rest
         are some words from third chapter.
         """
+        if tokens_cache is None: tokens_cache = {}
         books = []
         only_in = None
 
@@ -951,8 +1130,8 @@ class Search(IndexStore):
         # content only query : themes x content
         q = BooleanQuery()
 
-        tokens_pl = self.get_tokens(searched, field='content')
-        tokens = self.get_tokens(searched, field='SIMPLE')
+        tokens_pl = self.get_tokens(searched, field='content', cached=tokens_cache)
+        tokens = self.get_tokens(searched, field='SIMPLE', cached=tokens_cache)
 
         # only search in themes when we do not already filter by themes
         if hint is None or hint.just_search_in(['themes']) != []:
@@ -964,7 +1143,7 @@ class Search(IndexStore):
 
         topDocs = self.searcher.search(q, only_in, max_results)
         for found in topDocs.scoreDocs:
-            books.append(SearchResult(self.searcher, found))
+            books.append(SearchResult(self, found, how_found='search_everywhere_themesXcontent', searched=searched))
             print "* %s theme x content: %s" % (searched, books[-1]._hits)
 
         # query themes/content x author/title/tags
             print "* %s theme x content: %s" % (searched, books[-1]._hits)
 
         # query themes/content x author/title/tags
@@ -983,7 +1162,7 @@ class Search(IndexStore):
 
         topDocs = self.searcher.search(q, only_in, max_results)
         for found in topDocs.scoreDocs:
-            books.append(SearchResult(self.searcher, found))
+            books.append(SearchResult(self, found, how_found='search_everywhere', searched=searched))
             print "* %s scatter search: %s" % (searched, books[-1]._hits)
 
         return books
             print "* %s scatter search: %s" % (searched, books[-1]._hits)
 
         return books
@@ -1039,18 +1218,30 @@ class Search(IndexStore):
 
         stored = self.searcher.doc(scoreDoc.doc)
 
+        position = stored.get('snippets_position')
+        length = stored.get('snippets_length')
+        if position is None or length is None:
+            return None
         # locate content.
-        snippets = Snippets(stored.get('book_id')).open()
+        book_id = int(stored.get('book_id'))
+        snippets = Snippets(book_id).open()
         try:
-            text = snippets.get((int(stored.get('snippets_position')),
-                                 int(stored.get('snippets_length'))))
-        finally:
-            snippets.close()
+            try:
+                text = snippets.get((int(position),
+                                     int(length)))
+            finally:
+                snippets.close()
 
-        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
-        #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
-        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
+            tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
+            #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
+            snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
 
+        except Exception, e:
+            e2 = e
+            if hasattr(e, 'getJavaException'):
+                e2 = unicode(e.getJavaException())
+            raise Exception("Problem fetching snippets for book %d, @%d len=%d" % (book_id, int(position), int(length)),
+                e2)
         return snip
 
     @staticmethod
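
Note: get_snippets turns defensive: documents stored without snippets_position/snippets_length (book-level or tag documents) now return None instead of raising a TypeError on int(None), and any failure while reading or highlighting is re-raised with the book id and snippet coordinates attached, unwrapping the underlying Java exception when PyLucene exposes one. Callers may treat a missing snippet as a soft failure (illustrative):

    snip = self.get_snippets(found, query)  # may be None for docs without snippets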
@@ -1070,19 +1261,34 @@ class Search(IndexStore):
         if terms:
             return JArray('object')(terms, Term)
 
-    def search_tags(self, query, filter=None, max_results=40):
+    def search_tags(self, query, filters=None, max_results=40, pdcounter=False):
         """
         Search for Tag objects using query.
         """
         """
         Search for Tag objects using query.
         """
-        tops = self.searcher.search(query, filter, max_results)
+        if not pdcounter:
+            filters = self.chain_filters([filter, self.term_filter(Term('is_pdcounter', 'true'), inverse=True)])
+        tops = self.searcher.search(query, filters, max_results)
 
         tags = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
-            tags.append(tag)
-            print "%s (%d) -> %f" % (tag, tag.id, found.score)
-
+            is_pdcounter = doc.get('is_pdcounter')
+            category = doc.get('tag_category')
+            if is_pdcounter == 'true':
+                if category == 'pd_author':
+                    tag = PDCounterAuthor.objects.get(id=doc.get('tag_id'))
+                elif category == 'pd_book':
+                    tag = PDCounterBook.objects.get(id=doc.get('tag_id'))
+                    tag.category = 'pd_book'  # make it look more lik a tag.
+                else:
+                    print "Warning. cannot get pdcounter tag_id=%d from db; cat=%s" % (int(doc.get('tag_id')), category)
+            else:
+                tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
+                # don't add the pdcounter tag if same tag already exists
+            if not (is_pdcounter and filter(lambda t: tag.slug == t.slug, tags)):
+                tags.append(tag)
+                #            print "%s (%d) -> %f" % (tag, tag.id, found.score)
+        print 'returning %s' % tags
         return tags
 
     def search_books(self, query, filter=None, max_results=10):
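
Note: search_tags now resolves pdcounter pseudo-tags back to PDCounterAuthor/PDCounterBook objects (giving the book a category attribute so it can pass for a tag downstream) and skips a pdcounter entry when a real tag with the same slug is already listed. Two warts are visible in the committed code: the parameter was renamed to filters, yet the exclusion branch still reads the old name filter (now Python's builtin), and tag stays unbound if an unexpected category slips through. What the branch presumably intends (illustrative):

    if not pdcounter:
        filters = self.chain_filters(
            [filters, self.term_filter(Term('is_pdcounter', 'true'), inverse=True)])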
@@ -1096,7 +1302,7 @@ class Search(IndexStore):
             bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return bks
 
-    def create_prefix_phrase(self, toks, field):
+    def make_prefix_phrase(self, toks, field):
         q = MultiPhraseQuery()
         for i in range(len(toks)):
             t = Term(field, toks[i])
@@ -1122,7 +1328,7 @@ class Search(IndexStore):
 
         return only_term
 
-    def hint_tags(self, string, max_results=50):
+    def hint_tags(self, string, max_results=50, pdcounter=True, prefix=True, fuzzy=False):
         """
         Return auto-complete hints for tags
         using prefix search.
         """
         Return auto-complete hints for tags
         using prefix search.
@@ -1131,14 +1337,17 @@ class Search(IndexStore):
         top = BooleanQuery()
 
         for field in ['tag_name', 'tag_name_pl']:
-            q = self.create_prefix_phrase(toks, field)
+            if prefix:
+                q = self.make_prefix_phrase(toks, field)
+            else:
+                q = self.make_term_query(toks, field, fuzzy=fuzzy)
             top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))
 
         no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)
 
-        return self.search_tags(top, no_book_cat, max_results=max_results)
+        return self.search_tags(top, no_book_cat, max_results=max_results, pdcounter=pdcounter)
 
-    def hint_books(self, string, max_results=50):
+    def hint_books(self, string, max_results=50, prefix=True, fuzzy=False):
         """
         Returns auto-complete hints for book titles
         Because we do not index 'pseudo' title-tags.
         """
         Returns auto-complete hints for book titles
         Because we do not index 'pseudo' title-tags.
@@ -1146,7 +1355,10 @@ class Search(IndexStore):
         """
         toks = self.get_tokens(string, field='SIMPLE')
 
         """
         toks = self.get_tokens(string, field='SIMPLE')
 
-        q = self.create_prefix_phrase(toks, 'title')
+        if prefix:
+            q = self.make_prefix_phrase(toks, 'title')
+        else:
+            q = self.make_term_query(toks, 'title', fuzzy=fuzzy)
 
         return self.search_books(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
 
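
Note: both hint endpoints gain a non-prefix mode: instead of the MultiPhraseQuery prefix expansion they can issue plain (optionally fuzzy) term queries, which suits full-word lookups rather than as-you-type completion. Usage sketch (illustrative):

    search.hint_tags(u'mick')                                  # autocomplete, prefix mode
    search.hint_tags(u'mickiewicz', prefix=False, fuzzy=True)  # whole-word, similarity mode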
@@ -1156,7 +1368,7 @@ class Search(IndexStore):
         Chains a filter list together
         """
         filters = filter(lambda x: x is not None, filters)
-        if not filters:
+        if not filters or filters is []:
             return None
         chf = ChainedFilter(JArray('object')(filters, Filter), op)
         return chf
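
Note on the final hunk: filters is [] compares identity against a freshly built list and can never be true, so the added clause is a no-op; not filters already covers the empty case once the Nones are stripped:

    filter(lambda x: x is not None, [])  # -> [], and `not []` is True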