fix snippet display
[wolnelektury.git] / apps / search / index.py
index a19dd69..557f404 100644 (file)
@@ -1,88 +1,38 @@
 # -*- coding: utf-8 -*-
 
 from django.conf import settings
-from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
-    File, Field, \
-    NumericField, Version, Document, JavaError, IndexSearcher, \
-    QueryParser, PerFieldAnalyzerWrapper, \
-    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
-    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
-    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
-    HashSet, BooleanClause, Term, CharTermAttribute, \
-    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, BlockJoinQuery, \
-    FuzzyQuery, FuzzyTermEnum, Sort, Integer, \
-    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
-    initVM, CLASSPATH, JArray, JavaError
-    # KeywordAnalyzer
-
-# Initialize jvm
-JVM = initVM(classpath=CLASSPATH, maxheap=str(400*1024*1024))
-
-import sys
+
 import os
 import re
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
+from lxml import etree
 import catalogue.models
-from multiprocessing.pool import ThreadPool
-from threading import current_thread
-import atexit
+from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
+from itertools import chain
 import traceback
+import logging
+log = logging.getLogger('search')
+import sunburnt
+import custom
+import operator
 
 
-class WLAnalyzer(PerFieldAnalyzerWrapper):
-    def __init__(self):
-        polish = PolishAnalyzer(Version.LUCENE_34)
-        simple = SimpleAnalyzer(Version.LUCENE_34)
-        keyword = KeywordAnalyzer(Version.LUCENE_34)
-        # not sure if needed: there's NOT_ANALYZED meaning basically the same
-
-        PerFieldAnalyzerWrapper.__init__(self, polish)
-
-        self.addAnalyzer("tags", simple)
-        self.addAnalyzer("technical_editors", simple)
-        self.addAnalyzer("editors", simple)
-        self.addAnalyzer("url", keyword)
-        self.addAnalyzer("source_url", keyword)
-        self.addAnalyzer("source_name", simple)
-        self.addAnalyzer("publisher", simple)
-        self.addAnalyzer("author", simple)
-        self.addAnalyzer("is_book", keyword)
-
-        self.addAnalyzer("KEYWORD", keyword)
-        self.addAnalyzer("SIMPLE", simple)
-        self.addAnalyzer("POLISH", polish)
-
-
-class IndexStore(object):
-    def __init__(self):
-        self.make_index_dir()
-        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))
-
-    def make_index_dir(self):
-        try:
-            os.makedirs(settings.SEARCH_INDEX)
-        except OSError as exc:
-            if exc.errno == errno.EEXIST:
-                pass
-            else: raise
-
-
-class IndexChecker(IndexStore):
-    def __init__(self):
-        IndexStore.__init__(self)
-
-    def check(self):
-        checker = CheckIndex(self.store)
-        status = checker.checkIndex()
-        return status
+class SolrIndex(object):
+    def __init__(self, mode=None):
+        self.index = custom.CustomSolrInterface(settings.SOLR, mode=mode)
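+
+    # Note: `mode` is handed straight to CustomSolrInterface; in this module the
+    # Search class opens the index with mode='r' and Index with mode='rw'.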
 
 
 class Snippets(object):
+    """
+    This class manages snippet files for indexed object (book)
+    the snippets are concatenated together, and their positions and
+    lengths are kept in lucene index fields.
+    """
     SNIPPET_DIR = "snippets"
 
-    def __init__(self, book_id):
+    def __init__(self, book_id, revision=None):
         try:
             os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
         except OSError as exc:
@@ -90,75 +40,226 @@ class Snippets(object):
                 pass
             else: raise
         self.book_id = book_id
+        self.revision = revision
         self.file = None
 
+    @property
+    def path(self):
+        if self.revision: fn = "%d.%d" % (self.book_id, self.revision)
+        else: fn = "%d" % self.book_id
+
+        return os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, fn)
+
     def open(self, mode='r'):
+        """
+        Open the snippet file. Call .close() afterwards.
+        """
         if not 'b' in mode:
             mode += 'b'
-        self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
+
+        if 'w' in mode:
+            if os.path.exists(self.path):
+                # find the first unused revision number for the new snippet file
+                self.revision = 1
+                while os.path.exists(self.path):
+                    self.revision += 1
+
+        self.file = open(self.path, mode)
         self.position = 0
         return self
 
     def add(self, snippet):
-        l = len(snippet)
-        self.file.write(snippet.encode('utf-8'))
+        """
+        Append a snippet (unicode) to the snippet file.
+        Return a (position, length) tuple.
+        """
+        txt = snippet.encode('utf-8')
+        l = len(txt)
+        self.file.write(txt)
         pos = (self.position, l)
         self.position += l
         return pos
 
     def get(self, pos):
+        """
+        Given a (position, length) tuple, return the unicode text
+        of the snippet stored there.
+        """
         self.file.seek(pos[0], 0)
-        return self.read(pos[1]).decode('utf-8')
+        txt = self.file.read(pos[1]).decode('utf-8')
+        return txt
 
     def close(self):
+        """Close snippet file"""
         self.file.close()
 
+    def remove(self):
+        """Remove the snippet file and all its revisioned copies."""
+        self.revision = None
+        try:
+            os.unlink(self.path)
+            self.revision = 0
+            while True:
+                self.revision += 1
+                os.unlink(self.path)
+        except OSError:
+            pass
 
-class Index(IndexStore):
-    def __init__(self, analyzer=None):
-        IndexStore.__init__(self)
-        self.index = None
-        if not analyzer:
-            analyzer = WLAnalyzer()
-        self.analyzer = analyzer
 
-    def open(self, analyzer=None):
-        if self.index:
-            raise Exception("Index is already opened")
-        self.index = IndexWriter(self.store, self.analyzer,\
-                                 IndexWriter.MaxFieldLength.LIMITED)
-        return self.index
+class Index(SolrIndex):
+    """
+    Class indexing books.
+    """
+    def __init__(self):
+        super(Index, self).__init__(mode='rw')
 
-    def optimize(self):
-        self.index.optimize()
+    def delete_query(self, *queries):
+        """
+        index.delete(queries=...) doesn't work, so let's reimplement it
+        by collecting the uids of matching documents and deleting by those.
+        """
+        uids = set()
+        for q in queries:
+            if isinstance(q, sunburnt.search.LuceneQuery):
+                q = self.index.query(q)
+            q.field_limiter.update(['uid'])
+            st = 0
+            rows = 100
+            while True:
+                ids = q.paginate(start=st, rows=rows).execute()
+                if not len(ids):
+                    break
+                for res in ids:
+                    uids.add(res['uid'])
+                st += rows
+                # print "Will delete %s" % ','.join([x for x in uids])
+        if uids:
+            self.index.delete(uids)
+            return True
+        else:
+            return False
 
-    def close(self):
-        try:
-            self.index.optimize()
-        except JavaError, je:
-            print "Error during optimize phase, check index: %s" % je
+    def index_tags(self, *tags, **kw):
+        """
+        Re-index global tag list.
+        Removes all tags from the index, then indexes them again.
+        Indexed fields include: id, name (with and without Polish stems), category.
+        """
+        remove_only = kw.get('remove_only', False)
+        # first, remove tags from index.
+        if tags:
+            tag_qs = []
+            for tag in tags:
+                q_id = self.index.Q(tag_id=tag.id)
+
+                if isinstance(tag, PDCounterAuthor):
+                    q_cat = self.index.Q(tag_category='pd_author')
+                elif isinstance(tag, PDCounterBook):
+                    q_cat = self.index.Q(tag_category='pd_book')
+                else:
+                    q_cat = self.index.Q(tag_category=tag.category)
+
+                q_id_cat = self.index.Q(q_id & q_cat)
+                tag_qs.append(q_id_cat)
+            self.delete_query(tag_qs)
+        else:  # all
+            q = self.index.Q(tag_id__any=True)
+            self.delete_query(q)
+
+        if not remove_only:
+            # then add them [all or just one passed]
+            if not tags:
+                tags = chain(catalogue.models.Tag.objects.exclude(category='set'), \
+                    PDCounterAuthor.objects.all(), \
+                    PDCounterBook.objects.all())
+
+            for tag in tags:
+                if isinstance(tag, PDCounterAuthor):
+                    doc = {
+                        "tag_id": int(tag.id),
+                        "tag_name": tag.name,
+                        "tag_name_pl": tag.name,
+                        "tag_category": 'pd_author',
+                        "is_pdcounter": True,
+                        "uid": "tag%d_pd_a" % tag.id
+                        }
+                elif isinstance(tag, PDCounterBook):
+                    doc = {
+                        "tag_id": int(tag.id),
+                        "tag_name": tag.title,
+                        "tag_name_pl": tag.title,
+                        "tag_category": 'pd_book',
+                        "is_pdcounter": True,
+                        "uid": "tag%d_pd_b" % tag.id
+                        }
+                else:
+                    doc = {
+                        "tag_id": int(tag.id),
+                        "tag_name": tag.name,
+                        "tag_name_pl": tag.name,
+                        "tag_category": tag.category,
+                        "is_pdcounter": False,
+                        "uid": "tag%d" % tag.id
+                        }
+                self.index.add(doc)
 
-        self.index.close()
-        self.index = None
+    def create_book_doc(self, book):
+        """
+        Create a document referring to the book id.
+        """
+        doc = {
+            'book_id': int(book.id),
+            }
+        if book.parent is not None:
+            doc["parent_id"] = int(book.parent.id)
+        return doc
 
-    def remove_book(self, book):
-        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
-        self.index.deleteDocuments(q)
+    def remove_book(self, book_or_id, remove_snippets=True):
+        """Removes a book from search index.
+        book - Book instance."""
+        if isinstance(book_or_id, catalogue.models.Book):
+            book_id = book_or_id.id
+        else:
+            book_id = book_or_id
 
-    def index_book(self, book, overwrite=True):
+        self.delete_query(self.index.Q(book_id=book_id))
+
+        if remove_snippets:
+            snippets = Snippets(book_id)
+            snippets.remove()
+
+    def index_book(self, book, book_info=None, overwrite=True):
+        """
+        Indexes the book.
+        Creates a lucene document for extracted metadata
+        and calls self.index_content() to index the contents of the book.
+        """
         if overwrite:
-            self.remove_book(book)
-
-        doc = self.extract_metadata(book)
-        parts = self.extract_content(book)
-        block = ArrayList().of_(Document)
-        
-        print "adding block."
-        for p in parts:
-            block.add(p)
-        block.add(doc)
-        self.index.addDocuments(block)
-        print "added."
+            # we don't remove snippets, since they might still be needed by
+            # threads using an index that has not been reopened yet
+            self.remove_book(book, remove_snippets=False)
+
+        book_doc = self.create_book_doc(book)
+        meta_fields = self.extract_metadata(book, book_info, dc_only=['source_name', 'authors', 'title'])
+        # let's not index it - it's only used for extracting publish date
+        if 'source_name' in meta_fields:
+            del meta_fields['source_name']
+
+        for n, f in meta_fields.items():
+            book_doc[n] = f
+
+        book_doc['uid'] = "book%s" % book_doc['book_id']
+        self.index.add(book_doc)
+        del book_doc
+        book_fields = {
+            'title': meta_fields['title'],
+            'authors': meta_fields['authors'],
+            'published_date': meta_fields['published_date']
+            }
+        if 'translators' in meta_fields:
+            book_fields['translators'] = meta_fields['translators']
+
+        self.index_content(book, book_fields=book_fields)
 
     master_tags = [
         'opowiadanie',
@@ -166,33 +267,39 @@ class Index(IndexStore):
         'dramat_wierszowany_l',
         'dramat_wierszowany_lp',
         'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
-        'wywiad'
+        'wywiad',
+        ]
+
+    ignore_content_tags = [
+        'uwaga', 'extra',
+        'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu',
+        'didaskalia',
+        'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc',
         ]
 
-    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
+    footnote_tags = ['pa', 'pt', 'pr', 'pe']
 
-    def create_book_doc(self, book):
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
+
+    published_date_re = re.compile("([0-9]+)[\]. ]*$")
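+    # e.g. for source_name u"Czytelnik, Warszawa 1921." the regex above captures u"1921"
+    # (an illustrative value; see the published date handling in extract_metadata).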
+
+    def extract_metadata(self, book, book_info=None, dc_only=None):
         """
-        Create a lucene document connected to the book
+        Extract metadata from the book and return a map of fields keyed by field name.
         """
-        doc = Document()
-        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
-        if book.parent is not None:
-            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
-        return doc
+        fields = {}
 
-    def extract_metadata(self, book):
-        book_info = dcparser.parse(book.xml_file)
+        if book_info is None:
+            book_info = dcparser.parse(open(book.xml_file.path))
 
-        print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident))
-
-        doc = self.create_book_doc(book)
-        doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
-        doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
-        doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))
+        fields['slug'] = book.slug
+        fields['tags'] = [t.name  for t in book.tags]
+        fields['is_book'] = True
 
         # validator, name
         for field in dcparser.BookInfo.FIELDS:
+            if dc_only and field.name not in dc_only:
+                continue
             if hasattr(book_info, field.name):
                 if not getattr(book_info, field.name):
                     continue
@@ -202,345 +309,379 @@ class Index(IndexStore):
                     s = getattr(book_info, field.name)
                     if field.multiple:
                         s = ', '.join(s)
-                    try:
-                        doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED))
-                    except JavaError as je:
-                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
+                    fields[field.name] = s
                 elif type_indicator == dcparser.as_person:
                     p = getattr(book_info, field.name)
                     if isinstance(p, dcparser.Person):
                         persons = unicode(p)
                     else:
                         persons = ', '.join(map(unicode, p))
-                    doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED))
+                    fields[field.name] = persons
                 elif type_indicator == dcparser.as_date:
                     dt = getattr(book_info, field.name)
-                    doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED))
-        return doc
+                    fields[field.name] = dt
+
+        # get published date
+        pd = None
+        if hasattr(book_info, 'source_name') and book_info.source_name:
+            match = self.published_date_re.search(book_info.source_name)
+            if match is not None:
+                pd = str(match.groups()[0])
+        if not pd: pd = ""
+        fields["published_date"] = pd
+
+        return fields
+
+    # def add_gaps(self, fields, fieldname):
+    #     """
+    #     Interposes a list of fields with gap-fields, which are indexed spaces and returns it.
+    #     This allows for doing phrase queries which do not overlap the gaps (when slop is 0).
+    #     """
+    #     def gap():
+    #         while True:
+    #             yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
+    #     return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
 
     def get_master(self, root):
+        """
+        Returns the first master tag from an etree.
+        """
         for master in root.iter():
             if master.tag in self.master_tags:
                 return master
 
-    def extract_content(self, book):
-        wld = WLDocument.from_file(book.xml_file.path)
+    def index_content(self, book, book_fields={}):
+        """
+        Walks the book XML and extracts content from it.
+        Adds parts for each header tag and for each fragment.
+        """
+        wld = WLDocument.from_file(book.xml_file.path, parse_dublincore=False)
         root = wld.edoc.getroot()
 
         master = self.get_master(root)
         if master is None:
             return []
 
-        def walker(node):
-            yield node, None
-            for child in list(node):
-                for b, e in walker(child):
-                    yield b, e
-            yield None, node
+        def walker(node, ignore_tags=[]):
+
+            if node.tag not in ignore_tags:
+                yield node, None, None
+                if node.text is not None:
+                    yield None, node.text, None
+                for child in list(node):
+                    for b, t, e in walker(child, ignore_tags=ignore_tags):
+                        yield b, t, e
+                yield None, None, node
+
+            if node.tail is not None:
+                yield None, node.tail, None
             return
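+        # For a small element like <a>x<b>y</b>z</a> the walker yields, in order:
+        #   (a, None, None), (None, u"x", None), (b, None, None), (None, u"y", None),
+        #   (None, None, b), (None, u"z", None), (None, None, a)
+        # i.e. (start-tag, text, end-tag) triples with exactly one slot filled each.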
 
         def fix_format(text):
-            return re.sub("/$", "", text, flags=re.M)
-
-        # header_type
-        # header_index
-        header_docs = []
-        # Then we create a document for each fragments
-        # fragment_anchor - the anchor
-        # themes - list of themes [not indexed]
-        fragment_docs = []
-        # will contain (framgent id -> { content: [], themes: [] }
+            #            separator = [u" ", u"\t", u".", u";", u","]
+            if isinstance(text, list):
+                # need to join it first
+                text = filter(lambda s: s is not None, text)
+                text = u' '.join(text)
+                # for i in range(len(text)):
+                #     if i > 0:
+                #         if text[i][0] not in separator\
+                #             and text[i - 1][-1] not in separator:
+                #          text.insert(i, u" ")
+
+            return re.sub("(?m)/$", "", text)
+
+        def add_part(snippets, **fields):
+            doc = self.create_book_doc(book)
+            for n, v in book_fields.items():
+                doc[n] = v
+
+            doc['header_index'] = fields["header_index"]
+            doc['header_span'] = 'header_span' in fields and fields['header_span'] or 1
+            doc['header_type'] = fields['header_type']
+
+            doc['text'] = fields['text']
+
+            # snippets
+            snip_pos = snippets.add(fields["text"])
+
+            doc['snippets_position'] = snip_pos[0]
+            doc['snippets_length'] = snip_pos[1]
+            if snippets.revision:
+                doc["snippets_revision"] = snippets.revision
+
+            if 'fragment_anchor' in fields:
+                doc["fragment_anchor"] = fields['fragment_anchor']
+
+            if 'themes' in fields:
+                doc['themes'] = fields['themes']
+            doc['uid'] = "part%s%s%s" % (doc['header_index'],
+                                         doc['header_span'],
+                                         doc.get('fragment_anchor', ''))
+            return doc
+
+        def give_me_utf8(s):
+            if isinstance(s, unicode):
+                return s.encode('utf-8')
+            else:
+                return s
+
         fragments = {}
         snippets = Snippets(book.id).open('w')
         try:
             for header, position in zip(list(master), range(len(master))):
-                sys.stdout.write("\rsection: %d" % position)
 
                 if header.tag in self.skip_header_tags:
                     continue
+                if header.tag is etree.Comment:
+                    continue
 
-                doc = self.create_book_doc(book)
-
-                doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
-                doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
-
-                content = u' '.join([t for t in header.itertext()])
-                content = fix_format(content)
-
-                doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
-                snip_pos = snippets.add(content)
-                doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
-                doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[0]))
-
-                header_docs.append(doc)
-
-                for start, end in walker(master):
+                # section content
+                content = []
+                footnote = []
+
+                def all_content(text):
+                    for frag in fragments.values():
+                        frag['text'].append(text)
+                    content.append(text)
+                handle_text = [all_content]
+
+                for start, text, end in walker(header, ignore_tags=self.ignore_content_tags):
+                    # handle footnotes
+                    if start is not None and start.tag in self.footnote_tags:
+                        footnote = []
+
+                        def collect_footnote(t):
+                            footnote.append(t)
+
+                        handle_text.append(collect_footnote)
+                    elif end is not None and end.tag in self.footnote_tags:
+                        handle_text.pop()
+                        doc = add_part(snippets, header_index=position, header_type=header.tag,
+                                       text=u''.join(footnote),
+                                       is_footnote=True)
+                        self.index.add(doc)
+                        #print "@ footnote text: %s" % footnote
+                        footnote = []
+
+                    # handle fragments and themes.
                     if start is not None and start.tag == 'begin':
                         fid = start.attrib['id'][1:]
-                        fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
-                        fragments[fid]['content'].append(start.tail)
+                        fragments[fid] = {'text': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
+
+                    # themes for this fragment
                     elif start is not None and start.tag == 'motyw':
                         fid = start.attrib['id'][1:]
-                        fragments[fid]['themes'].append(start.text)
-                        fragments[fid]['content'].append(start.tail)
+                        handle_text.append(None)
+                        if start.text is not None:
+                            fragments[fid]['themes'] += map(unicode.strip, map(unicode, (start.text.split(','))))
+                    elif end is not None and end.tag == 'motyw':
+                        handle_text.pop()
+
                     elif start is not None and start.tag == 'end':
                         fid = start.attrib['id'][1:]
                         if fid not in fragments:
                             continue  # a broken <end> node, skip it
                         frag = fragments[fid]
+                        if frag['themes'] == []:
+                            continue  # empty themes list.
                         del fragments[fid]
 
-                        def jstr(l):
-                            return u' '.join(map(
-                                lambda x: x == None and u'(none)' or unicode(x),
-                                l))
-
-                        doc = self.create_book_doc(book)
-
-                        doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
-                        doc.add(NumericField("header_span", Field.Store.YES, True).setIntValue(position - frag['start_section'] + 1))
-                        doc.add(Field("header_type", frag['start_header'], Field.Store.YES, Field.Index.NOT_ANALYZED))
-
-                        doc.add(Field("fragment_anchor", fid,
-                                      Field.Store.YES, Field.Index.NOT_ANALYZED))
-                        doc.add(Field("content",
-                                      u' '.join(filter(lambda s: s is not None, frag['content'])),
-                                      Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS))
-
-                        snip_pos = snippets.add(content)
-                        doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
-                        doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[0]))
-
-                        doc.add(Field("themes",
-                                      u' '.join(filter(lambda s: s is not None, frag['themes'])),
-                                      Field.Store.NO, Field.Index.ANALYZED))
-
-                        fragment_docs.append(doc)
-                    elif start is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(start.text)
-                    elif end is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(end.tail)
-        finally:
-            snippets.close()
-
-        return header_docs + fragment_docs
+                        doc = add_part(snippets,
+                                       header_type=frag['start_header'],
+                                       header_index=frag['start_section'],
+                                       header_span=position - frag['start_section'] + 1,
+                                       fragment_anchor=fid,
+                                       text=fix_format(frag['text']),
+                                       themes=frag['themes'])
+                        #print '@ FRAG %s' % frag['content']
+                        self.index.add(doc)
 
-    def __enter__(self):
-        self.open()
-        return self
-
-    def __exit__(self, type, value, tb):
-        self.close()
+                    # Collect content.
 
+                    if text is not None and handle_text:
+                        hdl = handle_text[-1]
+                        if hdl is not None:
+                            hdl(text)
 
-def log_exception_wrapper(f):
-    def _wrap(*a):
-        try:
-            f(*a)
-        except Exception, e:
-            print("Error in indexing thread: %s" % e)
-            traceback.print_exc()
-            raise e
-    return _wrap
+                # In the end, add the section text.
+                doc = add_part(snippets, header_index=position,
+                               header_type=header.tag, text=fix_format(content))
+                #print '@ CONTENT: %s' % fix_format(content)
 
+                self.index.add(doc)
 
-class ReusableIndex(Index):
-    """
-    Works like index, but does not close/optimize Lucene index
-    until program exit (uses atexit hook).
-    This is usefull for importbooks command.
-
-    if you cannot rely on atexit, use ReusableIndex.close_reusable() yourself.
-    """
-    index = None
-    pool = None
-    pool_jobs = None
-
-    def open(self, analyzer=None, threads=4):
-        if ReusableIndex.index is not None:
-            self.index = ReusableIndex.index
-        else:
-            print("opening index")
-            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread() )
-            ReusableIndex.pool_jobs = []
-            Index.open(self, analyzer)
-            ReusableIndex.index = self.index
-            atexit.register(ReusableIndex.close_reusable)
-
-    def index_book(self, *args, **kw):
-        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
-        ReusableIndex.pool_jobs.append(job)
-
-    @staticmethod
-    def close_reusable():
-        if ReusableIndex.index is not None:
-            print("wait for indexing to finish")
-            for job in ReusableIndex.pool_jobs:
-                job.get()
-                sys.stdout.write('.')
-                sys.stdout.flush()
-            print("done.")
-            ReusableIndex.pool.close()
-
-            ReusableIndex.index.optimize()
-            ReusableIndex.index.close()
-            ReusableIndex.index = None
-
-    def close(self):
-        pass
-
-
-class Search(IndexStore):
-    def __init__(self, default_field="content"):
-        IndexStore.__init__(self)
-        self.analyzer = WLAnalyzer() #PolishAnalyzer(Version.LUCENE_34)
-        ## self.analyzer = WLAnalyzer()
-        self.searcher = IndexSearcher(self.store, True)
-        self.parser = QueryParser(Version.LUCENE_34, default_field,
-                                  self.analyzer)
-
-        self.parent_filter = TermsFilter()
-        self.parent_filter.addTerm(Term("is_book", "true"))
-
-    def query(self, query):
-        return self.parser.parse(query)
-
-    def wrapjoins(self, query, fields=[]):
-        """
-        This functions modifies the query in a recursive way,
-        so Term and Phrase Queries contained, which match
-        provided fields are wrapped in a BlockJoinQuery,
-        and so delegated to children documents.
-        """
-        if BooleanQuery.instance_(query):
-            qs = BooleanQuery.cast_(query)
-            for clause in qs:
-                clause = BooleanClause.cast_(clause)
-                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
-            return qs
-        else:
-            termset = HashSet()
-            query.extractTerms(termset)
-            for t in termset:
-                t = Term.cast_(t)
-                if t.field() not in fields:
-                    return query
-            return BlockJoinQuery(query, self.parent_filter,
-                                  BlockJoinQuery.ScoreMode.Total)
-
-    def simple_search(self, query, max_results=50):
-        """Returns (books, total_hits)
-        """
-
-        tops = self.searcher.search(self.query(query), max_results)
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
-        return (bks, tops.totalHits)
-
-    def search(self, query, max_results=50):
-        query = self.query(query)
-        query = self.wrapjoins(query, ["content", "themes"])
-
-        tops = self.searcher.search(query, max_results)
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
-        return (bks, tops.totalHits)
-
-    def bsearch(self, query, max_results=50):
-        q = self.query(query)
-        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
-
-        tops = self.searcher.search(bjq, max_results)
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
-        return (bks, tops.totalHits)
-
-# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
-# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
-# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
-
-# while (tokenStream.incrementToken()) {
-#     int startOffset = offsetAttribute.startOffset();
-#     int endOffset = offsetAttribute.endOffset();
-#     String term = charTermAttribute.toString();
-# }
+        finally:
+            snippets.close()
 
 
 class SearchResult(object):
-    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets_cb=None):
-        if score:
-            self.score = score
+    def __init__(self, doc, how_found=None, query=None, query_terms=None):
+        #        self.search = search
+        self.boost = 1.0
+        self._hits = []
+        self._processed_hits = None  # processed hits
+        self.snippets = []
+        self.query_terms = query_terms
+
+        if 'score' in doc:
+            self._score = doc['score']
         else:
-            self.score = scoreDocs.score
-
-        self.hits = []
+            self._score = 0
 
-        stored = searcher.doc(scoreDocs.doc)
-        self.book_id = int(stored.get("book_id"))
+        self.book_id = int(doc["book_id"])
 
-        header_type = stored.get("header_type")
-        sec = (header_type, int(stored.get("header_index")))
-        header_span = stored.get('header_span')
-        header_span = header_span is not None and int(header_span) or 1
-        stored = searcher.doc(scoreDocs.doc)
-        self.book_id = int(stored.get("book_id"))
+        try:
+            self.published_date = int(doc.get("published_date"))
+        except ValueError:
+            self.published_date = 0
+
+        # content hits
+        header_type = doc.get("header_type", None)
+        # we have a content hit in some header of fragment
+        if header_type is not None:
+            sec = (header_type, int(doc["header_index"]))
+            header_span = doc['header_span']
+            header_span = header_span is not None and int(header_span) or 1
+            fragment = doc.get("fragment_anchor", None)
+            snippets_pos = (doc['snippets_position'], doc['snippets_length'])
+            snippets_rev = doc.get('snippets_revision', None)
+
+            hit = (sec + (header_span,), fragment, self._score, {
+                'how_found': how_found,
+                'snippets_pos': snippets_pos,
+                'snippets_revision': snippets_rev,
+                'themes': doc.get('themes', []),
+                'themes_pl': doc.get('themes_pl', [])
+                })
+
+            self._hits.append(hit)
 
-        fragment = stored.get("fragment_anchor")
+    def __unicode__(self):
+        return u"<SR id=%d %d(%d) hits score=%f %d snippets" % \
+            (self.book_id, len(self._hits), self._processed_hits and len(self._processed_hits) or -1, self._score, len(self.snippets))
 
-        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets_cb': snippets_cb})
+    def __str__(self):
+        return unicode(self).encode('utf-8')
 
-        self.hits.append(hit)
+    @property
+    def score(self):
+        return self._score * self.boost
 
     def merge(self, other):
         if self.book_id != other.book_id:
             raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id))
-        self.hits += other.hits
+        self._hits += other._hits
         if other.score > self.score:
-            self.score = other.score
-        return self
-
-    def add_snippets(self, snippets):
-        self.snippets += snippets
+            self._score = other._score
         return self
 
     def get_book(self):
-        return catalogue.models.Book.objects.get(id=self.book_id)
+        if hasattr(self, '_book'):
+            return self._book
+        self._book = catalogue.models.Book.objects.get(id=self.book_id)
+        return self._book
 
     book = property(get_book)
 
-    def get_parts(self):
-        book = self.book
+    POSITION = 0
+    FRAGMENT = 1
+    POSITION_INDEX = 1
+    POSITION_SPAN = 2
+    SCORE = 2
+    OTHER = 3
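+    # Each element of self._hits is a tuple indexed by the constants above:
+    #   ((header_type, header_index, header_span), fragment_anchor, score, other_dict)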
+
+    @property
+    def hits(self):
+        if self._processed_hits is not None:
+            return self._processed_hits
+
+        # to sections and fragments
+        frags = filter(lambda r: r[self.FRAGMENT] is not None, self._hits)
+
+        sect = filter(lambda r: r[self.FRAGMENT] is None, self._hits)
+
+        # sections not covered by fragments
+        sect = filter(lambda s: 0 == len(filter(
+            lambda f: s[self.POSITION][self.POSITION_INDEX] >= f[self.POSITION][self.POSITION_INDEX]
+            and s[self.POSITION][self.POSITION_INDEX] < f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN],
+            frags)), sect)
+
+        hits = []
+
+        def remove_duplicates(lst, keyfn, compare):
+            els = {}
+            for e in lst:
+                eif = keyfn(e)
+                if eif in els:
+                    if compare(els[eif], e) >= 1:
+                        continue
+                els[eif] = e
+            return els.values()
+
+        # remove fragments with duplicated fid's and duplicated snippets
+        frags = remove_duplicates(frags, lambda f: f[self.FRAGMENT], lambda a, b: cmp(a[self.SCORE], b[self.SCORE]))
+        # frags = remove_duplicates(frags, lambda f: f[OTHER]['snippet_pos'] and f[OTHER]['snippet_pos'] or f[FRAGMENT],
+        #                           lambda a, b: cmp(a[SCORE], b[SCORE]))
+
+        # remove duplicate sections
+        sections = {}
+
+        for s in sect:
+            si = s[self.POSITION][self.POSITION_INDEX]
+            # skip existing
+            if si in sections:
+                if sections[si]['score'] >= s[self.SCORE]:
+                    continue
 
-        def sections_covered(results):
-            frags = filter(lambda r: r[1] is not None, results)
-            sect = filter(lambda r: r[1] is None, results)
-            sect = filter(lambda s: 0 == len(filter(
-                lambda f: s[0][1] >= f[0][1] and s[0][1] < f[0][1] + f[0][2],
-                frags)), sect)
-            print "filtered, non overlapped sections: %s" % sect
-            return frags + sect
-            
-            
-        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
-            + [{"fragment": book.fragments.get(anchor=f), '_score_key':f} for f in self.fragments]
+            m = {'score': s[self.SCORE],
+                 'section_number': s[self.POSITION][self.POSITION_INDEX] + 1,
+                 }
+            m.update(s[self.OTHER])
+            sections[si] = m
+
+        hits = sections.values()
+
+        for f in frags:
+            try:
+                frag = catalogue.models.Fragment.objects.get(anchor=f[self.FRAGMENT], book__id=self.book_id)
+            except catalogue.models.Fragment.DoesNotExist:
+                # stale index
+                continue
+            # Figure out if we were searching for a token matching some word in theme name.
+            themes = frag.tags.filter(category='theme')
+            themes_hit = set()
+            if self.query_terms is not None:
+                for i in range(0, len(f[self.OTHER]['themes'])):
+                    tms = f[self.OTHER]['themes'][i].split() + f[self.OTHER]['themes_pl'][i].split()
+                    tms = map(unicode.lower, tms)
+                    for qt in self.query_terms:
+                        if qt in tms:
+                            themes_hit.add(f[self.OTHER]['themes'][i])
+                            break
+
+            def theme_by_name(n):
+                th = filter(lambda t: t.name == n, themes)
+                if th:
+                    return th[0]
+                else:
+                    return None
+            themes_hit = filter(lambda a: a is not None, map(theme_by_name, themes_hit))
 
-        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
-        print("bookid: %d parts: %s" % (self.book_id, parts))
-        return parts
+            m = {'score': f[self.SCORE],
+                 'fragment': frag,
+                 'section_number': f[self.POSITION][self.POSITION_INDEX] + 1,
+                 'themes': themes,
+                 'themes_hit': themes_hit
+                 }
+            m.update(f[self.OTHER])
+            hits.append(m)
 
-    parts = property(get_parts)
+        hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
 
+        self._processed_hits = hits
 
-    def __unicode__(self):
-        return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
+        return hits
 
     @staticmethod
     def aggregate(*result_lists):
@@ -549,218 +690,414 @@ class SearchResult(object):
             for r in rl:
                 if r.book_id in books:
                     books[r.book_id].merge(r)
-                    #print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
                 else:
                     books[r.book_id] = r
         return books.values()
 
     def __cmp__(self, other):
-        return cmp(self.score, other.score)
-
-
-class MultiSearch(Search):
-    """Class capable of IMDb-like searching"""
-    def get_tokens(self, searched, field='content'):
-        """returns tokens analyzed by a proper (for a field) analyzer
-        argument can be: StringReader, string/unicode, or tokens. In the last case
-        they will just be returned (so we can reuse tokens, if we don't change the analyzer)
-        """
-        if isinstance(searched, str) or isinstance(searched, unicode):
-            searched = StringReader(searched)
-        elif isinstance(searched, list):
-            return searched
-
-        searched.reset()
-        tokens = self.analyzer.reusableTokenStream(field, searched)
-        toks = []
-        while tokens.incrementToken():
-            cta = tokens.getAttribute(CharTermAttribute.class_)
-            toks.append(cta.toString())
-        return toks
-
-    def fuzziness(self, fuzzy):
-        if not fuzzy:
-            return None
-        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
-            return fuzzy
-        else:
-            return 0.5
-
-    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
-        if fuzzy:
-            phrase = MultiPhraseQuery()
-            for t in tokens:
-                term = Term(field, t)
-                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
-                fuzzterms = []
-
-                while True:
-                    #                    print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
-                    ft = fuzzterm.term()
-                    if ft:
-                        fuzzterms.append(ft)
-                    if not fuzzterm.next(): break
-                if fuzzterms:
-                    phrase.add(JArray('object')(fuzzterms, Term))
-                else:
-                    phrase.add(term)
+        c = cmp(self.score, other.score)
+        if c == 0:
+            # this is inverted, because earlier date is better
+            return cmp(other.published_date, self.published_date)
         else:
-            phrase = PhraseQuery()
-            phrase.setSlop(slop)
-            for t in tokens:
-                term = Term(field, t)
-                phrase.add(term)
-        return phrase
-
-    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
-        q = BooleanQuery()
-        for t in tokens:
-            term = Term(field, t)
-            if fuzzy:
-                term = FuzzyQuery(term, self.fuzziness(fuzzy))
-            else:
-                term = TermQuery(term)
-            q.add(BooleanClause(term, modal))
-        return q
+            return c
 
-    def content_query(self, query):
-        return BlockJoinQuery(query, self.parent_filter,
-                              BlockJoinQuery.ScoreMode.Total)
+    def __len__(self):
+        return len(self.hits)
 
-    def search_perfect_book(self, searched, max_results=20, fuzzy=False):
-        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in ['author', 'title']]
+    def snippet_pos(self, idx=0):
+        return self.hits[idx]['snippets_pos']
 
-        books = []
-        for q in qrys:
-            top = self.searcher.search(q, max_results)
-            for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found))
-        return books
+    def snippet_revision(self, idx=0):
+        try:
+            return self.hits[idx]['snippets_revision']
+        except (IndexError, KeyError):
+            return None
 
-    def search_perfect_parts(self, searched, max_results=20, fuzzy=False):
-        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]
 
-        books = []
-        for q in qrys:
-            top = self.searcher.search(q, max_results)
-            for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found).add_snippets(self.get_snippets(found, q)))
+class Search(SolrIndex):
+    """
+    Search facilities.
+    """
+    def __init__(self, default_field="text"):
+        super(Search, self).__init__(mode='r')
+
+    # def get_tokens(self, searched, field='text', cached=None):
+    #     """returns tokens analyzed by a proper (for a field) analyzer
+    #     argument can be: StringReader, string/unicode, or tokens. In the last case
+    #     they will just be returned (so we can reuse tokens, if we don't change the analyzer)
+    #     """
+    #     if cached is not None and field in cached:
+    #         return cached[field]
+
+    #     if isinstance(searched, str) or isinstance(searched, unicode):
+    #         searched = StringReader(searched)
+    #     elif isinstance(searched, list):
+    #         return searched
+
+    #     searched.reset()
+    #     tokens = self.analyzer.reusableTokenStream(field, searched)
+    #     toks = []
+    #     while tokens.incrementToken():
+    #         cta = tokens.getAttribute(CharTermAttribute.class_)
+    #         toks.append(cta.toString())
+
+    #     if cached is not None:
+    #         cached[field] = toks
+
+    #     return toks
+
+    # @staticmethod
+    # def fuzziness(fuzzy):
+    #     """Helper method to sanitize fuzziness"""
+    #     if not fuzzy:
+    #         return None
+    #     if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
+    #         return fuzzy
+    #     else:
+    #         return 0.5
+
+    # def make_phrase(self, tokens, field='text', slop=2, fuzzy=False):
+    #     """
+    #     Return a PhraseQuery with a series of tokens.
+    #     """
+    #     if fuzzy:
+    #         phrase = MultiPhraseQuery()
+    #         for t in tokens:
+    #             term = Term(field, t)
+    #             fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
+    #             fuzzterms = []
+
+    #             while True:
+    #                 ft = fuzzterm.term()
+    #                 if ft:
+    #                     fuzzterms.append(ft)
+    #                 if not fuzzterm.next(): break
+    #             if fuzzterms:
+    #                 phrase.add(JArray('object')(fuzzterms, Term))
+    #             else:
+    #                 phrase.add(term)
+    #     else:
+    #         phrase = PhraseQuery()
+    #         phrase.setSlop(slop)
+    #         for t in tokens:
+    #             term = Term(field, t)
+    #             phrase.add(term)
+    #     return phrase
+
+    def make_term_query(self, query, field='text', modal=operator.or_):
+        """
+        Returns term queries joined by boolean query.
+        modal - applies to boolean query
+        fuzzy - should the query by fuzzy.
+        """
+        q = self.index.Q()
+        q = reduce(modal, map(lambda s: self.index.Q(**{field: s}),
+                        query.split(r" ")), q)
 
-        return books
+        return q
 
-    def search_everywhere(self, searched, max_results=20, fuzzy=False):
+    def search_phrase(self, searched, field='text', book=False,
+                      filters=None,
+                      snippets=False):
+        if filters is None: filters = []
+        if book: filters.append(self.index.Q(is_book=True))
+
+        q = self.index.query(**{field: searched})
+        q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True)
+        res = q.execute()
+        return [SearchResult(found, how_found=u'search_phrase') for found in res]
+
+    def search_some(self, searched, fields, book=True,
+                    filters=None, snippets=True, query_terms=None):
+        assert isinstance(fields, list)
+        if filters is None: filters = []
+        if book: filters.append(self.index.Q(is_book=True))
+
+        query = self.index.Q()
+
+        for fld in fields:
+            query = self.index.Q(query | self.make_term_query(searched, fld))
+
+        query = self.index.query(query)
+        query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True)
+        res = query.execute()
+        return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res]
+
+    # def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
+    #     """
+    #     Search for perfect book matches. Just see if the query matches with some author or title,
+    #     taking hints into account.
+    #     """
+    #     fields_to_search = ['authors', 'title']
+    #     only_in = None
+    #     if hint:
+    #         if not hint.should_search_for_book():
+    #             return []
+    #         fields_to_search = hint.just_search_in(fields_to_search)
+    #         only_in = hint.book_filter()
+
+    #     qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]
+
+    #     books = []
+    #     for q in qrys:
+    #         top = self.searcher.search(q,
+    #             self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
+    #             max_results)
+    #         for found in top.scoreDocs:
+    #             books.append(SearchResult(self, found, how_found="search_perfect_book"))
+    #     return books
+
+    # def search_book(self, searched, max_results=20, fuzzy=False, hint=None):
+    #     fields_to_search = ['tags', 'authors', 'title']
+
+    #     only_in = None
+    #     if hint:
+    #         if not hint.should_search_for_book():
+    #             return []
+    #         fields_to_search = hint.just_search_in(fields_to_search)
+    #         only_in = hint.book_filter()
+
+    #     tokens = self.get_tokens(searched, field='SIMPLE')
+
+    #     q = BooleanQuery()
+
+    #     for fld in fields_to_search:
+    #         q.add(BooleanClause(self.make_term_query(tokens, field=fld,
+    #                             fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+    #     books = []
+    #     top = self.searcher.search(q,
+    #                                self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
+    #         max_results)
+    #     for found in top.scoreDocs:
+    #         books.append(SearchResult(self, found, how_found="search_book"))
+
+    #     return books
+
+    # def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
+    #     """
+    #     Search for book parts which contains a phrase perfectly matching (with a slop of 2, default for make_phrase())
+    #     some part/fragment of the book.
+    #     """
+    #     qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['text']]
+
+    #     flt = None
+    #     if hint:
+    #         flt = hint.part_filter()
+
+    #     books = []
+    #     for q in qrys:
+    #         top = self.searcher.search(q,
+    #                                    self.chain_filters([self.term_filter(Term('is_book', 'true'), inverse=True),
+    #                                                        flt]),
+    #                                    max_results)
+    #         for found in top.scoreDocs:
+    #             books.append(SearchResult(self, found, snippets=self.get_snippets(found, q), how_found='search_perfect_parts'))
+
+    #     return books
+
+    def search_everywhere(self, searched, query_terms=None):
+        """
+        Tries to use search terms to match different fields of the book (or its parts).
+        E.g. one word can be an author's surname, another a part of the title, and the rest
+        some words from the third chapter.
+        """
         books = []
-
         # content only query : themes x content
-        q = BooleanQuery()
+        q = self.make_term_query(searched, 'text')
+        q_themes = self.make_term_query(searched, 'themes_pl')
 
-        tokens = self.get_tokens(searched)
-        q.add(BooleanClause(self.make_term_query(tokens, field='themes', fuzzy=fuzzy), BooleanClause.Occur.MUST))
-        q.add(BooleanClause(self.make_term_query(tokens, field='content', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+        query = self.index.query(q).query(q_themes).field_limit(score=True, all_fields=True)
+        res = query.execute()
 
-        topDocs = self.searcher.search(q, max_results)
-        for found in topDocs.scoreDocs:
-            books.append(SearchResult(self.searcher, found))
+        for found in res:
+            books.append(SearchResult(found, how_found='search_everywhere_themesXcontent', query_terms=query_terms))
 
-        # joined query themes/content x author/title/epochs/genres/kinds
-        q = BooleanQuery()
-        in_meta = BooleanQuery()
-        in_content = BooleanQuery()
+        # query themes/content x author/title/tags
+        in_content = self.index.Q()
+        in_meta = self.index.Q()
 
-        for fld in ['themes', 'content']:
-            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))
+        for fld in ['themes_pl', 'text']:
+            in_content |= self.make_term_query(searched, field=fld)
 
-        in_meta.add(BooleanClause(self.make_term_query(
-            self.get_tokens(searched, field='author'), field='author', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+        for fld in ['tags', 'authors', 'title']:
+            in_meta |= self.make_term_query(searched, field=fld)
 
-        for fld in ['title', 'epochs', 'genres', 'kinds']:
-            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+        q = in_content & in_meta
+        res = self.index.query(q).field_limit(score=True, all_fields=True).execute()
 
-        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
-        in_content_join = self.content_query(in_content)
-        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
-        #        import pdb; pdb.set_trace()
-        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+        for found in res:
+            books.append(SearchResult(found, how_found='search_everywhere', query_terms=query_terms))
 
-        self.searcher.search(q, collector)
-
-        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
-        if top_groups:
-            for grp in top_groups.groups:
-                for part in grp.scoreDocs:
-                    books.append(SearchResult(self.searcher, part, score=grp.maxScore))
         return books
 
-    def multisearch(self, query, max_results=50):
+    def get_snippets(self, searchresult, query, field='text', num=1):
         """
-        Search strategy:
-        - (phrase) OR -> content
-                      -> title
-                      -> author
-        - (keywords)  -> author
-                      -> motyw
-                      -> tags
-                      -> content
+        Returns up to num highlighted text snippets for the given search result.
         """
-        # queryreader = StringReader(query)
-        # tokens = self.get_tokens(queryreader)
+        maxnum = len(searchresult)
+        if num is None or num < 0 or num > maxnum:
+            num = maxnum
+        book_id = searchresult.book_id
+        revision = searchresult.snippet_revision()
+        snippets = Snippets(book_id, revision=revision)
+        snips = [None] * maxnum
+        try:
+            snippets.open()
+            idx = 0
+            while idx < maxnum and num > 0:
+                position, length = searchresult.snippet_pos(idx)
+                if position is None or length is None:
+                    idx += 1
+                    continue
+                text = snippets.get((int(position),
+                                     int(length)))
+                snip = self.index.highlight(text=text, field=field, q=query)
+                snips[idx] = snip
+                if snip:
+                    num -= 1
+                idx += 1
+
+        except IOError as e:
+            log.error("Cannot open snippet file for book id = %d [rev=%s]: %s" % (book_id, revision, e))
+            return []
+        finally:
+            snippets.close()
 
-        # top_level = BooleanQuery()
-        # Should = BooleanClause.Occur.SHOULD
+        # remove verse end markers
+        snips = map(lambda s: s and s.replace("/\n", "\n"), snips)
 
-        # phrase_level = BooleanQuery()
-        # phrase_level.setBoost(1.3)
+        searchresult.snippets = snips
 
-        # p_content = self.make_phrase(tokens, joined=True)
-        # p_title = self.make_phrase(tokens, 'title')
-        # p_author = self.make_phrase(tokens, 'author')
+        return snips
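+
+    # Usage sketch (hypothetical; `result` is a SearchResult returned by one of
+    # the search methods and `q` the query used to obtain it):
+    #
+    #   snips = search.get_snippets(result, q, num=3)
+    #   first = next((s for s in snips if s), None)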
 
-        # phrase_level.add(BooleanClause(p_content, Should))
-        # phrase_level.add(BooleanClause(p_title, Should))
-        # phrase_level.add(BooleanClause(p_author, Should))
+    def hint_tags(self, query, pdcounter=True, prefix=True):
+        """
+        Return auto-complete hints for tags
+        using prefix search.
+        """
+        q = self.index.Q()
+        query = query.strip()
+        for field in ['tag_name', 'tag_name_pl']:
+            if prefix:
+                q |= self.index.Q(**{field: query + "*"})
+            else:
+                q |= self.make_term_query(query, field=field)
+        qu = self.index.query(q).exclude(tag_category="book")
 
-        # kw_level = BooleanQuery()
+        return self.search_tags(qu, pdcounter=pdcounter)
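+
+    # Usage sketch (hypothetical): tag hints for an autocomplete box, using the
+    # prefix query built above:
+    #
+    #   tags = search.hint_tags(u"roman")   # may match e.g. the epoch "Romantyzm"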
 
-        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
-        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
-        # kw_level.add(j_themes, Should)
-        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
-        # j_con = self.make_term_query(tokens, joined=True)
-        # kw_level.add(j_con, Should)
+    def search_tags(self, query, filters=None, pdcounter=False):
+        """
+        Search for Tag objects using query.
+        """
+        if not filters:
+            filters = []
+        if not pdcounter:
+            filters.append(~self.index.Q(is_pdcounter=True))
+        res = self.apply_filters(query, filters).execute()
+
+        tags = []
+        for doc in res:
+            is_pdcounter = doc.get('is_pdcounter', False)
+            category = doc.get('tag_category')
+            try:
+                if is_pdcounter:
+                    if category == 'pd_author':
+                        tag = PDCounterAuthor.objects.get(id=doc.get('tag_id'))
+                    elif category == 'pd_book':
+                        tag = PDCounterBook.objects.get(id=doc.get('tag_id'))
+                        tag.category = 'pd_book'  # make it look more like a tag.
+                    else:
+                        log.warning("Cannot get pdcounter tag_id=%d from db; category=%s" %
+                                    (int(doc.get('tag_id')), category))
+                        continue
+                else:
+                    tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
+                    # don't add the pdcounter tag if same tag already exists
 
-        # top_level.add(BooleanClause(phrase_level, Should))
-        # top_level.add(BooleanClause(kw_level, Should))
+                tags.append(tag)
 
-        return None
+            except (catalogue.models.Tag.DoesNotExist,
+                    PDCounterAuthor.DoesNotExist,
+                    PDCounterBook.DoesNotExist):
+                pass
 
-    def do_search(self, query, max_results=50, collector=None):
-        tops = self.searcher.search(query, max_results)
-        #tops = self.searcher.search(p_content, max_results)
+        log.debug('search_tags: %s' % tags)
 
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
-            bks.append(b)
-            print "%s (%d) -> %f" % (b, b.id, found.score)
-        return (bks, tops.totalHits)
+        return tags
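+
+    # Usage sketch (hypothetical), mirroring the prefix query that hint_tags()
+    # builds above:
+    #
+    #   q = search.index.query(search.index.Q(tag_name_pl=u"roman" + "*"))
+    #   tags = search.search_tags(q, pdcounter=True)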
 
-    def get_snippets(self, scoreDoc, query, field='content'):
-        htmlFormatter = SimpleHTMLFormatter()
-        highlighter = Highlighter(htmlFormatter, QueryScorer(query))
+    def hint_books(self, query, prefix=True):
+        """
+        Returns auto-complete hints for book titles. Needed because we do
+        not index 'pseudo' title-tags. Uses prefix search by default.
+        """
+        q = self.index.Q()
+        query = query.strip()
+        if prefix:
+            q |= self.index.Q(title=query + "*")
+        else:
+            q |= self.make_term_query(query, field='title')
+        qu = self.index.query(q)
+        only_books = self.index.Q(is_book=True)
+        return self.search_books(qu, [only_books])
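+
+    # Usage sketch (hypothetical): title autocompletion via prefix search:
+    #
+    #   books = search.hint_books(u"Pan Tade")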
 
-        stored = self.searcher.doc(scoreDoc.doc)
+    def search_books(self, query, filters=None, max_results=10):
+        """
+        Searches for Book objects using query
+        """
+        bks = []
+        res = self.apply_filters(query, filters).field_limit(['book_id'])
+        for r in res:
+            try:
+                bks.append(catalogue.models.Book.objects.get(id=r['book_id']))
+            except catalogue.models.Book.DoesNotExist: pass
+        return bks
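+
+    # Usage sketch (hypothetical), querying books directly with an explicit
+    # is_book filter, as hint_books() does above:
+    #
+    #   q = search.index.query(search.index.Q(title=u"dziady"))
+    #   books = search.search_books(q, [search.index.Q(is_book=True)])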
 
-        # locate content.
-        snippets = Snippets(stored.get('book_id')).open()
-        try:
-            text = snippets.get(stored.get('snippets_position'), stored.get('snippets_length'))
-        finally:
-            snippets.close()
 
-        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
-        #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
-        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
-        print('snips: %s' % snip)
 
-        return [snip]
+    @staticmethod
+    def apply_filters(query, filters):
+        """
+        Apply filters to a query
+        """
+        if filters is None: filters = []
+        filters = filter(lambda x: x is not None, filters)
+        for f in filters:
+            query = query.query(f)
+        return query
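+
+    # Filters are AND-ed onto the query by chaining .query() calls; None entries
+    # are ignored. Hypothetical example:
+    #
+    #   q = search.index.query(authors=u"Norwid")
+    #   q = search.apply_filters(q, [search.index.Q(is_book=True), None])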
+