multisearch fails to see joined queries -> will try to use one top-level query
[wolnelektury.git] / apps / search / index.py
index d5d696d..53812cb 100644 (file)
@@ -6,13 +6,19 @@ from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     SimpleAnalyzer, PolishAnalyzer, ArrayList, \
     KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
     BlockJoinQuery, BlockJoinCollector, TermsFilter, \
-    HashSet, BooleanClause, Term
+    HashSet, BooleanClause, Term, CharTermAttribute, \
+    PhraseQuery, StringReader, TermQuery, Sort
     # KeywordAnalyzer
+import sys
 import os
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
-from catalogue.models import Book
+import catalogue.models
+from multiprocessing.pool import ThreadPool
+from threading import current_thread
+import atexit
 
 
 class WLAnalyzer(PerFieldAnalyzerWrapper):
@@ -79,6 +85,7 @@ class Index(IndexStore):
         if overwrite:
             self.remove_book(book)
 
         doc = self.extract_metadata(book)
         parts = self.extract_content(book)
         block = ArrayList().of_(Document)
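+        # NOTE: Lucene's block join expects the child (part) documents to be
+        # added first and the parent (book) document last in a single block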
@@ -97,6 +104,8 @@ class Index(IndexStore):
         'wywiad'
         ]
 
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu']
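+    # (these headers duplicate the author/title already indexed as book
+    # metadata, so their text is skipped when indexing content)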
+
     def create_book_doc(self, book):
         """
         Create a lucene document connected to the book
@@ -110,6 +119,8 @@ class Index(IndexStore):
     def extract_metadata(self, book):
         book_info = dcparser.parse(book.xml_file)
 
+        print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident))
+        
         doc = self.create_book_doc(book)
         doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
         doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
@@ -147,6 +158,7 @@ class Index(IndexStore):
             if master.tag in self.master_tags:
                 return master
 
     def extract_content(self, book):
         wld = WLDocument.from_file(book.xml_file.path)
         root = wld.edoc.getroot()
@@ -156,9 +168,13 @@ class Index(IndexStore):
         # header_index - the 0-indexed position of header element.
         # content
         master = self.get_master(root)
+        if master is None:
+            return []
+
         header_docs = []
         for header, position in zip(list(master), range(len(master))):
-            print("header %s @%d" % (header, position))
+            if header.tag in self.skip_header_tags:
+                continue
             doc = self.create_book_doc(book)
             doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
             doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
@@ -181,7 +197,6 @@ class Index(IndexStore):
         # will contain (framgent id -> { content: [], themes: [] }
         fragments = {}
         for start, end in walker(master):
-            print("%s %s" % (start, end))
             if start is not None and start.tag == 'begin':
                 fid = start.attrib['id'][1:]
                 fragments[fid] = {'content': [], 'themes': []}
@@ -192,14 +207,26 @@ class Index(IndexStore):
                 fragments[fid]['content'].append(start.tail)
             elif start is not None and start.tag == 'end':
                 fid = start.attrib['id'][1:]
+                if fid not in fragments:
+                    continue  # a broken <end> node, skip it
                 frag = fragments[fid]
                 del fragments[fid]
-                print("Fragment %s complete, themes: %s contents: %s" % (fid, frag['themes'], frag['content']))
+
+                def jstr(l):
+                    # debug helper: join items for display, showing None as u'(none)'
+                    return u' '.join(u'(none)' if x is None else unicode(x)
+                                     for x in l)
 
                 doc = self.create_book_doc(book)
-                doc.add(Field("fragment_anchor", fid, Field.Store.YES, Field.Index.NOT_ANALYZED))
-                doc.add(Field("content", u' '.join(filter(lambda s: s is not None, frag['content'])), Field.Store.NO, Field.Index.ANALYZED))
-                doc.add(Field("themes", u' '.join(frag['themes']), Field.Store.NO, Field.Index.ANALYZED))
+                doc.add(Field("fragment_anchor", fid,
+                              Field.Store.YES, Field.Index.NOT_ANALYZED))
+                doc.add(Field("content",
+                              u' '.join(filter(lambda s: s is not None, frag['content'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+                doc.add(Field("themes",
+                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+
                 fragment_docs.append(doc)
             elif start is not None:
                 for frag in fragments.values():
@@ -218,6 +245,49 @@ class Index(IndexStore):
         self.close()
 
 
+class ReusableIndex(Index):
+    """
+    Works like index, but does not close/optimize Lucene index
+    until program exit (uses atexit hook).
+    This is usefull for importbooks command.
+
+    if you cannot rely on atexit, use ReusableIndex.close_reusable() yourself.
+    """
+    index = None
+    pool = None
+    pool_jobs = None
+
+    def open(self, analyzer=None, threads=4):
+        if ReusableIndex.index is not None:
+            self.index = ReusableIndex.index
+        else:
+            print("opening index")
+            ReusableIndex.pool = ThreadPool(threads)
+            ReusableIndex.pool_jobs = []
+            Index.open(self, analyzer)
+            ReusableIndex.index = self.index
+            atexit.register(ReusableIndex.close_reusable)
+
+    def index_book(self, *args, **kw):
+        job = ReusableIndex.pool.apply_async(Index.index_book, (self,) + args, kw)
+        ReusableIndex.pool_jobs.append(job)
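+        # NOTE: the pooled jobs share one IndexWriter; Lucene's IndexWriter
+        # is thread-safe, so concurrent index_book calls are safe here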
+
+    @staticmethod
+    def close_reusable():
+        if ReusableIndex.index is not None:
+            print("closing index")
+            for job in ReusableIndex.pool_jobs:
+                job.wait()
+            ReusableIndex.pool.close()
+
+            ReusableIndex.index.optimize()
+            ReusableIndex.index.close()
+            ReusableIndex.index = None
+
+    def close(self):
+        pass
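+    # A minimal usage sketch (hypothetical, mirroring the importbooks flow):
+    #
+    #   index = ReusableIndex()
+    #   index.open()
+    #   for book in catalogue.models.Book.objects.all():
+    #       index.index_book(book)  # queued on the shared thread pool
+    #   # on exit, the atexit hook waits for the queued jobs, then
+    #   # optimizes and closes the index via close_reusable()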
+
+
 class Search(IndexStore):
     def __init__(self, default_field="content"):
         IndexStore.__init__(self)
@@ -264,7 +334,7 @@ class Search(IndexStore):
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
     def search(self, query, max_results=50):
@@ -275,18 +345,131 @@ class Search(IndexStore):
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
     def bsearch(self, query, max_results=50):
         q = self.query(query)
-        f = TermsFilter()
-        f.addTerm(Term("is_book", "true"))
-        bjq = BlockJoinQuery(q, f, BlockJoinQuery.ScoreMode.Avg)
+        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
 
         tops = self.searcher.search(bjq, max_results)
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return (bks, tops.totalHits)
+
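+# Java reference for the Lucene TokenStream loop; MultiSearch.get_tokens
+# below is the PyLucene equivalent: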
+# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
+# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
+# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
+
+# while (tokenStream.incrementToken()) {
+#     int startOffset = offsetAttribute.startOffset();
+#     int endOffset = offsetAttribute.endOffset();
+#     String term = charTermAttribute.toString();
+# }
+
+
+class MultiSearch(Search):
+    """Class capable of IMDb-like searching"""
+    def get_tokens(self, queryreader):
+        if isinstance(queryreader, (str, unicode)):
+            queryreader = StringReader(queryreader)
+        queryreader.reset()
+        tokens = self.analyzer.reusableTokenStream('content', queryreader)
+        toks = []
+        while tokens.incrementToken():
+            cta = tokens.getAttribute(CharTermAttribute.class_)
+            toks.append(cta.toString())
+        return toks
+
+    def make_phrase(self, tokens, field='content', joined=False, slop=2):
+        phrase = PhraseQuery()
+        phrase.setSlop(slop)
+        for t in tokens:
+            term = Term(field, t)
+            phrase.add(term)
+        if joined:
+            phrase = self.content_query(phrase)
+        return phrase
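+    # e.g. (illustrative call) make_phrase(self.get_tokens(u'pan tadeusz'),
+    # 'title') matches both terms in order within a slop of 2; with
+    # joined=True the phrase is wrapped in a block join, so fragment hits
+    # surface their parent book document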
+
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, joined=False):
+        q = BooleanQuery()
+        for t in tokens:
+            term = Term(field, t)
+            q.add(BooleanClause(TermQuery(term), modal))
+        if joined:
+            q = self.content_query(q)
+        return q
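+    # e.g. make_term_query(toks, 'tags') ORs one TermQuery per token over
+    # the tags field (SHOULD occurrence)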
+
+    def content_query(self, query):
+        return BlockJoinQuery(query, self.parent_filter,
+                              BlockJoinQuery.ScoreMode.Total)
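+    # the block join runs the wrapped query against child (fragment/header)
+    # documents and maps hits onto the parent book documents selected by
+    # parent_filter; ScoreMode.Total sums child scores into the parent score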
+
+    def multisearch(self, query, max_results=50):
+        """
+        Search strategy:
+        - (phrase) OR -> content
+                      -> title
+                      -> author
+        - (keywords)  -> author
+                      -> themes
+                      -> tags
+                      -> content
+        """
+        queryreader = StringReader(query)
+        tokens = self.get_tokens(queryreader)
+
+        top_level = BooleanQuery()
+        Should = BooleanClause.Occur.SHOULD
+
+        phrase_level = BooleanQuery()
+        phrase_level.setBoost(1.3)
+
+        p_content = self.make_phrase(tokens, joined=True)
+        p_title = self.make_phrase(tokens, 'title')
+        p_author = self.make_phrase(tokens, 'author')
+
+        phrase_level.add(BooleanClause(p_content, Should))
+        phrase_level.add(BooleanClause(p_title, Should))
+        phrase_level.add(BooleanClause(p_author, Should))
+
+        kw_level = BooleanQuery()
+
+        kw_level.add(self.make_term_query(tokens, 'author'), Should)
+        j_themes = self.make_term_query(tokens, 'themes', joined=True)
+        kw_level.add(j_themes, Should)
+        kw_level.add(self.make_term_query(tokens, 'tags'), Should)
+        j_con = self.make_term_query(tokens, joined=True)
+        kw_level.add(j_con, Should)
+
+        top_level.add(BooleanClause(phrase_level, Should))
+        top_level.add(BooleanClause(kw_level, Should))
+
+        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+
+        # search the combined top-level query (phrase matches OR keyword matches)
+        self.searcher.search(top_level, collector)
+
+        # phrases in the content:
+        # ph1 = collector.getTopGroups(j_themes, Sort.RELEVANCE,
+        #                              0, 10, 0, True)
+        # ph2 = collector.getTopGroups(j_con, Sort.RELEVANCE,
+        #                              0, 10, 0, True)
+        # shell test: reload(search.index); reload(search)
+        #             s = search.MultiSearch(); s.multisearch(u'dusiołek')
+
+        # WIP: drop into the debugger to inspect the collector results
+        import pdb; pdb.set_trace()
+
+        return None
+
+    def do_search(self, query, max_results=50, collector=None):
+        # NOTE: the collector argument is accepted but not used yet
+        tops = self.searcher.search(query, max_results)
+
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
+            bks.append(b)
+            print("%s (%d) -> %f" % (b, b.id, found.score))
         return (bks, tops.totalHits)