rich search workon
[wolnelektury.git] / apps/search/index.py
index f5489a5..42a271e 100644
@@ -7,7 +7,7 @@ from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
     BlockJoinQuery, BlockJoinCollector, TermsFilter, \
     HashSet, BooleanClause, Term, CharTermAttribute, \
-    PhraseQuery, StringReader
+    PhraseQuery, StringReader, TermQuery
     # KeywordAnalyzer
 import sys
 import os
@@ -82,6 +82,7 @@ class Index(IndexStore):
     def index_book(self, book, overwrite=True):
         if overwrite:
             self.remove_book(book)
+
 
         doc = self.extract_metadata(book)
         parts = self.extract_content(book)
@@ -263,19 +264,14 @@ class ReusableIndex(Index):
             atexit.register(ReusableIndex.close_reusable)
 
     def index_book(self, *args, **kw):
-        job = ReusableIndex.pool.apply_async(Index.index_book, args, kw)
+        job = ReusableIndex.pool.apply_async(Index.index_book, (self,) + args, kw)
         ReusableIndex.pool_jobs.append(job)
 
     @staticmethod
     def close_reusable():
         if ReusableIndex.index is not None:
-            all_jobs = len(ReusableIndex.pool_jobs)
-            waited=1
             for job in ReusableIndex.pool_jobs:
-                sys.stdout.write("\rWaiting for search index job: %d/%d..." % (waited, all_jobs))
                 job.wait()
-                waited+=1
-            print("Indexing done.")
             ReusableIndex.pool.close()
 
             ReusableIndex.index.optimize()
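
One note on the apply_async change above: Index.index_book is an unbound method, so the worker call needs the Index instance as its first positional argument, which is exactly what prepending (self,) to args supplies. A toy sketch of the same Python 2 calling convention, using made-up names that do not come from the repository:

    class Indexer(object):
        def index_book(self, book):
            return "indexed %s" % book

    def dispatch(func, args, kw):
        # stand-in for Pool.apply_async: simply calls func(*args, **kw)
        return func(*args, **kw)

    idx = Indexer()
    # Without the instance the unbound method has nothing to bind to
    # (TypeError under Python 2); prepending it mirrors (self,) + args above.
    print dispatch(Indexer.index_book, (idx,) + ("Pan Tadeusz",), {})
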
@@ -378,11 +374,12 @@ class MultiSearch(Search):
         toks = []
         while tokens.incrementToken():
             cta = tokens.getAttribute(CharTermAttribute.class_)
-            toks.append(cta)
+            toks.append(cta.toString())
         return toks
 
-    def make_phrase(self, tokens, field='content', joined=False):
+    def make_phrase(self, tokens, field='content', joined=False, slop=2):
         phrase = PhraseQuery()
+        phrase.setSlop(slop)
         for t in tokens:
             term = Term(field, t)
             phrase.add(term)
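
The switch to cta.toString() matters because the token stream reuses a single CharTermAttribute instance between incrementToken() calls, so appending the attribute object itself would leave every list entry describing the last token. A minimal sketch of the safe pattern, assuming an initialized PyLucene VM and any Analyzer instance (the helper name is invented):

    from lucene import StringReader, CharTermAttribute

    def term_strings(analyzer, text, field='content'):
        # Copy each term out of the reused attribute as a plain string.
        stream = analyzer.tokenStream(field, StringReader(text))
        terms = []
        while stream.incrementToken():
            cta = stream.getAttribute(CharTermAttribute.class_)
            terms.append(cta.toString())
        return terms
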
@@ -394,7 +391,7 @@ class MultiSearch(Search):
         q = BooleanQuery()
         for t in tokens:
             term = Term(field, t)
-            q.add(BooleanClause(term, modal))
+            q.add(BooleanClause(TermQuery(term), modal))
         if joined:
             self.content_query(q)
         return q
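
Wrapping the term in TermQuery is needed because BooleanClause expects a Query rather than a bare Term, so the old line could not build a valid clause in PyLucene. The same construction in isolation, with made-up field and term values:

    from lucene import BooleanQuery, BooleanClause, TermQuery, Term

    q = BooleanQuery()
    should = BooleanClause.Occur.SHOULD
    for word in ['pan', 'tadeusz']:
        # each keyword becomes its own TermQuery, OR-ed into the boolean query
        q.add(BooleanClause(TermQuery(Term('content', word)), should))
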
@@ -403,7 +400,7 @@ class MultiSearch(Search):
         return BlockJoinQuery(query, self.parent_filter,
                               BlockJoinQuery.ScoreMode.Total)
 
-    def multiseach(self, query, max_results=50):
+    def multisearch(self, query, max_results=50):
         """
         Search strategy:
         - (phrase) OR -> content
@@ -421,6 +418,7 @@ class MultiSearch(Search):
         Should = BooleanClause.Occur.SHOULD
 
         phrase_level = BooleanQuery()
+        phrase_level.setBoost(1.3)
 
         p_content = self.make_phrase(tokens, joined=True)
         p_title = self.make_phrase(tokens, 'title')
@@ -440,9 +438,18 @@ class MultiSearch(Search):
         top_level.add(BooleanClause(phrase_level, Should))
         top_level.add(BooleanClause(kw_level, Should))
 
-        tops = self.searcher.search(top_level, max_results)
+        print self.do_search(phrase_level)
+        print self.do_search(kw_level)
+        print self.do_search(top_level)
+
+    def do_search(self, query, max_results=50):
+        tops = self.searcher.search(query, max_results)
+        #tops = self.searcher.search(p_content, max_results)
+
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
+            bks.append(b)
+            print "%s (%d) -> %f" % (b, b.id, found.score)
         return (bks, tops.totalHits)
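
For orientation, a rough sketch of how the reworked pieces fit together; none of it comes from the repository, and the MultiSearch constructor arguments and the token list are placeholders:

    # Hypothetical driver code: build a sloppy phrase over 'content',
    # run it through the new do_search helper and list the matching books.
    search = MultiSearch()                       # constructor args omitted
    tokens = ['pan', 'tadeusz']                  # normally produced by the analyzer
    phrase = search.make_phrase(tokens, field='content', slop=2)
    books, total = search.do_search(phrase, max_results=20)
    for book in books:
        print book                               # Book objects resolved via book_id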