X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/d595ad44e359460abafebfcb4146140a169312c5..4d5f62cd29ea6056605de06356fad641100f426b:/apps/search/index.py diff --git a/apps/search/index.py b/apps/search/index.py index a6d5cf6eb..53812cbb0 100644 --- a/apps/search/index.py +++ b/apps/search/index.py @@ -7,7 +7,8 @@ from lucene import SimpleFSDirectory, IndexWriter, File, Field, \ KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \ BlockJoinQuery, BlockJoinCollector, TermsFilter, \ HashSet, BooleanClause, Term, CharTermAttribute, \ - PhraseQuery, StringReader + PhraseQuery, StringReader, TermQuery, \ + Sort # KeywordAnalyzer import sys import os @@ -16,6 +17,7 @@ from librarian import dcparser from librarian.parser import WLDocument import catalogue.models from multiprocessing.pool import ThreadPool +from threading import current_thread import atexit @@ -82,7 +84,7 @@ class Index(IndexStore): def index_book(self, book, overwrite=True): if overwrite: self.remove_book(book) - + doc = self.extract_metadata(book) parts = self.extract_content(book) @@ -117,6 +119,8 @@ class Index(IndexStore): def extract_metadata(self, book): book_info = dcparser.parse(book.xml_file) + print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident)) + doc = self.create_book_doc(book) doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)) doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED)) @@ -257,6 +261,7 @@ class ReusableIndex(Index): if ReusableIndex.index is not None: self.index = ReusableIndex.index else: + print("opening index") ReusableIndex.pool = ThreadPool(threads) ReusableIndex.pool_jobs = [] Index.open(self, analyzer) @@ -270,6 +275,7 @@ class ReusableIndex(Index): @staticmethod def close_reusable(): if ReusableIndex.index is not None: + print("closing index") for job in ReusableIndex.pool_jobs: job.wait() ReusableIndex.pool.close() @@ 
-374,11 +380,12 @@ class MultiSearch(Search): toks = [] while tokens.incrementToken(): cta = tokens.getAttribute(CharTermAttribute.class_) - toks.append(cta) + toks.append(cta.toString()) return toks - def make_phrase(self, tokens, field='content', joined=False): + def make_phrase(self, tokens, field='content', joined=False, slop=2): phrase = PhraseQuery() + phrase.setSlop(slop) for t in tokens: term = Term(field, t) phrase.add(term) @@ -390,16 +397,16 @@ class MultiSearch(Search): q = BooleanQuery() for t in tokens: term = Term(field, t) - q.add(BooleanClause(term, modal)) + q.add(BooleanClause(TermQuery(term), modal)) if joined: - self.content_query(q) + q = self.content_query(q) return q def content_query(self, query): return BlockJoinQuery(query, self.parent_filter, BlockJoinQuery.ScoreMode.Total) - def multiseach(self, query, max_results=50): + def multisearch(self, query, max_results=50): """ Search strategy: - (phrase) OR -> content @@ -417,9 +424,10 @@ class MultiSearch(Search): Should = BooleanClause.Occur.SHOULD phrase_level = BooleanQuery() + phrase_level.setBoost(1.3) p_content = self.make_phrase(tokens, joined=True) - p_title = self.make_phrase(tokens, 'title') + p_title = self.make_phrase(tokens, 'title') p_author = self.make_phrase(tokens, 'author') phrase_level.add(BooleanClause(p_content, Should)) @@ -429,16 +437,39 @@ class MultiSearch(Search): kw_level = BooleanQuery() kw_level.add(self.make_term_query(tokens, 'author'), Should) - kw_level.add(self.make_term_query(tokens, 'themes', joined=True), Should) + j_themes = self.make_term_query(tokens, 'themes', joined=True) + kw_level.add(j_themes, Should) kw_level.add(self.make_term_query(tokens, 'tags'), Should) - kw_level.add(self.make_term_query(tokens, joined=True), Should) + j_con = self.make_term_query(tokens, joined=True) + kw_level.add(j_con, Should) top_level.add(BooleanClause(phrase_level, Should)) top_level.add(BooleanClause(kw_level, Should)) - tops = self.searcher.search(top_level, 
max_results) + collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True) + + self.searcher.search(kw_level, collector) + + # phrases in the content: + # ph1 = collector.getTopGroups(j_themes, Sort.RELEVANCE, + # 0, 10, 0, True) + # reload(search.index); reload(search); s = search.MultiSearch(); s.multisearch(u'dusiołek') + # ph2 = collector.getTopGroups(j_con, Sort.RELEVANCE, + # 0, 10, 0, True) + + import pdb; pdb.set_trace(); + + return None + + + def do_search(self, query, max_results=50, collector=None): + tops = self.searcher.search(query, max_results) + #tops = self.searcher.search(p_content, max_results) + bks = [] for found in tops.scoreDocs: doc = self.searcher.doc(found.doc) - bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id"))) + b = catalogue.models.Book.objects.get(id=doc.get("book_id")) + bks.append(b) + print "%s (%d) -> %f" % (b, b.id, found.score) return (bks, tops.totalHits)