X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/8ebbecdeab595766e659a0116245da8a925bdfeb..cdf0ea700156d13cd27a670d9363d92ebaffa93f:/apps/search/index.py

diff --git a/apps/search/index.py b/apps/search/index.py
index f5489a5ca..7ec88c619 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+
 from django.conf import settings
 from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     NumericField, Version, Document, JavaError, IndexSearcher, \
@@ -7,8 +8,11 @@ from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
     BlockJoinQuery, BlockJoinCollector, TermsFilter, \
     HashSet, BooleanClause, Term, CharTermAttribute, \
-    PhraseQuery, StringReader
+    PhraseQuery, StringReader, TermQuery, \
+    Sort, Integer, \
+    initVM, CLASSPATH  # KeywordAnalyzer
+JVM = initVM(CLASSPATH)
 import sys
 import os
 import errno
@@ -16,8 +20,9 @@ from librarian import dcparser
 from librarian.parser import WLDocument
 import catalogue.models
 from multiprocessing.pool import ThreadPool
+from threading import current_thread
 import atexit
-
+import traceback
 
 class WLAnalyzer(PerFieldAnalyzerWrapper):
     def __init__(self):
@@ -101,7 +106,7 @@ class Index(IndexStore):
         'wywiad'
         ]
 
-    skip_header_tags = ['autor_utworu', 'nazwa_utworu']
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
 
     def create_book_doc(self, book):
         """
@@ -116,9 +121,11 @@ class Index(IndexStore):
 
     def extract_metadata(self, book):
        book_info = dcparser.parse(book.xml_file)
+        print("extract metadata for book %s id=%d, thread %d" % (book.slug, book.id, current_thread().ident))
+
         doc = self.create_book_doc(book)
         doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
-        doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
+        # doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
         doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))
 
         # validator, name
@@ -174,7 +181,7 @@ class Index(IndexStore):
             doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
             doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
             content = u' '.join([t for t in header.itertext()])
-            doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED))
             header_docs.append(doc)
 
         def walker(node):
@@ -217,7 +224,7 @@ class Index(IndexStore):
                               Field.Store.YES, Field.Index.NOT_ANALYZED))
                 doc.add(Field("content", u' '.join(filter(lambda s: s is not None, frag['content'])),
-                              Field.Store.NO, Field.Index.ANALYZED))
+                              Field.Store.YES, Field.Index.ANALYZED))
                 doc.add(Field("themes", u' '.join(filter(lambda s: s is not None, frag['themes'])),
                               Field.Store.NO, Field.Index.ANALYZED))
@@ -240,6 +247,17 @@ class Index(IndexStore):
         self.close()
 
 
+def log_exception_wrapper(f):
+    def _wrap(*a):
+        try:
+            f(*a)
+        except Exception, e:
+            print("Error in indexing thread: %s" % e)
+            traceback.print_exc()
+            raise  # bare raise keeps the worker's original traceback
+    return _wrap
+
+
 class ReusableIndex(Index):
     """
     Works like index, but does not close/optimize Lucene index
@@ -256,26 +274,26 @@ class ReusableIndex(Index):
         if ReusableIndex.index is not None:
             self.index = ReusableIndex.index
         else:
-            ReusableIndex.pool = ThreadPool(threads)
+            print("opening index")
+            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread())
             ReusableIndex.pool_jobs = []
             Index.open(self, analyzer)
             ReusableIndex.index = self.index
             atexit.register(ReusableIndex.close_reusable)
 
     def index_book(self, *args, **kw):
-        job = ReusableIndex.pool.apply_async(Index.index_book, args, kw)
+        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
         ReusableIndex.pool_jobs.append(job)
 
     @staticmethod
     def close_reusable():
         if ReusableIndex.index is not None:
-            all_jobs = len(ReusableIndex.pool_jobs)
-            waited=1
+            print("wait for indexing to finish")
             for job in ReusableIndex.pool_jobs:
-                sys.stdout.write("\rWaiting for search index job: %d/%d..." % (waited, all_jobs))
-                job.wait()
-                waited+=1
-            print("Indexing done.")
+                job.get()
+                sys.stdout.write('.')
+                sys.stdout.flush()
+            print("done.")
 
             ReusableIndex.pool.close()
 
             ReusableIndex.index.optimize()
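The `initializer` hook in the hunk above is the usual PyLucene idiom: any thread created outside the JVM must attach itself before it touches a Lucene class, otherwise the first JNI call fails. A minimal standalone sketch of the same pattern (the pool size and the helper name are illustrative, not part of this commit):

    import lucene
    from multiprocessing.pool import ThreadPool

    JVM = lucene.initVM(lucene.CLASSPATH)

    def attach_jvm():
        # runs once in every worker thread, before any job is executed
        JVM.attachCurrentThread()

    pool = ThreadPool(4, initializer=attach_jvm)
    # jobs submitted via pool.apply_async() may now use Lucene classes safely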
@@ -368,42 +386,158 @@ class Search(IndexStore):
     # }
 
 
+class SearchResult(object):
+    def __init__(self, searcher, scoreDocs, score=None):
+        if score:
+            self.score = score
+        else:
+            self.score = scoreDocs.score
+
+        self.fragments = []
+        self.scores = {}
+        self.sections = []
+
+        stored = searcher.doc(scoreDocs.doc)
+        self.book_id = int(stored.get("book_id"))
+
+        fragment = stored.get("fragment_anchor")
+        if fragment:
+            self.fragments.append(fragment)
+            self.scores[fragment] = scoreDocs.score
+
+        header_type = stored.get("header_type")
+        if header_type:
+            sec = (header_type, int(stored.get("header_index")))
+            self.sections.append(sec)
+            self.scores[sec] = scoreDocs.score
+
+    def get_book(self):
+        return catalogue.models.Book.objects.get(id=self.book_id)
+
+    book = property(get_book)
+
+    def get_parts(self):
+        book = self.book
+        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
+            + [{"fragment": book.fragments.get(anchor=f), '_score_key': f} for f in self.fragments]
+
+        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
+        print("bookid: %d parts: %s" % (self.book_id, parts))
+        return parts
+
+    parts = property(get_parts)
+
+    def merge(self, other):
+        if self.book_id != other.book_id:
+            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
+        self.fragments += other.fragments
+        self.sections += other.sections
+        self.scores.update(other.scores)
+        if other.score > self.score:
+            self.score = other.score
+        return self
+
+    def __unicode__(self):
+        return u'SearchResult(book_id=%d, score=%f)' % (self.book_id, self.score)
+
+    @staticmethod
+    def aggregate(*result_lists):
+        books = {}
+        for rl in result_lists:
+            for r in rl:
+                if r.book_id in books:
+                    books[r.book_id].merge(r)
+                    # print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
+                else:
+                    books[r.book_id] = r
+        return books.values()
+
+    def __cmp__(self, other):
+        return cmp(self.score, other.score)
+
+
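A short usage sketch of the merging flow above, assuming two hit lists produced by the `search_perfect_book` and `search_perfect_parts` helpers defined further down (`search` and `tokens` are placeholders for an initialised searcher and an analysed query):

    hits_meta = search.search_perfect_book(tokens)    # author/title phrase hits
    hits_text = search.search_perfect_parts(tokens)   # content phrase hits

    # one SearchResult per book: fragments and sections merged, best score kept
    results = SearchResult.aggregate(hits_meta, hits_text)
    results.sort(reverse=True)  # __cmp__ orders by score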
 class MultiSearch(Search):
     """Class capable of IMDb-like searching"""
     def get_tokens(self, queryreader):
-        if isinstance(queryreader, str):
+        if isinstance(queryreader, (str, unicode)):
             queryreader = StringReader(queryreader)
         queryreader.reset()
         tokens = self.analyzer.reusableTokenStream('content', queryreader)
         toks = []
         while tokens.incrementToken():
             cta = tokens.getAttribute(CharTermAttribute.class_)
-            toks.append(cta)
+            toks.append(cta.toString())
         return toks
 
-    def make_phrase(self, tokens, field='content', joined=False):
+    def make_phrase(self, tokens, field='content', slop=2):
         phrase = PhraseQuery()
+        phrase.setSlop(slop)
         for t in tokens:
             term = Term(field, t)
             phrase.add(term)
-        if joined:
-            phrase = self.content_query(phrase)
         return phrase
 
-    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, joined=False):
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD):
         q = BooleanQuery()
         for t in tokens:
             term = Term(field, t)
-            q.add(BooleanClause(term, modal))
-        if joined:
-            self.content_query(q)
+            q.add(BooleanClause(TermQuery(term), modal))
         return q
 
     def content_query(self, query):
         return BlockJoinQuery(query, self.parent_filter, BlockJoinQuery.ScoreMode.Total)
 
-    def multiseach(self, query, max_results=50):
+    def search_perfect_book(self, tokens, max_results=20):
+        qrys = [self.make_phrase(tokens, field=fld) for fld in ['author', 'title']]
+
+        books = []
+        for q in qrys:
+            top = self.searcher.search(q, max_results)
+            for found in top.scoreDocs:
+                books.append(SearchResult(self.searcher, found))
+        return books
+
+    def search_perfect_parts(self, tokens, max_results=20):
+        qrys = [self.make_phrase(tokens, field=fld) for fld in ['content']]
+
+        books = []
+        for q in qrys:
+            top = self.searcher.search(q, max_results)
+            for found in top.scoreDocs:
+                books.append(SearchResult(self.searcher, found))
+
+        return books
+
+    def search_everywhere(self, tokens, max_results=20):
+        q = BooleanQuery()
+        in_meta = BooleanQuery()
+        in_content = BooleanQuery()
+
+        for fld in ['themes', 'content']:
+            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
+        for fld in ['author', 'title', 'epochs', 'genres', 'kinds']:
+            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
+        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
+        in_content_join = self.content_query(in_content)
+        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
+
+        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+
+        self.searcher.search(q, collector)
+
+        books = []
+
+        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
+        if top_groups:
+            for grp in top_groups.groups:
+                for part in grp.scoreDocs:
+                    books.append(SearchResult(self.searcher, part, score=grp.maxScore))
+        return books
+
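For reference, this is the query that `make_phrase` above assembles for two tokens, u'pan' and u'tadeusz', with the default slop of 2 (the token values are only an example):

    phrase = PhraseQuery()
    phrase.setSlop(2)  # matching terms may sit up to 2 positions apart
    phrase.add(Term('content', u'pan'))
    phrase.add(Term('content', u'tadeusz'))
    # top = searcher.search(phrase, 20)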
+    def multisearch(self, query, max_results=50):
         """
         Search strategy:
         - (phrase) OR -> content
@@ -414,35 +548,46 @@ class MultiSearch(Search):
           -> tags
           -> content
         """
-        queryreader = StringReader(query)
-        tokens = self.get_tokens(queryreader)
+        # queryreader = StringReader(query)
+        # tokens = self.get_tokens(queryreader)
+
+        # top_level = BooleanQuery()
+        # Should = BooleanClause.Occur.SHOULD
 
-        top_level = BooleanQuery()
-        Should = BooleanClause.Occur.SHOULD
+        # phrase_level = BooleanQuery()
+        # phrase_level.setBoost(1.3)
 
-        phrase_level = BooleanQuery()
+        # p_content = self.make_phrase(tokens, joined=True)
+        # p_title = self.make_phrase(tokens, 'title')
+        # p_author = self.make_phrase(tokens, 'author')
 
-        p_content = self.make_phrase(tokens, joined=True)
-        p_title = self.make_phrase(tokens, 'title')
-        p_author = self.make_phrase(tokens, 'author')
+        # phrase_level.add(BooleanClause(p_content, Should))
+        # phrase_level.add(BooleanClause(p_title, Should))
+        # phrase_level.add(BooleanClause(p_author, Should))
 
-        phrase_level.add(BooleanClause(p_content, Should))
-        phrase_level.add(BooleanClause(p_title, Should))
-        phrase_level.add(BooleanClause(p_author, Should))
+        # kw_level = BooleanQuery()
 
-        kw_level = BooleanQuery()
+        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
+        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
+        # kw_level.add(j_themes, Should)
+        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
+        # j_con = self.make_term_query(tokens, joined=True)
+        # kw_level.add(j_con, Should)
 
-        kw_level.add(self.make_term_query(tokens, 'author'), Should)
-        kw_level.add(self.make_term_query(tokens, 'themes', joined=True), Should)
-        kw_level.add(self.make_term_query(tokens, 'tags'), Should)
-        kw_level.add(self.make_term_query(tokens, joined=True), Should)
+        # top_level.add(BooleanClause(phrase_level, Should))
+        # top_level.add(BooleanClause(kw_level, Should))
 
-        top_level.add(BooleanClause(phrase_level, Should))
-        top_level.add(BooleanClause(kw_level, Should))
+        return None
+
+    def do_search(self, query, max_results=50, collector=None):
+        tops = self.searcher.search(query, max_results)
+        # tops = self.searcher.search(p_content, max_results)
 
-        tops = self.searcher.search(top_level, max_results)
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
+            bks.append(b)
+            print("%s (%d) -> %f" % (b, b.id, found.score))
         return (bks, tops.totalHits)
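A hedged sketch of how the pieces added in this diff fit together, assuming Django settings are loaded, the index has already been built, and the `Search`/`IndexStore` constructors (not shown here) need no extra arguments:

    s = MultiSearch()
    tokens = s.get_tokens(u'lato w Soplicowie')  # sample query, analyzer-normalised

    books = s.search_perfect_book(tokens)        # exact author/title phrases first
    if not books:
        books = s.search_everywhere(tokens)      # fall back to the block-join search

    for r in SearchResult.aggregate(books):
        print("%s -> %f" % (r.book, r.score))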