X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/3fc3ab8693fc63c350e26484e876a8092f6b5d90..45b36abf753366dd523cae85a8eced7e5c9711e0:/apps/search/index.py?ds=sidebyside

diff --git a/apps/search/index.py b/apps/search/index.py
index 5ebae2c42..9972c2c56 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -1,34 +1,48 @@
 # -*- coding: utf-8 -*-
 from django.conf import settings
-from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
+from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
+    File, Field, Integer, \
     NumericField, Version, Document, JavaError, IndexSearcher, \
-    QueryParser, Term, PerFieldAnalyzerWrapper, \
+    QueryParser, PerFieldAnalyzerWrapper, \
     SimpleAnalyzer, PolishAnalyzer, ArrayList, \
-    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
-    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
+    KeywordAnalyzer, NumericRangeQuery, NumericRangeFilter, BooleanQuery, \
+    BlockJoinQuery, BlockJoinCollector, Filter, TermsFilter, ChainedFilter, \
     HashSet, BooleanClause, Term, CharTermAttribute, \
-    PhraseQuery, StringReader, TermQuery, BlockJoinQuery, \
-    Sort, Integer, \
-    initVM, CLASSPATH
+    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, \
+    FuzzyQuery, FuzzyTermEnum, PrefixTermEnum, Sort, \
+    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
+    BooleanFilter, FilterClause, QueryWrapperFilter, \
+    initVM, CLASSPATH, JArray
+
+# Initialize the JVM.
 JVM = initVM(CLASSPATH)
+
 import sys
 import os
+import re
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
+from lxml import etree
 import catalogue.models
 from multiprocessing.pool import ThreadPool
 from threading import current_thread
 import atexit
+import traceback
 
 
 class WLAnalyzer(PerFieldAnalyzerWrapper):
     def __init__(self):
         polish = PolishAnalyzer(Version.LUCENE_34)
+        # polish.setPositionIncrementGap(999)
+
         simple = SimpleAnalyzer(Version.LUCENE_34)
+        # simple.setPositionIncrementGap(999)
+
         keyword = KeywordAnalyzer(Version.LUCENE_34)
+        # not sure if needed: there's NOT_ANALYZED meaning basically the same
 
         PerFieldAnalyzerWrapper.__init__(self, polish)
 
@@ -40,13 +54,31 @@ class WLAnalyzer(PerFieldAnalyzerWrapper):
         self.addAnalyzer("source_url", keyword)
         self.addAnalyzer("source_name", simple)
         self.addAnalyzer("publisher", simple)
-        self.addAnalyzer("author", simple)
+        self.addAnalyzer("authors", simple)
+        self.addAnalyzer("title", simple)
+
+        self.addAnalyzer("is_book", keyword)
+        # shouldn't the title have two forms? _pl and simple?
 
-        #self.addanalyzer("fragment_anchor", keyword)
+        self.addAnalyzer("themes", simple)
+        self.addAnalyzer("themes_pl", polish)
+
+        self.addAnalyzer("tag_name", simple)
+        self.addAnalyzer("tag_name_pl", polish)
+
+        self.addAnalyzer("translators", simple)
+
+        self.addAnalyzer("KEYWORD", keyword)
+        self.addAnalyzer("SIMPLE", simple)
+        self.addAnalyzer("POLISH", polish)
 
 
 class IndexStore(object):
+    """
+    Provides access to the search index.
+
+    self.store - the Lucene index directory
+    """
     def __init__(self):
         self.make_index_dir()
         self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))
@@ -60,9 +92,77 @@ class IndexStore(object):
         else:
             raise
 
-class Index(IndexStore):
-    def __init__(self, analyzer=None):
+
+class IndexChecker(IndexStore):
+    def __init__(self):
         IndexStore.__init__(self)
+
+    def check(self):
+        checker = CheckIndex(self.store)
+        status = checker.checkIndex()
+        return status
+
+
+class Snippets(object):
+    """
+    This class manages snippet files for an indexed object (book).
+    Snippets are concatenated together in one file; their positions and
+    lengths are kept in Lucene index fields.
+    """
+    SNIPPET_DIR = "snippets"
+
+    def __init__(self, book_id):
+        try:
+            os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
+        except OSError as exc:
+            if exc.errno == errno.EEXIST:
+                pass
+            else: raise
+        self.book_id = book_id
+        self.file = None
+
+    def open(self, mode='r'):
+        """
+        Open the snippet file. Call .close() afterwards.
+        """
+        if 'b' not in mode:
+            mode += 'b'
+        self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
+        self.position = 0
+        return self
+
+    def add(self, snippet):
+        """
+        Append a snippet (unicode) to the snippet file.
+        Return a (position, length) tuple.
+        """
+        txt = snippet.encode('utf-8')
+        l = len(txt)
+        self.file.write(txt)
+        pos = (self.position, l)
+        self.position += l
+        return pos
+
+    def get(self, pos):
+        """
+        Given a (position, length) tuple, return a unicode string
+        with the snippet stored there.
+        """
+        self.file.seek(pos[0], 0)
+        txt = self.file.read(pos[1]).decode('utf-8')
+        return txt
+
+    def close(self):
+        """Close the snippet file."""
+        self.file.close()
+
+
+class BaseIndex(IndexStore):
+    """
+    Base index class.
+    Provides basic operations on the index: opening, closing, optimizing.
+    """
+    def __init__(self, analyzer=None):
+        super(BaseIndex, self).__init__()
         self.index = None
         if not analyzer:
             analyzer = WLAnalyzer()
@@ -75,27 +175,88 @@ class Index(IndexStore):
                                  IndexWriter.MaxFieldLength.LIMITED)
         return self.index
 
-    def close(self):
+    def optimize(self):
         self.index.optimize()
+
+    def close(self):
+        try:
+            self.index.optimize()
+        except JavaError as je:
+            print "Error during optimize phase, check index: %s" % je
+
         self.index.close()
         self.index = None
 
+    def __enter__(self):
+        self.open()
+        return self
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+
+class Index(BaseIndex):
+    """
+    Class indexing books.
+    """
+    def __init__(self, analyzer=None):
+        super(Index, self).__init__(analyzer)
+
+    def index_tags(self):
+        """
+        Re-index the global tag list.
+        Removes all tags from the index, then indexes them again.
+        Indexed fields include: id, name (with and without Polish stems) and category.
+        """
+        q = NumericRangeQuery.newIntRange("tag_id", 0, Integer.MAX_VALUE, True, True)
+        self.index.deleteDocuments(q)
+
+        for tag in catalogue.models.Tag.objects.all():
+            doc = Document()
+            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(int(tag.id)))
+            doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
+            self.index.addDocument(doc)
+
+    def create_book_doc(self, book):
+        """
+        Create a Lucene document referring to the book id.
+        """
+        doc = Document()
+        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(int(book.id)))
+        if book.parent is not None:
+            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(int(book.parent.id)))
+        return doc
+
     def remove_book(self, book):
-        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True,True)
+        """Removes a book from the search index.
+        book - Book instance."""
+        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)
 
-    def index_book(self, book, overwrite=True):
+    def index_book(self, book, book_info=None, overwrite=True):
+        """
+        Indexes the book.
+        Creates a Lucene document for the extracted metadata
+        and calls self.index_content() to index the contents of the book.
+        """
         if overwrite:
             self.remove_book(book)
 
-        doc = self.extract_metadata(book)
-        parts = self.extract_content(book)
-        block = ArrayList().of_(Document)
+        book_doc = self.create_book_doc(book)
+        meta_fields = self.extract_metadata(book, book_info)
+        for f in meta_fields.values():
+            if isinstance(f, list) or isinstance(f, tuple):
+                for elem in f:
+                    book_doc.add(elem)
+            else:
+                book_doc.add(f)
 
-        for p in parts:
-            block.add(p)
-        block.add(doc)
-        self.index.addDocuments(block)
+        self.index.addDocument(book_doc)
+        del book_doc
 
+        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['authors']])
 
     master_tags = [
         'opowiadanie',
@@ -108,25 +269,18 @@ class Index(IndexStore):
 
     skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
 
-    def create_book_doc(self, book):
+    def extract_metadata(self, book, book_info=None):
         """
-        Create a lucene document connected to the book
+        Extracts metadata from the book and returns a map of fields keyed by field name.
         """
-        doc = Document()
-        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
-        if book.parent is not None:
-            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
-        return doc
+        fields = {}
 
-    def extract_metadata(self, book):
-        book_info = dcparser.parse(book.xml_file)
+        if book_info is None:
+            book_info = dcparser.parse(open(book.xml_file.path))
 
-        print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident))
-
-        doc = self.create_book_doc(book)
-        doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
-        doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
-        doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))
+        fields['slug'] = Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)
+        fields['tags'] = self.add_gaps([Field("tags", t.name, Field.Store.NO, Field.Index.ANALYZED) for t in book.tags], 'tags')
+        fields['is_book'] = Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)
 
         # validator, name
         for field in dcparser.BookInfo.FIELDS:
@@ -140,7 +294,7 @@ class Index(IndexStore):
                 if field.multiple:
                     s = ', '.join(s)
                 try:
-                    doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED))
+                    fields[field.name] = Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)
                 except JavaError as je:
                     raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
             elif type_indicator == dcparser.as_person:
                 p = getattr(book_info, field.name)
@@ -149,40 +303,43 @@ class Index(IndexStore):
                     persons = unicode(p)
                 else:
                     persons = ', '.join(map(unicode, p))
-                doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED))
+                fields[field.name] = Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)
             elif type_indicator == dcparser.as_date:
                 dt = getattr(book_info, field.name)
-                doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED))
-        return doc
+                fields[field.name] = Field(field.name, "%04d%02d%02d" %\
+                                           (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)
+
+        return fields
+
+    def add_gaps(self, fields, fieldname):
+        """
+        Interposes a list of fields with gap-fields (indexed spaces) and returns it.
+        This allows phrase queries which do not overlap the gaps (when the slop is 0).
+        """
+        def gap():
+            while True:
+                yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
+        return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
 
     def get_master(self, root):
+        """
+        Returns the first master tag from an etree.
+        """
         for master in root.iter():
             if master.tag in self.master_tags:
                 return master
 
-    def extract_content(self, book):
-        wld = WLDocument.from_file(book.xml_file.path)
+    def index_content(self, book, book_fields=[]):
+        """
+        Walks the book XML and extracts content from it.
+        Adds parts for each header tag and for each fragment.
+        """
+        wld = WLDocument.from_file(book.xml_file.path, parse_dublincore=False)
         root = wld.edoc.getroot()
 
-        # first we build a sequence of top-level items.
-        # book_id
-        # header_index - the 0-indexed position of header element.
-        # content
         master = self.get_master(root)
         if master is None:
             return []
-
-        header_docs = []
-        for header, position in zip(list(master), range(len(master))):
-            if header.tag in self.skip_header_tags:
-                continue
-            doc = self.create_book_doc(book)
-            doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
-            doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
-            content = u' '.join([t for t in header.itertext()])
-            doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED))
-            header_docs.append(doc)
 
         def walker(node):
             yield node, None
             for child in list(node):
                 for b, e in walker(child):
                     yield b, e
@@ -192,59 +349,130 @@ class Index(IndexStore):
                 yield None, node
             return
 
-        # Then we create a document for each fragments
-        # fragment_anchor - the anchor
-        # themes - list of themes [not indexed]
-        fragment_docs = []
-        # will contain (framgent id -> { content: [], themes: [] }
-        fragments = {}
-        for start, end in walker(master):
-            if start is not None and start.tag == 'begin':
-                fid = start.attrib['id'][1:]
-                fragments[fid] = {'content': [], 'themes': []}
-                fragments[fid]['content'].append(start.tail)
-            elif start is not None and start.tag == 'motyw':
-                fid = start.attrib['id'][1:]
-                fragments[fid]['themes'].append(start.text)
-                fragments[fid]['content'].append(start.tail)
-            elif start is not None and start.tag == 'end':
-                fid = start.attrib['id'][1:]
-                if fid not in fragments:
-                    continue # a broken node, skip it
-                frag = fragments[fid]
-                del fragments[fid]
-
-                def jstr(l):
-                    return u' '.join(map(
-                        lambda x: x == None and u'(none)' or unicode(x),
-                        l))
-
-                doc = self.create_book_doc(book)
-                doc.add(Field("fragment_anchor", fid,
+        def fix_format(text):
+            return re.sub("(?m)/$", "", text)
+
+        def add_part(snippets, **fields):
+            doc = self.create_book_doc(book)
+            for f in book_fields:
+                doc.add(f)
+
+            doc.add(NumericField('header_index', Field.Store.YES, True).setIntValue(fields["header_index"]))
+            doc.add(NumericField("header_span", Field.Store.YES, True)\
+                    .setIntValue('header_span' in fields and fields['header_span'] or 1))
+            doc.add(Field('header_type', fields["header_type"], Field.Store.YES, Field.Index.NOT_ANALYZED))
+
+            doc.add(Field('content', fields["content"], Field.Store.NO, Field.Index.ANALYZED, \
+                          Field.TermVector.WITH_POSITIONS_OFFSETS))
+
+            snip_pos = snippets.add(fields["content"])
+            doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
+            doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[1]))
+
+            if 'fragment_anchor' in fields:
+                doc.add(Field("fragment_anchor", fields['fragment_anchor'],
                               Field.Store.YES, Field.Index.NOT_ANALYZED))
-                doc.add(Field("content",
-                              u' '.join(filter(lambda s: s is not None, frag['content'])),
-                              Field.Store.YES, Field.Index.ANALYZED))
-                doc.add(Field("themes",
-                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
-                              Field.Store.NO, Field.Index.ANALYZED))
-
-                fragment_docs.append(doc)
-            elif start is not None:
-                for frag in fragments.values():
-                    frag['content'].append(start.text)
-            elif end is not None:
-                for frag in fragments.values():
-                    frag['content'].append(end.tail)
-
-        return header_docs + fragment_docs
 
-    def __enter__(self):
-        self.open()
-        return self
+            if 'themes' in fields:
+                themes, themes_pl = zip(*[
+                    (Field("themes", theme, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS),
+                     Field("themes_pl", theme, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))
+                    for theme in fields['themes']])
 
-    def __exit__(self, type, value, tb):
-        self.close()
+                themes = self.add_gaps(themes, 'themes')
+                themes_pl = self.add_gaps(themes_pl, 'themes_pl')
+
+                for t in themes:
+                    doc.add(t)
+                for t in themes_pl:
+                    doc.add(t)
+
+            return doc
+
+        def give_me_utf8(s):
+            if isinstance(s, unicode):
+                return s.encode('utf-8')
+            else:
+                return s
+
+        fragments = {}
+        snippets = Snippets(book.id).open('w')
+        try:
+            for header, position in zip(list(master), range(len(master))):
+
+                if header.tag in self.skip_header_tags:
+                    continue
+                if header.tag is etree.Comment:
+                    continue
+
+                # section content
+                content = []
+
+                for start, end in walker(header):
+                    # handle fragments and themes.
+                    if start is not None and start.tag == 'begin':
+                        fid = start.attrib['id'][1:]
+                        fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
+
+                    elif start is not None and start.tag == 'motyw':
+                        fid = start.attrib['id'][1:]
+                        if start.text is not None:
+                            fragments[fid]['themes'] += map(str.strip, map(give_me_utf8, start.text.split(',')))
+
+                    elif start is not None and start.tag == 'end':
+                        fid = start.attrib['id'][1:]
+                        if fid not in fragments:
+                            continue # a broken node, skip it
+                        frag = fragments[fid]
+                        if frag['themes'] == []:
+                            continue # empty themes list.
+                        del fragments[fid]
+
+                        doc = add_part(snippets,
+                                       header_type=frag['start_header'],
+                                       header_index=frag['start_section'],
+                                       header_span=position - frag['start_section'] + 1,
+                                       fragment_anchor=fid,
+                                       content=u' '.join(filter(lambda s: s is not None, frag['content'])),
+                                       themes=frag['themes'])
+
+                        self.index.addDocument(doc)
+
+                    # Collect content.
+                    elif start is not None:
+                        for frag in fragments.values():
+                            frag['content'].append(start.text)
+                        content.append(start.text)
+                    elif end is not None:
+                        for frag in fragments.values():
+                            frag['content'].append(end.tail)
+                        content.append(end.tail)
+
+                # In the end, add the section text.
+                doc = add_part(snippets, header_index=position, header_type=header.tag,
+                               content=fix_format(u' '.join(filter(lambda s: s is not None, content))))
+
+                self.index.addDocument(doc)
+
+        finally:
+            snippets.close()
+
+
+def log_exception_wrapper(f):
+    def _wrap(*a):
+        try:
+            f(*a)
+        except Exception as e:
+            print("Error in indexing thread: %s" % e)
+            traceback.print_exc()
+            raise
+    return _wrap
 
 
 class ReusableIndex(Index):
@@ -256,34 +484,23 @@ class ReusableIndex(Index):
     if you cannot rely on atexit, use ReusableIndex.close_reusable() yourself.
     """
     index = None
-    pool = None
-    pool_jobs = None
 
     def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            print("opening index")
-            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread() )
-            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)

-    def index_book(self, *args, **kw):
-        job = ReusableIndex.pool.apply_async(Index.index_book, (self,) + args, kw)
-        ReusableIndex.pool_jobs.append(job)
+    # def index_book(self, *args, **kw):
+    #     job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
+    #     ReusableIndex.pool_jobs.append(job)

    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
-            print("closing index")
-            for job in ReusableIndex.pool_jobs:
-                job.get()
-                sys.stdout.write('.')
-                sys.stdout.flush()
-            ReusableIndex.pool.close()
-
            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None
@@ -292,20 +509,13 @@ class ReusableIndex(Index):
        pass
 
 
-class Search(IndexStore):
-    def __init__(self, default_field="content"):
-        IndexStore.__init__(self)
-        self.analyzer = PolishAnalyzer(Version.LUCENE_34)
-        ## self.analyzer = WLAnalyzer()
-        self.searcher = IndexSearcher(self.store, True)
-        self.parser = QueryParser(Version.LUCENE_34, default_field,
-                                  self.analyzer)
-
-        self.parent_filter = TermsFilter()
-        self.parent_filter.addTerm(Term("is_book", "true"))
-
-    def query(self, query):
-        return self.parser.parse(query)
+class JoinSearch(object):
+    """
+    This mixin can be used to handle block join queries.
+    (currently unused)
+    """
+    def __init__(self, *args, **kw):
+        super(JoinSearch, self).__init__(*args, **kw)
 
     def wrapjoins(self, query, fields=[]):
         """
@@ -330,28 +540,6 @@ class Search(IndexStore):
         return BlockJoinQuery(query, self.parent_filter,
                               BlockJoinQuery.ScoreMode.Total)
 
-    def simple_search(self, query, max_results=50):
-        """Returns (books, total_hits)
-        """
-
-        tops = self.searcher.search(self.query(query), max_results)
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
-        return (bks, tops.totalHits)
-
-    def search(self, query, max_results=50):
-        query = self.query(query)
-        query = self.wrapjoins(query, ["content", "themes"])
-
-        tops = self.searcher.search(query, max_results)
-        bks = []
-        for found in tops.scoreDocs:
-            doc = self.searcher.doc(found.doc)
-            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
-        return (bks, tops.totalHits)
-
     def bsearch(self, query, max_results=50):
         q = self.query(query)
         bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
@@ -363,68 +551,142 @@ class Search(IndexStore):
             bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
-# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
-# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
-# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
-
-# while (tokenStream.incrementToken()) {
-#     int startOffset = offsetAttribute.startOffset();
-#     int endOffset = offsetAttribute.endOffset();
-#     String term = charTermAttribute.toString();
-# }
-
 
 class SearchResult(object):
-    def __init__(self, searcher, scoreDocs, score=None):
+    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets=None, searched=None, tokens_cache=None):
+        if tokens_cache is None: tokens_cache = {}
+
         if score:
-            self.score = score
+            self._score = score
         else:
-            self.score = scoreDocs.score
+            self._score = scoreDocs.score
+
+        self.boost = 1.0
 
-        self.fragments = []
-        self.scores = {}
-        self.sections = []
+        self._hits = []
+        self.hits = None # processed hits
 
         stored = searcher.doc(scoreDocs.doc)
         self.book_id = int(stored.get("book_id"))
 
-        fragment = stored.get("fragment_anchor")
-        if fragment:
-            self.fragments.append(fragment)
-            self.scores[fragment] = scoreDocs.score
-
         header_type = stored.get("header_type")
-        if header_type:
-            sec = (header_type, int(stored.get("header_index")))
-            self.sections.append(sec)
-            self.scores[sec] = scoreDocs.score
+        if not header_type:
+            return
 
-    def get_book(self):
-        return catalogue.models.Book.objects.get(id=self.book_id)
+        sec = (header_type, int(stored.get("header_index")))
+        header_span = stored.get('header_span')
+        header_span = header_span is not None and int(header_span) or 1
 
-    book = property(get_book)
+        fragment = stored.get("fragment_anchor")
+
+        if snippets:
+            snippets = snippets.replace("/\n", "\n")
+        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets and [snippets] or []})
 
-    def get_parts(self):
-        book = self.book
-        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
-            + [{"fragment": book.fragments.get(anchor=f), '_score_key':f} for f in self.fragments]
+        self._hits.append(hit)
 
-        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
-        print("bookid: %d parts: %s" % (self.book_id, parts))
-        return parts
+        self.searcher = searcher
+        self.searched = searched
+        self.tokens_cache = tokens_cache
 
-    parts = property(get_parts)
+    @property
+    def score(self):
+        return self._score * self.boost
 
     def merge(self, other):
         if self.book_id != other.book_id:
-            raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id))
-        self.fragments += other.fragments
-        self.sections += other.sections
-        self.scores.update(other.scores)
+            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
+        self._hits += other._hits
         if other.score > self.score:
-            self.score = other.score
+            self._score = other._score
         return self
 
+    def get_book(self):
+        return catalogue.models.Book.objects.get(id=self.book_id)
+
+    book = property(get_book)
+
+    def process_hits(self):
+        POSITION = 0
+        FRAGMENT = 1
+        POSITION_INDEX = 1
+        POSITION_SPAN = 2
+        SCORE = 2
+        OTHER = 3
+
+        # to sections and fragments
+        frags = filter(lambda r: r[FRAGMENT] is not None, self._hits)
+        sect = filter(lambda r: r[FRAGMENT] is None, self._hits)
+        # keep only sections not covered by any fragment's span
+        sect = filter(lambda s: 0 == len(filter(
+            lambda f: s[POSITION][POSITION_INDEX] >= f[POSITION][POSITION_INDEX]
+            and s[POSITION][POSITION_INDEX] < f[POSITION][POSITION_INDEX] + f[POSITION][POSITION_SPAN],
+            frags)), sect)
+
+        hits = []
+
+        # remove duplicate fragments
+        fragments = {}
+        for f in frags:
+            fid = f[FRAGMENT]
+            if fid in fragments:
+                if fragments[fid][SCORE] >= f[SCORE]:
+                    continue
+            fragments[fid] = f
+        frags = fragments.values()
+
+        # remove duplicate sections
+        sections = {}
+
+        for s in sect:
+            si = s[POSITION][POSITION_INDEX]
+            # skip existing
+            if si in sections:
+                if sections[si]['score'] >= s[SCORE]:
+                    continue
+
+            m = {'score': s[SCORE],
+                 'section_number': s[POSITION][POSITION_INDEX] + 1,
+                 }
+            m.update(s[OTHER])
+            sections[si] = m
+
+        hits = sections.values()
+
+        for f in frags:
+            try:
+                frag = catalogue.models.Fragment.objects.get(anchor=f[FRAGMENT])
+            except catalogue.models.Fragment.DoesNotExist:
+                # stale index
+                continue
+
+            # Figure out if we were searching for a token matching some word in the theme name.
+            themes = frag.tags.filter(category='theme')
+            themes_hit = []
+            if self.searched is not None:
+                tokens = self.searcher.get_tokens(self.searched, 'POLISH', cached=self.tokens_cache)
+                for theme in themes:
+                    name_tokens = self.searcher.get_tokens(theme.name, 'POLISH')
+                    for t in tokens:
+                        if t in name_tokens:
+                            if theme not in themes_hit:
+                                themes_hit.append(theme)
+                            break
+
+            m = {'score': f[SCORE],
+                 'fragment': frag,
+                 'section_number': f[POSITION][POSITION_INDEX] + 1,
+                 'themes': themes,
+                 'themes_hit': themes_hit
+                 }
+            m.update(f[OTHER])
+            hits.append(m)
+
+        hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
+
+        self.hits = hits
+
+        return self
 
     def __unicode__(self):
         return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
@@ -444,98 +706,383 @@ class SearchResult(object):
         return cmp(self.score, other.score)
 
 
-class MultiSearch(Search):
-    """Class capable of IMDb-like searching"""
-    def get_tokens(self, queryreader):
-        if isinstance(queryreader, str) or isinstance(queryreader, unicode):
-            queryreader = StringReader(queryreader)
-        queryreader.reset()
-        tokens = self.analyzer.reusableTokenStream('content', queryreader)
+class Hint(object):
+    """
+    Given hint information we already know about the search target -
+    author, title (a specific book), epoch, genre, kind -
+    we can narrow down the search using filters.
+    """
+    def __init__(self, search):
+        """
+        Accepts a Search instance.
+        """
+        self.search = search
+        self.book_tags = {}
+        self.part_tags = []
+        self._books = []
+
+    def books(self, *books):
+        """
+        Give a hint that we search for these books.
+        """
+        self._books = books
+
+    def tags(self, tags):
+        """
+        Give a hint that these Tag objects (a list) are relevant to the search.
+        """
+        for t in tags:
+            if t.category in ['author', 'title', 'epoch', 'genre', 'kind']:
+                lst = self.book_tags.get(t.category, [])
+                lst.append(t)
+                self.book_tags[t.category] = lst
+            if t.category in ['theme', 'theme_pl']:
+                self.part_tags.append(t)
+
+    def tag_filter(self, tags, field='tags'):
+        """
+        Given a list of tags and an optional field (they are normally in the tags field),
+        returns a filter accepting only books with the specified tags.
+        """
+        q = BooleanQuery()
+
+        for tag in tags:
+            toks = self.search.get_tokens(tag.name, field=field)
+            tag_phrase = PhraseQuery()
+            for tok in toks:
+                tag_phrase.add(Term(field, tok))
+            q.add(BooleanClause(tag_phrase, BooleanClause.Occur.MUST))
+
+        return QueryWrapperFilter(q)
+
+    def book_filter(self):
+        """
+        Filters using book tags (all tag categories except theme).
+        """
+        tags = reduce(lambda a, b: a + b, self.book_tags.values(), [])
+        if tags:
+            return self.tag_filter(tags)
+        else:
+            return None
+
+    def part_filter(self):
+        """
+        This filter can be used to look for book parts.
+        It filters on book id and/or themes.
+        """
+        fs = []
+        if self.part_tags:
+            fs.append(self.tag_filter(self.part_tags, field='themes'))
+
+        if self._books != []:
+            bf = BooleanFilter()
+            for b in self._books:
+                id_filter = NumericRangeFilter.newIntRange('book_id', b.id, b.id, True, True)
+                bf.add(FilterClause(id_filter, BooleanClause.Occur.SHOULD))
+            fs.append(bf)
+
+        return Search.chain_filters(fs)
+
+    def should_search_for_book(self):
+        return self._books == []
+
+    def just_search_in(self, all):
+        """Returns the fields that still need to be searched, given the hints we already have."""
+        some = []
+        for field in all:
+            if field == 'authors' and 'author' in self.book_tags:
+                continue
+            if field == 'title' and self._books != []:
+                continue
+            if (field == 'themes' or field == 'themes_pl') and self.part_tags:
+                continue
+            some.append(field)
+        return some
+
+
+class Search(IndexStore):
+    """
+    Search facilities.
+    """
+    def __init__(self, default_field="content"):
+        IndexStore.__init__(self)
+        self.analyzer = WLAnalyzer() # PolishAnalyzer(Version.LUCENE_34)
+        self.searcher = IndexSearcher(self.store, True)
+        self.parser = QueryParser(Version.LUCENE_34, default_field,
+                                  self.analyzer)
+
+        self.parent_filter = TermsFilter()
+        self.parent_filter.addTerm(Term("is_book", "true"))
+
+    def query(self, query):
+        """Parse a query in the default Lucene syntax. (for humans)
+        """
+        return self.parser.parse(query)
+
+    def simple_search(self, query, max_results=50):
+        """Runs a query for books using Lucene syntax. (for humans)
+        Returns (books, total_hits).
+        """
+
+        tops = self.searcher.search(self.query(query), max_results)
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return (bks, tops.totalHits)
+
+    def get_tokens(self, searched, field='content', cached=None):
+        """Returns tokens analyzed by the proper (per-field) analyzer.
+        The argument can be: a StringReader, a string/unicode, or tokens. In the last case
+        they will just be returned (so tokens can be reused if the analyzer doesn't change).
+        """
+        if cached is not None and field in cached:
+            return cached[field]
+
+        if isinstance(searched, str) or isinstance(searched, unicode):
+            searched = StringReader(searched)
+        elif isinstance(searched, list):
+            return searched
+
+        searched.reset()
+        tokens = self.analyzer.reusableTokenStream(field, searched)
         toks = []
         while tokens.incrementToken():
             cta = tokens.getAttribute(CharTermAttribute.class_)
             toks.append(cta.toString())
+
+        if cached is not None:
+            cached[field] = toks
+
         return toks
 
-    def make_phrase(self, tokens, field='content', slop=2):
-        phrase = PhraseQuery()
-        phrase.setSlop(slop)
-        for t in tokens:
-            term = Term(field, t)
-            phrase.add(term)
+    def fuzziness(self, fuzzy):
+        """Helper method to sanitize the fuzziness parameter."""
+        if not fuzzy:
+            return None
+        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
+            return fuzzy
+        else:
+            return 0.5
+
+    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
+        """
+        Return a PhraseQuery with a series of tokens.
+        """
+        if fuzzy:
+            phrase = MultiPhraseQuery()
+            for t in tokens:
+                term = Term(field, t)
+                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
+                fuzzterms = []
+
+                while True:
+                    # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
+                    ft = fuzzterm.term()
+                    if ft:
+                        fuzzterms.append(ft)
+                    if not fuzzterm.next(): break
+
+                if fuzzterms:
+                    phrase.add(JArray('object')(fuzzterms, Term))
+                else:
+                    phrase.add(term)
+        else:
+            phrase = PhraseQuery()
+            phrase.setSlop(slop)
+            for t in tokens:
+                term = Term(field, t)
+                phrase.add(term)
         return phrase
 
-    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD):
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
+        """
+        Returns term queries joined by a boolean query.
+        modal - applies to the boolean query
+        fuzzy - should the query be fuzzy.
+        """
         q = BooleanQuery()
         for t in tokens:
             term = Term(field, t)
-            q.add(BooleanClause(TermQuery(term), modal))
+            if fuzzy:
+                term = FuzzyQuery(term, self.fuzziness(fuzzy))
+            else:
+                term = TermQuery(term)
+            q.add(BooleanClause(term, modal))
         return q
 
-    def content_query(self, query):
-        return BlockJoinQuery(query, self.parent_filter,
-                              BlockJoinQuery.ScoreMode.Total)
+    def search_phrase(self, searched, field, book=True, max_results=20, fuzzy=False,
+                      filters=None, tokens_cache=None, boost=None):
+        if filters is None: filters = []
+        if tokens_cache is None: tokens_cache = {}
+
+        tokens = self.get_tokens(searched, field, cached=tokens_cache)
+
+        query = self.make_phrase(tokens, field=field, fuzzy=fuzzy)
+        if book:
+            filters.append(self.term_filter(Term('is_book', 'true')))
+        top = self.searcher.search(query, self.chain_filters(filters), max_results)
+
+        return [SearchResult(self.searcher, found) for found in top.scoreDocs]
+
+    def search_some(self, searched, fields, book=True, max_results=20, fuzzy=False,
+                    filters=None, tokens_cache=None, boost=None):
+        if filters is None: filters = []
+        if tokens_cache is None: tokens_cache = {}
+
+        if book:
+            filters.append(self.term_filter(Term('is_book', 'true')))
+
+        query = BooleanQuery()
+
+        for fld in fields:
+            tokens = self.get_tokens(searched, fld, cached=tokens_cache)
+
+            query.add(BooleanClause(self.make_term_query(tokens, field=fld,
+                                    fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        top = self.searcher.search(query, self.chain_filters(filters), max_results)
 
-    def search_perfect_book(self, tokens, max_results=20):
-        qrys = [self.make_phrase(tokens, field=fld) for fld in ['author', 'title']]
+        return [SearchResult(self.searcher, found, searched=searched, tokens_cache=tokens_cache) for found in top.scoreDocs]
+
+    def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
+        """
+        Search for perfect book matches: see if the query matches some author or title,
+        taking hints into account.
+        """
+        fields_to_search = ['authors', 'title']
+        only_in = None
+        if hint:
+            if not hint.should_search_for_book():
+                return []
+            fields_to_search = hint.just_search_in(fields_to_search)
+            only_in = hint.book_filter()
+
+        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]
 
         books = []
         for q in qrys:
-            top = self.searcher.search(q, max_results)
+            top = self.searcher.search(q,
+                self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
+                max_results)
             for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found))
+                books.append(SearchResult(self.searcher, found, how_found="search_perfect_book"))
         return books
 
-    def search_perfect_parts(self, tokens, max_results=20):
-        qrys = [self.make_phrase(tokens, field=fld) for fld in ['content']]
+    def search_book(self, searched, max_results=20, fuzzy=False, hint=None):
+        fields_to_search = ['tags', 'authors', 'title']
+
+        only_in = None
+        if hint:
+            if not hint.should_search_for_book():
+                return []
+            fields_to_search = hint.just_search_in(fields_to_search)
+            only_in = hint.book_filter()
+
+        tokens = self.get_tokens(searched, field='SIMPLE')
+
+        q = BooleanQuery()
+
+        for fld in fields_to_search:
+            q.add(BooleanClause(self.make_term_query(tokens, field=fld,
+                                fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        books = []
+        top = self.searcher.search(q,
+            self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
+            max_results)
+        for found in top.scoreDocs:
+            books.append(SearchResult(self.searcher, found, how_found="search_book"))
+
+        return books
+
+    def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
+        """
+        Search for book parts which contain a phrase perfectly matching (with a slop of 2,
+        the default for make_phrase()) some part/fragment of the book.
+        """
+        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]
+
+        flt = None
+        if hint:
+            flt = hint.part_filter()
 
         books = []
         for q in qrys:
-            top = self.searcher.search(q, max_results)
+            top = self.searcher.search(q,
+                                       self.chain_filters([self.term_filter(Term('is_book', 'true'), inverse=True),
+                                                           flt]),
+                                       max_results)
             for found in top.scoreDocs:
-                books.append(SearchResult(self.searcher, found))
+                books.append(SearchResult(self.searcher, found, snippets=self.get_snippets(found, q), how_found='search_perfect_parts'))
 
         return books
 
-    def search_everywhere(self, tokens, max_results=20):
+    def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None, tokens_cache=None):
+        """
+        Tries to use the search terms to match different fields of a book (or its parts).
+        E.g. one word can be an author surname, another a part of the title, and the rest
+        some words from the third chapter.
+        """
+        if tokens_cache is None: tokens_cache = {}
+
+        books = []
+        only_in = None
+
+        if hint:
+            only_in = hint.part_filter()
+
+        # content-only query: themes x content
         q = BooleanQuery()
 
-        in_meta = BooleanQuery()
-        in_content = BooleanQuery()
+        tokens_pl = self.get_tokens(searched, field='content', cached=tokens_cache)
+        tokens = self.get_tokens(searched, field='SIMPLE', cached=tokens_cache)
 
-        for fld in ['themes', 'content']:
-            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+        # only search in themes when we do not already filter by themes
+        if hint is None or hint.just_search_in(['themes']) != []:
+            q.add(BooleanClause(self.make_term_query(tokens_pl, field='themes_pl',
+                                                     fuzzy=fuzzy), BooleanClause.Occur.MUST))
 
-        for fld in ['author', 'title', 'epochs', 'genres', 'kinds']:
-            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+        q.add(BooleanClause(self.make_term_query(tokens_pl, field='content',
+                                                 fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
 
-        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
-        in_content_join = self.content_query(in_content)
-        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
+        topDocs = self.searcher.search(q, only_in, max_results)
+        for found in topDocs.scoreDocs:
+            books.append(SearchResult(self.searcher, found, how_found='search_everywhere_themesXcontent'))
+            print "* %s theme x content: %s" % (searched, books[-1]._hits)
 
-        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+        # query themes/content x author/title/tags
+        q = BooleanQuery()
+        in_content = BooleanQuery()
+        in_meta = BooleanQuery()
 
-        self.searcher.search(q, collector)
+        for fld in ['themes_pl', 'content']:
+            in_content.add(BooleanClause(self.make_term_query(tokens_pl, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))
 
-        books = []
+        for fld in ['tags', 'authors', 'title']:
+            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))
 
-        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
-        if top_groups:
-            for grp in top_groups.groups:
-                for part in grp.scoreDocs:
-                    books.append(SearchResult(self.searcher, part, score=grp.maxScore))
+        q.add(BooleanClause(in_content, BooleanClause.Occur.MUST))
+        q.add(BooleanClause(in_meta, BooleanClause.Occur.SHOULD))
+
+        topDocs = self.searcher.search(q, only_in, max_results)
+        for found in topDocs.scoreDocs:
+            books.append(SearchResult(self.searcher, found, how_found='search_everywhere'))
+            print "* %s scatter search: %s" % (searched, books[-1]._hits)
+
         return books
 
-    def multisearch(self, query, max_results=50):
-        """
-        Search strategy:
-        - (phrase) OR -> content
-                      -> title
-                      -> author
-        - (keywords)  -> author
-                      -> motyw
-                      -> tags
-                      -> content
-        """
+    # def multisearch(self, query, max_results=50):
+    #     """
+    #     Search strategy:
+    #     - (phrase) OR -> content
+    #                   -> title
    #                   -> authors
    #     - (keywords)  -> authors
    #                   -> motyw
    #                   -> tags
    #                   -> content
    #     """
 
        # queryreader = StringReader(query)
        # tokens = self.get_tokens(queryreader)
@@ -565,17 +1112,147 @@ class MultiSearch(Search):
 
        #     top_level.add(BooleanClause(phrase_level, Should))
        #     top_level.add(BooleanClause(kw_level, Should))
 
-        return None
+        # return None
+
+    def get_snippets(self, scoreDoc, query, field='content'):
+        """
+        Returns a snippet for a found scoreDoc.
+        """
+        htmlFormatter = SimpleHTMLFormatter()
+        highlighter = Highlighter(htmlFormatter, QueryScorer(query))
+
+        stored = self.searcher.doc(scoreDoc.doc)
+
+        # locate content.
+        snippets = Snippets(stored.get('book_id')).open()
+        try:
+            text = snippets.get((int(stored.get('snippets_position')),
+                                 int(stored.get('snippets_length'))))
+        finally:
+            snippets.close()
+
+        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
+        # highlighter.getBestTextFragments(tokenStream, text, False, 10)
+        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
+
+        return snip
+
+    @staticmethod
+    def enum_to_array(enum):
+        """
+        Converts a Lucene TermEnum to an array of Terms, suitable for
+        adding to queries.
+        """
+        terms = []
+
+        while True:
+            t = enum.term()
+            if t:
+                terms.append(t)
+            if not enum.next(): break
+
+        if terms:
+            return JArray('object')(terms, Term)
+
+    def search_tags(self, query, filter=None, max_results=40):
+        """
+        Search for Tag objects using query.
+        """
+        tops = self.searcher.search(query, filter, max_results)
+
+        tags = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
+            tags.append(tag)
+            print "%s (%d) -> %f" % (tag, tag.id, found.score)
+
+        return tags
+
+    def search_books(self, query, filter=None, max_results=10):
+        """
+        Searches for Book objects using query.
+        """
+        bks = []
+        tops = self.searcher.search(query, filter, max_results)
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return bks
+
+    def create_prefix_phrase(self, toks, field):
+        q = MultiPhraseQuery()
+        for i in range(len(toks)):
+            t = Term(field, toks[i])
+            if i == len(toks) - 1:
+                pterms = Search.enum_to_array(PrefixTermEnum(self.searcher.getIndexReader(), t))
+                if pterms:
+                    q.add(pterms)
+                else:
+                    q.add(t)
+            else:
+                q.add(t)
+        return q
+
+    @staticmethod
+    def term_filter(term, inverse=False):
+        only_term = TermsFilter()
+        only_term.addTerm(term)
+
+        if inverse:
+            neg = BooleanFilter()
+            neg.add(FilterClause(only_term, BooleanClause.Occur.MUST_NOT))
+            only_term = neg
+
+        return only_term
+
+    def hint_tags(self, string, max_results=50):
+        """
+        Returns auto-complete hints for tags,
+        using prefix search.
+        """
+        toks = self.get_tokens(string, field='SIMPLE')
+        top = BooleanQuery()
+
+        for field in ['tag_name', 'tag_name_pl']:
+            q = self.create_prefix_phrase(toks, field)
+            top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))
+
+        no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)
+
+        return self.search_tags(top, no_book_cat, max_results=max_results)
+
+    def hint_books(self, string, max_results=50):
+        """
+        Returns auto-complete hints for book titles, using prefix search
+        (we do not index 'pseudo' title-tags).
+        """
+        toks = self.get_tokens(string, field='SIMPLE')
+
+        q = self.create_prefix_phrase(toks, 'title')
+
+        return self.search_books(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
+
+    @staticmethod
+    def chain_filters(filters, op=ChainedFilter.AND):
+        """
+        Chains a list of filters together, skipping Nones.
+        """
+        filters = filter(lambda x: x is not None, filters)
+        if not filters:
+            return None
+        chf = ChainedFilter(JArray('object')(filters, Filter), op)
+        return chf
+
+    def filtered_categories(self, tags):
+        """
+        Returns a list of tag categories present in the tags list.
+        """
+        cats = {}
+        for t in tags:
+            cats[t.category] = True
+        return cats.keys()
+
+    def hint(self):
+        return Hint(self)
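
For orientation while reviewing, here is a minimal sketch of how the indexing side introduced in this patch might be driven. It is hypothetical and not part of the change; it assumes a configured settings.SEARCH_INDEX, books in the catalogue app, and the apps/ directory on the Python path.

# Hypothetical driver sketch, not part of this change.
import catalogue.models
from search.index import Index

# BaseIndex defines __enter__/__exit__, so the writer is opened and
# closed (with an optimize pass) automatically.
with Index() as index:
    index.index_tags()  # wipe and re-add all Tag documents
    for book in catalogue.models.Book.objects.all():
        # removes any stale documents for the book, stores its metadata,
        # then indexes every section and fragment, appending the raw text
        # of each part to the per-book snippet file
        index.index_book(book)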
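
The query side can be sketched similarly (again hypothetical; the sample query strings and the author-tag lookup are made up for illustration):

# Hypothetical query sketch, not part of this change.
import catalogue.models
from search.index import Search

search = Search()

# A Hint narrows the search: author/title/epoch/genre/kind tags filter
# whole books, theme tags filter parts (fragments).
hint = search.hint()
hint.tags(list(catalogue.models.Tag.objects.filter(category='author')[:1]))

results = search.search_perfect_book(u"pan tadeusz", fuzzy=0.7, hint=hint)
results += search.search_perfect_parts(u"litwo ojczyzno moja", hint=hint)

for r in results:
    r.process_hits()  # deduplicate raw hits into sections/fragments
    print r.book, r.score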
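
Finally, the snippet store is easiest to see in isolation. A tiny round-trip with a made-up book id; the (position, length) pair returned by add() is what index_content() stores in the snippets_position and snippets_length fields:

# Hypothetical round-trip through the snippet store, not part of this change.
from search.index import Snippets

snips = Snippets(123).open('w')
try:
    pos = snips.add(u"Litwo! Ojczyzno moja!")  # -> (byte offset, byte length)
finally:
    snips.close()

snips = Snippets(123).open()  # default mode is 'r'
try:
    assert snips.get(pos) == u"Litwo! Ojczyzno moja!"
finally:
    snips.close()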