# -*- coding: utf-8 -*-

from django.conf import settings

from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
    File, Field, Integer, \
    NumericField, Version, Document, JavaError, IndexSearcher, \
    QueryParser, PerFieldAnalyzerWrapper, \
    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
    KeywordAnalyzer, NumericRangeQuery, NumericRangeFilter, BooleanQuery, \
    BlockJoinQuery, BlockJoinCollector, Filter, TermsFilter, ChainedFilter, \
    HashSet, BooleanClause, Term, CharTermAttribute, \
    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, \
    FuzzyQuery, FuzzyTermEnum, PrefixTermEnum, Sort, \
    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
    BooleanFilter, FilterClause, QueryWrapperFilter, \
    initVM, CLASSPATH, JArray

JVM = initVM(CLASSPATH)

import sys
import os
import re
import errno
import atexit
import traceback

from librarian import dcparser
from librarian.parser import WLDocument
import catalogue.models
from multiprocessing.pool import ThreadPool
from threading import current_thread
 
class WLAnalyzer(PerFieldAnalyzerWrapper):
    def __init__(self):
        polish = PolishAnalyzer(Version.LUCENE_34)
        # polish_gap.setPositionIncrementGap(999)

        simple = SimpleAnalyzer(Version.LUCENE_34)
        # simple_gap.setPositionIncrementGap(999)

        keyword = KeywordAnalyzer(Version.LUCENE_34)
        # not sure if needed: there's NOT_ANALYZED, which means basically the same

        PerFieldAnalyzerWrapper.__init__(self, polish)

        self.addAnalyzer("tags", simple)
        self.addAnalyzer("technical_editors", simple)
        self.addAnalyzer("editors", simple)
        self.addAnalyzer("url", keyword)
        self.addAnalyzer("source_url", keyword)
        self.addAnalyzer("source_name", simple)
        self.addAnalyzer("publisher", simple)
        self.addAnalyzer("author", simple)
        self.addAnalyzer("is_book", keyword)

        self.addAnalyzer("themes", simple)
        self.addAnalyzer("themes_pl", polish)

        self.addAnalyzer("tag_name", simple)
        self.addAnalyzer("tag_name_pl", polish)

        self.addAnalyzer("KEYWORD", keyword)
        self.addAnalyzer("SIMPLE", simple)
        self.addAnalyzer("POLISH", polish)
 
class IndexStore(object):
    def __init__(self):
        self.make_index_dir()
        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))

    def make_index_dir(self):
        try:
            os.makedirs(settings.SEARCH_INDEX)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
 
class IndexChecker(IndexStore):
    def __init__(self):
        IndexStore.__init__(self)

    def check(self):
        checker = CheckIndex(self.store)
        status = checker.checkIndex()
        return status
 
class Snippets(object):
    SNIPPET_DIR = "snippets"

    def __init__(self, book_id):
        try:
            os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
        self.book_id = book_id
        self.file = None

    def open(self, mode='r'):
        self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
        self.position = 0
        return self

    def add(self, snippet):
        txt = snippet.encode('utf-8')
        l = len(txt)
        self.file.write(txt)
        pos = (self.position, l)
        self.position += l
        print("Snip<%s>%s</s>" % (pos, txt))
        return pos

    def get(self, pos):
        self.file.seek(pos[0], 0)
        txt = self.file.read(pos[1]).decode('utf-8')
        print("got from snippets %d bytes from %s:" % (len(txt), pos))
        return txt

    def close(self):
        self.file.close()
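
# A rough usage sketch for Snippets:
#   snippets = Snippets(book.id).open('w')
#   pos = snippets.add(u"some fragment text")   # -> (position, length) in the snippet file
#   text = Snippets(book.id).open().get(pos)    # reads the same text back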
 
class Index(IndexStore):
    def __init__(self, analyzer=None):
        IndexStore.__init__(self)
        self.index = None
        if not analyzer:
            analyzer = WLAnalyzer()
        self.analyzer = analyzer

    def open(self, analyzer=None):
        if self.index:
            raise Exception("Index is already opened")
        self.index = IndexWriter(self.store, self.analyzer,
                                 IndexWriter.MaxFieldLength.LIMITED)

    def optimize(self):
        self.index.optimize()

    def close(self):
        try:
            self.index.optimize()
        except JavaError as je:
            print("Error during optimize phase, check index: %s" % je)
        self.index.close()
        self.index = None
 
    def index_tags(self):
        q = NumericRangeQuery.newIntRange("tag_id", 0, Integer.MAX_VALUE, True, True)
        self.index.deleteDocuments(q)

        for tag in catalogue.models.Tag.objects.all():
            doc = Document()
            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(tag.id))
            doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
            self.index.addDocument(doc)
 
    def remove_book(self, book):
        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)
 
    def index_book(self, book, overwrite=True):
        if overwrite:
            self.remove_book(book)

        book_doc = self.create_book_doc(book)
        meta_fields = self.extract_metadata(book)
        for f in meta_fields.values():
            if isinstance(f, list) or isinstance(f, tuple):
                for elem in f:
                    book_doc.add(elem)
            else:
                book_doc.add(f)
        self.index.addDocument(book_doc)

        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['author']])
 
    master_tags = [
        'dramat_wierszowany_l',
        'dramat_wierszowany_lp',
        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
        ]

    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
 
    def create_book_doc(self, book):
        """
        Create a Lucene document referring to the book.
        """
        doc = Document()
        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
        if book.parent is not None:
            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
        return doc
 
    def extract_metadata(self, book):
        fields = {}
        book_info = dcparser.parse(book.xml_file)

        print("extract metadata for book %s id=%d, thread %d" % (book.slug, book.id, current_thread().ident))

        fields['slug'] = Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)
        fields['tags'] = self.add_gaps([Field("tags", t.name, Field.Store.NO, Field.Index.ANALYZED) for t in book.tags], 'tags')
        fields['is_book'] = Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)

        for field in dcparser.BookInfo.FIELDS:
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we use the validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    try:
                        fields[field.name] = Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)
                    except JavaError as je:
                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
                elif type_indicator == dcparser.as_person:
                    p = getattr(book_info, field.name)
                    if isinstance(p, dcparser.Person):
                        persons = unicode(p)
                    else:
                        persons = ', '.join(map(unicode, p))
                    fields[field.name] = Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)
                elif type_indicator == dcparser.as_date:
                    dt = getattr(book_info, field.name)
                    fields[field.name] = Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day),
                                               Field.Store.NO, Field.Index.NOT_ANALYZED)

        return fields
 
    def get_master(self, root):
        for master in root.iter():
            if master.tag in self.master_tags:
                return master
 
    def add_gaps(self, fields, fieldname):
        def gap():
            while True:
                yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
        return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
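
    # e.g. add_gaps([f1, f2, f3], 'tags') -> [f1, gap, f2, gap, f3]; the blank
    # NOT_ANALYZED field acts as a separator between values, a stand-in for the
    # position increment gap commented out in WLAnalyzer above.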
 
    def index_content(self, book, book_fields=[]):
        wld = WLDocument.from_file(book.xml_file.path)
        root = wld.edoc.getroot()

        master = self.get_master(root)

        def walker(node):
            yield node, None
            for child in list(node):
                for b, e in walker(child):
                    yield b, e
            yield None, node
            return

        def fix_format(text):
            return re.sub("/$", "", text, flags=re.M)

        def add_part(snippets, **fields):
            doc = self.create_book_doc(book)
            for f in book_fields:
                doc.add(f)

            doc.add(NumericField('header_index', Field.Store.YES, True).setIntValue(fields["header_index"]))
            doc.add(NumericField("header_span", Field.Store.YES, True)
                    .setIntValue('header_span' in fields and fields['header_span'] or 1))
            doc.add(Field('header_type', fields["header_type"], Field.Store.YES, Field.Index.NOT_ANALYZED))

            doc.add(Field('content', fields["content"], Field.Store.NO, Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS_OFFSETS))

            snip_pos = snippets.add(fields["content"])
            doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
            doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[1]))

            if 'fragment_anchor' in fields:
                doc.add(Field("fragment_anchor", fields['fragment_anchor'],
                              Field.Store.YES, Field.Index.NOT_ANALYZED))

            if 'themes' in fields:
                themes, themes_pl = zip(*[
                    (Field("themes", theme, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS),
                     Field("themes_pl", theme, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))
                    for theme in fields['themes']])

                themes = self.add_gaps(themes, 'themes')
                themes_pl = self.add_gaps(themes_pl, 'themes_pl')

                for t in themes:
                    doc.add(t)
                for t in themes_pl:
                    doc.add(t)

            return doc
 
        fragments = {}
        snippets = Snippets(book.id).open('w')
        try:
            for header, position in zip(list(master), range(len(master))):

                if header.tag in self.skip_header_tags:
                    continue

                content = u' '.join([t for t in header.itertext()])
                content = fix_format(content)

                doc = add_part(snippets, header_index=position, header_type=header.tag, content=content)

                self.index.addDocument(doc)

                for start, end in walker(header):
                    if start is not None and start.tag == 'begin':
                        fid = start.attrib['id'][1:]
                        fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
                        fragments[fid]['content'].append(start.tail)
                    elif start is not None and start.tag == 'motyw':
                        fid = start.attrib['id'][1:]
                        fragments[fid]['themes'].append(start.text)
                        fragments[fid]['content'].append(start.tail)
                    elif start is not None and start.tag == 'end':
                        fid = start.attrib['id'][1:]
                        if fid not in fragments:
                            continue  # a broken <end> node, skip it
                        frag = fragments[fid]
                        del fragments[fid]

                        def jstr(l):
                            return u' '.join(map(
                                lambda x: x is None and u'(none)' or unicode(x),
                                l))

                        doc = add_part(snippets,
                                       header_type=frag['start_header'],
                                       header_index=frag['start_section'],
                                       header_span=position - frag['start_section'] + 1,
                                       fragment_anchor=fid,
                                       content=u' '.join(filter(lambda s: s is not None, frag['content'])),
                                       themes=frag['themes'])

                        self.index.addDocument(doc)
                    elif start is not None:
                        for frag in fragments.values():
                            frag['content'].append(start.text)
                    elif end is not None:
                        for frag in fragments.values():
                            frag['content'].append(end.tail)
        finally:
            snippets.close()
 
    def __exit__(self, type, value, tb):
        self.close()
 
def log_exception_wrapper(f):
    def _wrap(*args, **kw):
        try:
            f(*args, **kw)
        except Exception as e:
            print("Error in indexing thread: %s" % e)
            traceback.print_exc()
    return _wrap
 
class ReusableIndex(Index):
    """
    Works like Index, but does not close/optimize the Lucene index
    until program exit (uses an atexit hook).
    This is useful for the importbooks command.

    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
    """
    index = None
    pool = None
    pool_jobs = None
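
    # A rough usage sketch (assumes `books` is an iterable of catalogue Book objects):
    #   idx = ReusableIndex()
    #   idx.open()
    #   for book in books:
    #       idx.index_book(book)          # queued on the thread pool
    #   ReusableIndex.close_reusable()    # or let the atexit hook do it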
 
    def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            print("opening index")
            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread())
            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)
 
    def index_book(self, *args, **kw):
        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
        ReusableIndex.pool_jobs.append(job)
 
    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
            print("waiting for indexing to finish")
            for job in ReusableIndex.pool_jobs:
                job.get()
                sys.stdout.write('.')

            ReusableIndex.pool.close()

            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None
 
class Search(IndexStore):
    def __init__(self, default_field="content"):
        IndexStore.__init__(self)
        self.analyzer = WLAnalyzer()  # PolishAnalyzer(Version.LUCENE_34)
        self.searcher = IndexSearcher(self.store, True)
        self.parser = QueryParser(Version.LUCENE_34, default_field,
                                  self.analyzer)

        self.parent_filter = TermsFilter()
        self.parent_filter.addTerm(Term("is_book", "true"))

    def query(self, query):
        return self.parser.parse(query)
 
    def wrapjoins(self, query, fields=[]):
        """
        Modifies the query recursively: Term and Phrase queries that match
        the provided fields are wrapped in a BlockJoinQuery and so delegated
        to child documents.
        """
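        # e.g. (sketch): for a parsed query like `author:Mickiewicz AND content:kot`,
        # the author clause is left as-is, while the content clause is wrapped in a
        # BlockJoinQuery and matched against child (fragment) documents.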
 
        if BooleanQuery.instance_(query):
            qs = BooleanQuery.cast_(query)
            for clause in qs:
                clause = BooleanClause.cast_(clause)
                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
            return qs
        else:
            termset = HashSet()
            query.extractTerms(termset)
            for t in termset:
                if t.field() not in fields:
                    return query
            return BlockJoinQuery(query, self.parent_filter,
                                  BlockJoinQuery.ScoreMode.Total)
 
    def simple_search(self, query, max_results=50):
        """Returns (books, total_hits)
        """
        tops = self.searcher.search(self.query(query), max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
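
    # e.g. (sketch): books, total = Search().simple_search(u"pan tadeusz")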
 
    def search(self, query, max_results=50):
        query = self.query(query)
        query = self.wrapjoins(query, ["content", "themes"])

        tops = self.searcher.search(query, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
 
    def bsearch(self, query, max_results=50):
        q = self.query(query)
        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)

        tops = self.searcher.search(bjq, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
 
# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);

# while (tokenStream.incrementToken()) {
#     int startOffset = offsetAttribute.startOffset();
#     int endOffset = offsetAttribute.endOffset();
#     String term = charTermAttribute.toString();
 
class SearchResult(object):
    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets=None):
        if score:
            self.score = score
        else:
            self.score = scoreDocs.score

        self.hits = []

        stored = searcher.doc(scoreDocs.doc)
        self.book_id = int(stored.get("book_id"))

        header_type = stored.get("header_type")
        if not header_type:
            return

        sec = (header_type, int(stored.get("header_index")))
        header_span = stored.get('header_span')
        header_span = header_span is not None and int(header_span) or 1

        fragment = stored.get("fragment_anchor")

        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets})

        self.hits.append(hit)
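        # each stored hit is:
        #   ((header_type, header_index, header_span), fragment_anchor, score,
        #    {'how_found': ..., 'snippets': ...})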
 
    def merge(self, other):
        if self.book_id != other.book_id:
            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
        self.hits += other.hits
        if other.score > self.score:
            self.score = other.score
 
    def get_book(self):
        return catalogue.models.Book.objects.get(id=self.book_id)

    book = property(get_book)
 
    def process_hits(self):
        frags = filter(lambda r: r[1] is not None, self.hits)
        sect = filter(lambda r: r[1] is None, self.hits)
        sect = filter(lambda s: 0 == len(filter(
            lambda f: s[0][1] >= f[0][1] and s[0][1] < f[0][1] + f[0][2],
            frags)), sect)

        hits = []

        for s in sect:
            m = {'score': s[2],
                 'header_index': s[0][1]
                 }
            m.update(s[3])
            hits.append(m)

        for f in frags:
            frag = catalogue.models.Fragment.objects.get(anchor=f[1])
            m = {'score': f[2],
                 'fragment': frag,
                 'themes': frag.tags.filter(category='theme')
                 }
            m.update(f[3])
            hits.append(m)

        hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)

        print("--- %s" % hits)

        return hits
 
    def __unicode__(self):
        return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
 
    @staticmethod
    def aggregate(*result_lists):
        books = {}
        for rl in result_lists:
            for r in rl:
                if r.book_id in books:
                    books[r.book_id].merge(r)
                    # print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
                else:
                    books[r.book_id] = r
        return books.values()
 
    def __cmp__(self, other):
        return cmp(self.score, other.score)
 
class Hint(object):
    def __init__(self, search):
        self.search = search
        self.book_tags = {}
        self.part_tags = []
        self._book = None

    def book(self, book):
        self._book = book

    def tags(self, tags):
        for t in tags:
            if t.category in ['author', 'title', 'epoch', 'genre', 'kind']:
                lst = self.book_tags.get(t.category, [])
                lst.append(t)
                self.book_tags[t.category] = lst
            if t.category in ['theme']:
                self.part_tags.append(t)
 
    def tag_filter(self, tags, field='tags'):
        q = BooleanQuery()

        for tag in tags:
            toks = self.search.get_tokens(tag.name, field=field)
            tag_phrase = PhraseQuery()
            for tok in toks:
                tag_phrase.add(Term(field, tok))
            q.add(BooleanClause(tag_phrase, BooleanClause.Occur.MUST))

        return QueryWrapperFilter(q)
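
    # e.g. (sketch): tag_filter([<Tag: Jan Kochanowski>]) builds a filter that
    # requires the phrase "jan kochanowski" in the 'tags' field.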
 
    def book_filter(self):
        tags = reduce(lambda a, b: a + b, self.book_tags.values(), [])
        if tags:
            return self.tag_filter(tags)
        else:
            return None
 
    def part_filter(self):
        fs = []
        if self.part_tags:
            fs.append(self.tag_filter(self.part_tags, field='themes'))
        if self._book is not None:
            fs.append(NumericRangeFilter.newIntRange('book_id', self._book.id, self._book.id, True, True))
        return MultiSearch.chain_filters(fs)
 
    def should_search_for_book(self):
        return self._book is None
 
    def just_search_in(self, all):
        """Decide which fields should still be searched, given the hints we already have."""
        searched = []
        for field in all:
            if field == 'author' and 'author' in self.book_tags:
                continue
            if field == 'title' and self._book is not None:
                continue
            if (field == 'themes' or field == 'themes_pl') and self.part_tags:
                continue
            searched.append(field)
        return searched
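
# A rough usage sketch (assumes `search` is a MultiSearch instance and `tag` a theme Tag):
#   hint = Hint(search)
#   hint.tags([tag])    # theme tags narrow part searches; author/title/epoch/genre/kind narrow book searches
#   search.search_perfect_book(u"pan tadeusz", hint=hint)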
 
class MultiSearch(Search):
    """Class capable of IMDb-like searching"""
    def get_tokens(self, searched, field='content'):
        """
        Returns tokens analyzed by the analyzer appropriate for the given field.
        The argument can be a StringReader, a string/unicode, or a token list;
        in the last case the tokens are simply returned (so they can be reused
        if the analyzer has not changed).
        """
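        # e.g. (rough sketch): get_tokens(u"Ala ma kota", field='SIMPLE')
        # should yield something like [u'ala', u'ma', u'kota'].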
 
        if isinstance(searched, str) or isinstance(searched, unicode):
            searched = StringReader(searched)
        elif isinstance(searched, list):
            return searched

        tokens = self.analyzer.reusableTokenStream(field, searched)
        toks = []
        while tokens.incrementToken():
            cta = tokens.getAttribute(CharTermAttribute.class_)
            toks.append(cta.toString())
        return toks
 
    def fuzziness(self, fuzzy):
        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
            return fuzzy
        else:
            return 0.5
 
    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
        if fuzzy:
            phrase = MultiPhraseQuery()
            for t in tokens:
                term = Term(field, t)
                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
                fuzzterms = []

                while True:
                    # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
                    ft = fuzzterm.term()
                    if ft:
                        fuzzterms.append(ft)
                    if not fuzzterm.next(): break

                if fuzzterms:
                    phrase.add(JArray('object')(fuzzterms, Term))
                else:
                    phrase.add(term)
        else:
            phrase = PhraseQuery()
            phrase.setSlop(slop)
            for t in tokens:
                term = Term(field, t)
                phrase.add(term)
        return phrase
 
    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
        q = BooleanQuery()
        for t in tokens:
            term = Term(field, t)
            if fuzzy:
                term = FuzzyQuery(term, self.fuzziness(fuzzy))
            else:
                term = TermQuery(term)
            q.add(BooleanClause(term, modal))
        return q
 
    def content_query(self, query):
        return BlockJoinQuery(query, self.parent_filter,
                              BlockJoinQuery.ScoreMode.Total)
 
    def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
        fields_to_search = ['author', 'title']
        only_in = None
        if hint:
            if not hint.should_search_for_book():
                return []
            fields_to_search = hint.just_search_in(fields_to_search)
            only_in = hint.book_filter()

        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]

        books = []
        for q in qrys:
            top = self.searcher.search(q,
                                       self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
                                       max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found))
        return books
 
    def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]

        flt = None
        if hint:
            flt = hint.part_filter()

        books = []
        for q in qrys:
            top = self.searcher.search(q,
                                       self.chain_filters([self.term_filter(Term('is_book', 'true'), inverse=True),
                                                           flt]),
                                       max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found, snippets=self.get_snippets(found, q)))
        return books
 
    def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None):
        books = []
        only_in = None

        if hint:
            only_in = hint.part_filter()

        # content-only query: themes x content
        q = BooleanQuery()

        tokens = self.get_tokens(searched)
        if hint is None or hint.just_search_in(['themes_pl']) != []:
            q.add(BooleanClause(self.make_term_query(tokens, field='themes_pl',
                                                     fuzzy=fuzzy), BooleanClause.Occur.MUST))

        q.add(BooleanClause(self.make_term_query(tokens, field='content',
                                                 fuzzy=fuzzy), BooleanClause.Occur.SHOULD))

        topDocs = self.searcher.search(q, only_in, max_results)
        for found in topDocs.scoreDocs:
            books.append(SearchResult(self.searcher, found))

        # query themes/content x author/title/tags
        in_meta = BooleanQuery()
        in_content = BooleanQuery()

        for fld in ['themes', 'content', 'tags', 'author', 'title']:
            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))

        topDocs = self.searcher.search(q, only_in, max_results)
        for found in topDocs.scoreDocs:
            books.append(SearchResult(self.searcher, found))

        return books
 
    def multisearch(self, query, max_results=50):
        """
        - (phrase) OR -> content
        - (keywords)  -> author
        """
        # queryreader = StringReader(query)
        # tokens = self.get_tokens(queryreader)

        # top_level = BooleanQuery()
        # Should = BooleanClause.Occur.SHOULD

        # phrase_level = BooleanQuery()
        # phrase_level.setBoost(1.3)

        # p_content = self.make_phrase(tokens, joined=True)
        # p_title = self.make_phrase(tokens, 'title')
        # p_author = self.make_phrase(tokens, 'author')

        # phrase_level.add(BooleanClause(p_content, Should))
        # phrase_level.add(BooleanClause(p_title, Should))
        # phrase_level.add(BooleanClause(p_author, Should))

        # kw_level = BooleanQuery()

        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
        # kw_level.add(j_themes, Should)
        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
        # j_con = self.make_term_query(tokens, joined=True)
        # kw_level.add(j_con, Should)

        # top_level.add(BooleanClause(phrase_level, Should))
        # top_level.add(BooleanClause(kw_level, Should))
 
    def book_search(self, query, filter=None, max_results=50, collector=None):
        tops = self.searcher.search(query, filter, max_results)
        # tops = self.searcher.search(p_content, max_results)

        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
            bks.append(b)
            print("%s (%d) -> %f" % (b, b.id, found.score))
        return bks
 
    def get_snippets(self, scoreDoc, query, field='content'):
        htmlFormatter = SimpleHTMLFormatter()
        highlighter = Highlighter(htmlFormatter, QueryScorer(query))

        stored = self.searcher.doc(scoreDoc.doc)

        snippets = Snippets(stored.get('book_id')).open()
        try:
            text = snippets.get((int(stored.get('snippets_position')),
                                 int(stored.get('snippets_length'))))
        finally:
            snippets.close()

        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
        # highlighter.getBestTextFragments(tokenStream, text, False, 10)
        # import pdb; pdb.set_trace()
        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
        print('snips: %s' % snip)

        return snip
 
    @staticmethod
    def enum_to_array(enum):
        """
        Converts a Lucene TermEnum to an array of Terms, suitable for adding to a query.
        """
        terms = []
        while True:
            t = enum.term()
            if t:
                terms.append(t)
            if not enum.next(): break

        if terms:
            return JArray('object')(terms, Term)
 
    def search_tags(self, query, filter=None, max_results=40):
        tops = self.searcher.search(query, filter, max_results)

        tags = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
            tags.append(tag)
            print("%s (%d) -> %f" % (tag, tag.id, found.score))

        return tags
 
    def create_prefix_phrase(self, toks, field):
        q = MultiPhraseQuery()
        for i in range(len(toks)):
            t = Term(field, toks[i])
            if i == len(toks) - 1:
                pterms = MultiSearch.enum_to_array(PrefixTermEnum(self.searcher.getIndexReader(), t))
                if pterms:
                    q.add(pterms)
                else:
                    q.add(t)
            else:
                q.add(t)
        return q
 
    @staticmethod
    def term_filter(term, inverse=False):
        only_term = TermsFilter()
        only_term.addTerm(term)

        if inverse:
            neg = BooleanFilter()
            neg.add(FilterClause(only_term, BooleanClause.Occur.MUST_NOT))
            only_term = neg

        return only_term
 
    def hint_tags(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')
        top = BooleanQuery()

        for field in ['tag_name', 'tag_name_pl']:
            q = self.create_prefix_phrase(toks, field)
            top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))

        no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)

        return self.search_tags(top, no_book_cat, max_results=max_results)
 
    def hint_books(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')

        q = self.create_prefix_phrase(toks, 'title')

        return self.book_search(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
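
    # A rough usage sketch (assumes the index has already been built):
    #   search = MultiSearch()
    #   search.hint_tags(u"mick")    # prefix-matches tag names, book categories excluded
    #   search.hint_books(u"pan t")  # prefix-matches book titles only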
 
    @staticmethod
    def chain_filters(filters, op=ChainedFilter.AND):
        filters = filter(lambda x: x is not None, filters)
        chf = ChainedFilter(JArray('object')(filters, Filter), op)
        return chf
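
    # e.g. (sketch): chain_filters([flt, None]) drops the None and ANDs the rest.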
 
    def filtered_categories(self, tags):
        cats = {}
        for t in tags:
            cats[t.category] = True
        return cats.keys()