# -*- coding: utf-8 -*-

from django.conf import settings
from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
    File, Field, Integer, \
    NumericField, Version, Document, JavaError, IndexSearcher, \
    QueryParser, PerFieldAnalyzerWrapper, \
    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
    KeywordAnalyzer, NumericRangeQuery, NumericRangeFilter, BooleanQuery, \
    BlockJoinQuery, BlockJoinCollector, Filter, TermsFilter, ChainedFilter, \
    HashSet, BooleanClause, Term, CharTermAttribute, \
    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, \
    FuzzyQuery, FuzzyTermEnum, PrefixTermEnum, Sort, \
    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
    BooleanFilter, FilterClause, QueryWrapperFilter, \
    initVM, CLASSPATH, JArray

# the JVM has to be initialized before any Lucene class is used
JVM = initVM(CLASSPATH)

import sys
import os
import re
import errno
import atexit
import traceback

from librarian import dcparser
from librarian.parser import WLDocument
import catalogue.models
from multiprocessing.pool import ThreadPool
from threading import current_thread


class WLAnalyzer(PerFieldAnalyzerWrapper):
    def __init__(self):
        polish = PolishAnalyzer(Version.LUCENE_34)
        # polish_gap.setPositionIncrementGap(999)

        simple = SimpleAnalyzer(Version.LUCENE_34)
        # simple_gap.setPositionIncrementGap(999)

        keyword = KeywordAnalyzer(Version.LUCENE_34)

        # not sure if needed: there's NOT_ANALYZED meaning basically the same
        PerFieldAnalyzerWrapper.__init__(self, polish)

        self.addAnalyzer("tags", simple)
        self.addAnalyzer("technical_editors", simple)
        self.addAnalyzer("editors", simple)
        self.addAnalyzer("url", keyword)
        self.addAnalyzer("source_url", keyword)
        self.addAnalyzer("source_name", simple)
        self.addAnalyzer("publisher", simple)
        self.addAnalyzer("authors", simple)
        self.addAnalyzer("is_book", keyword)
        # shouldn't the title have two forms? _pl and simple?

        self.addAnalyzer("themes", simple)
        self.addAnalyzer("themes_pl", polish)

        self.addAnalyzer("tag_name", simple)
        self.addAnalyzer("tag_name_pl", polish)

        self.addAnalyzer("translators", simple)

        self.addAnalyzer("KEYWORD", keyword)
        self.addAnalyzer("SIMPLE", simple)
        self.addAnalyzer("POLISH", polish)
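
# A minimal usage sketch (illustrative values): the wrapper routes analysis
# per field name, falling back to the Polish analyzer for unlisted fields.
#
#   analyzer = WLAnalyzer()
#   stream = analyzer.reusableTokenStream("tags", StringReader(u"romantyzm"))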


class IndexStore(object):
    def __init__(self):
        self.make_index_dir()
        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))

    def make_index_dir(self):
        try:
            os.makedirs(settings.SEARCH_INDEX)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise


class IndexChecker(IndexStore):
    def __init__(self):
        IndexStore.__init__(self)

    def check(self):
        checker = CheckIndex(self.store)
        status = checker.checkIndex()
        return status


class Snippets(object):
    """
    Keeps the plain text of indexed fragments in one flat file per book,
    so snippets can be read back later by (position, length).
    """
    SNIPPET_DIR = "snippets"

    def __init__(self, book_id):
        try:
            os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
        self.book_id = book_id
        self.file = None

    def open(self, mode='r'):
        self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
        self.position = 0
        return self

    def add(self, snippet):
        txt = snippet.encode('utf-8')
        l = len(txt)
        self.file.write(txt)
        pos = (self.position, l)
        self.position += l
        return pos

    def get(self, pos):
        self.file.seek(pos[0], 0)
        txt = self.file.read(pos[1]).decode('utf-8')
        return txt

    def close(self):
        if self.file:
            self.file.close()
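
# Round-trip sketch (hypothetical book id):
#   snips = Snippets(123).open('w')
#   pos = snips.add(u"fragment text")  # -> (byte offset, byte length)
#   snips.close()
#   Snippets(123).open().get(pos)      # -> u"fragment text"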


class Index(IndexStore):
    def __init__(self, analyzer=None):
        IndexStore.__init__(self)
        self.index = None
        if analyzer is None:
            analyzer = WLAnalyzer()
        self.analyzer = analyzer

    def open(self, analyzer=None):
        if self.index:
            raise Exception("Index is already opened")
        self.index = IndexWriter(self.store, self.analyzer,
                                 IndexWriter.MaxFieldLength.LIMITED)
        return self.index

    def optimize(self):
        self.index.optimize()

    def close(self):
        try:
            self.index.optimize()
        except JavaError as je:
            print("Error during optimize phase, check index: %s" % je)
        self.index.close()
        self.index = None

    def index_tags(self):
        """Re-indexes all tags: removes existing tag documents, then adds one per tag."""
        q = NumericRangeQuery.newIntRange("tag_id", 0, Integer.MAX_VALUE, True, True)
        self.index.deleteDocuments(q)

        for tag in catalogue.models.Tag.objects.all():
            doc = Document()
            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(tag.id))
            doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
            self.index.addDocument(doc)
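
    # Resulting tag document (illustrative values):
    #   tag_id: 42 (stored int), tag_name/tag_name_pl: u"Romantyzm" (analyzed),
    #   tag_category: "epoch" (not analyzed) -- used by hint_tags() below.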

    def remove_book(self, book):
        """Deletes all index documents belonging to the given book."""
        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)

    def index_book(self, book, book_info=None, overwrite=True):
        if overwrite:
            self.remove_book(book)

        book_doc = self.create_book_doc(book)
        meta_fields = self.extract_metadata(book, book_info)
        for f in meta_fields.values():
            if isinstance(f, list) or isinstance(f, tuple):
                for elem in f:
                    book_doc.add(elem)
            else:
                book_doc.add(f)
        self.index.addDocument(book_doc)

        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['authors']])

    master_tags = [
        'dramat_wierszowany_l',
        'dramat_wierszowany_lp',
        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
        ]

    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']

    def create_book_doc(self, book):
        """
        Create a Lucene document connected to the book
        """
        doc = Document()
        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
        if book.parent is not None:
            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
        return doc

    def extract_metadata(self, book, book_info=None):
        fields = {}
        if book_info is None:
            book_info = dcparser.parse(open(book.xml_file.path))

        fields['slug'] = Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)
        fields['tags'] = self.add_gaps([Field("tags", t.name, Field.Store.NO, Field.Index.ANALYZED) for t in book.tags], 'tags')
        fields['is_book'] = Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)

        for field in dcparser.BookInfo.FIELDS:
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we use validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    try:
                        fields[field.name] = Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)
                    except JavaError as je:
                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
                elif type_indicator == dcparser.as_person:
                    p = getattr(book_info, field.name)
                    if isinstance(p, dcparser.Person):
                        persons = unicode(p)
                    else:
                        persons = ', '.join(map(unicode, p))
                    fields[field.name] = Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)
                elif type_indicator == dcparser.as_date:
                    dt = getattr(book_info, field.name)
                    fields[field.name] = Field(field.name, "%04d%02d%02d" %
                                               (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)
        return fields

    def get_master(self, root):
        """Returns the first master element (the main text body) of the document tree."""
        for master in root.iter():
            if master.tag in self.master_tags:
                return master

    def add_gaps(self, fields, fieldname):
        def gap():
            while True:
                yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
        return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
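
    # e.g. add_gaps([f1, f2], 'tags') -> [f1, gap, f2]: zip() interleaves the
    # infinite gap() generator with the fields, and [0:-1] drops the trailing
    # gap. The gap fields keep phrase queries from matching across values.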

    def index_content(self, book, book_fields=[]):
        wld = WLDocument.from_file(book.xml_file.path, parse_dublincore=False)
        root = wld.edoc.getroot()

        master = self.get_master(root)
        if master is None:
            return []

        def walker(node):
            # depth-first walk yielding (begin, None) on entering a node
            # and (None, end) on leaving it
            yield node, None
            for child in list(node):
                for b, e in walker(child):
                    yield b, e
            yield None, node

        def fix_format(text):
            return re.sub("/$", "", text, flags=re.M)

        def add_part(snippets, **fields):
            doc = self.create_book_doc(book)
            for f in book_fields:
                doc.add(f)

            doc.add(NumericField('header_index', Field.Store.YES, True).setIntValue(fields["header_index"]))
            doc.add(NumericField("header_span", Field.Store.YES, True)
                    .setIntValue(fields.get('header_span') or 1))
            doc.add(Field('header_type', fields["header_type"], Field.Store.YES, Field.Index.NOT_ANALYZED))

            doc.add(Field('content', fields["content"], Field.Store.NO, Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS_OFFSETS))

            snip_pos = snippets.add(fields["content"])
            doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
            doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[1]))

            if 'fragment_anchor' in fields:
                doc.add(Field("fragment_anchor", fields['fragment_anchor'],
                              Field.Store.YES, Field.Index.NOT_ANALYZED))

            if 'themes' in fields:
                themes, themes_pl = zip(*[
                    (Field("themes", theme, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS),
                     Field("themes_pl", theme, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))
                    for theme in fields['themes']])

                themes = self.add_gaps(themes, 'themes')
                themes_pl = self.add_gaps(themes_pl, 'themes_pl')

                for t in themes:
                    doc.add(t)
                for t in themes_pl:
                    doc.add(t)

            return doc

        def give_me_utf8(s):
            if isinstance(s, unicode):
                return s.encode('utf-8')
            else:
                return s

        fragments = {}
        snippets = Snippets(book.id).open('w')
        try:
            for header, position in zip(list(master), range(len(master))):

                if header.tag in self.skip_header_tags:
                    continue

                content = u' '.join([t for t in header.itertext()])
                content = fix_format(content)

                doc = add_part(snippets, header_index=position, header_type=header.tag, content=content)

                self.index.addDocument(doc)

                for start, end in walker(header):
                    if start is not None and start.tag == 'begin':
                        fid = start.attrib['id'][1:]
                        fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
                        fragments[fid]['content'].append(start.tail)
                    elif start is not None and start.tag == 'motyw':
                        fid = start.attrib['id'][1:]
                        if start.text is not None:
                            fragments[fid]['themes'] += map(str.strip, map(give_me_utf8, start.text.split(',')))
                        fragments[fid]['content'].append(start.tail)
                    elif start is not None and start.tag == 'end':
                        fid = start.attrib['id'][1:]
                        if fid not in fragments:
                            continue  # a broken <end> node, skip it
                        frag = fragments[fid]
                        if frag['themes'] == []:
                            continue  # empty themes list.
                        del fragments[fid]

                        def jstr(l):
                            return u' '.join(map(
                                lambda x: u'(none)' if x is None else unicode(x),
                                l))

                        doc = add_part(snippets,
                                       header_type=frag['start_header'],
                                       header_index=frag['start_section'],
                                       header_span=position - frag['start_section'] + 1,
                                       fragment_anchor=fid,
                                       content=u' '.join(filter(lambda s: s is not None, frag['content'])),
                                       themes=frag['themes'])

                        self.index.addDocument(doc)
                    elif start is not None:
                        for frag in fragments.values():
                            frag['content'].append(start.text)
                    elif end is not None:
                        for frag in fragments.values():
                            frag['content'].append(end.tail)
        finally:
            snippets.close()
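
    # Index layout produced above (summary): one document per text section
    # (header_index, header_type, content), plus one document per themed
    # fragment, spanning header_span sections and carrying its
    # fragment_anchor and themes/themes_pl fields.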

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()


def log_exception_wrapper(f):
    def _wrap(*a):
        try:
            f(*a)
        except Exception as e:
            print("Error in indexing thread: %s" % e)
            traceback.print_exc()
            raise
    return _wrap


class ReusableIndex(Index):
    """
    Works like Index, but does not close/optimize the Lucene index
    until program exit (uses an atexit hook).
    This is useful for the importbooks command.

    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
    """
    index = None
    pool = None
    pool_jobs = None

    def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            print("opening index")
            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread())
            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)

    def index_book(self, *args, **kw):
        # index asynchronously on the thread pool; each worker thread is
        # attached to the JVM by the pool initializer above
        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
        ReusableIndex.pool_jobs.append(job)

    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
            print("wait for indexing to finish")
            for job in ReusableIndex.pool_jobs:
                job.wait()
                sys.stdout.write('.')
                sys.stdout.flush()
            ReusableIndex.pool.close()

            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None

    def close(self):
        # deliberately a no-op: the shared index is closed in close_reusable()
        pass
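
# Typical use in a management command (sketch):
#   index = ReusableIndex()
#   index.open()
#   for book in books:
#       index.index_book(book)  # queued on the thread pool
#   # the index is optimized and closed by the atexit hook,
#   # or call ReusableIndex.close_reusable() explicitly.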


class Search(IndexStore):
    def __init__(self, default_field="content"):
        IndexStore.__init__(self)
        self.analyzer = WLAnalyzer()  # PolishAnalyzer(Version.LUCENE_34)
        self.searcher = IndexSearcher(self.store, True)
        self.parser = QueryParser(Version.LUCENE_34, default_field,
                                  self.analyzer)

        self.parent_filter = TermsFilter()
        self.parent_filter.addTerm(Term("is_book", "true"))

    def query(self, query):
        return self.parser.parse(query)

    def wrapjoins(self, query, fields=[]):
        """
        Modifies the query recursively, so that Term and Phrase queries
        matching the provided fields are wrapped in a BlockJoinQuery
        and thus delegated to child documents.
        """
        if BooleanQuery.instance_(query):
            qs = BooleanQuery.cast_(query)
            for clause in qs:
                clause = BooleanClause.cast_(clause)
                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
            return qs
        else:
            termset = HashSet()
            query.extractTerms(termset)
            for t in termset:
                t = Term.cast_(t)
                if t.field() not in fields:
                    return query
            return BlockJoinQuery(query, self.parent_filter,
                                  BlockJoinQuery.ScoreMode.Total)

    def simple_search(self, query, max_results=50):
        """Returns (books, total_hits)"""
        tops = self.searcher.search(self.query(query), max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def search(self, query, max_results=50):
        query = self.query(query)
        query = self.wrapjoins(query, ["content", "themes"])

        tops = self.searcher.search(query, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def bsearch(self, query, max_results=50):
        q = self.query(query)
        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)

        tops = self.searcher.search(bjq, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
#
# while (tokenStream.incrementToken()) {
#     int startOffset = offsetAttribute.startOffset();
#     int endOffset = offsetAttribute.endOffset();
#     String term = charTermAttribute.toString();
# }


class SearchResult(object):
    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets=None):
        if score:
            self.score = score
        else:
            self.score = scoreDocs.score

        self.hits = []

        stored = searcher.doc(scoreDocs.doc)
        self.book_id = int(stored.get("book_id"))

        header_type = stored.get("header_type")
        if not header_type:
            # a book-level document: no section hit to record
            return

        sec = (header_type, int(stored.get("header_index")))
        header_span = stored.get('header_span')
        header_span = int(header_span) if header_span is not None else 1

        fragment = stored.get("fragment_anchor")

        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets})

        self.hits.append(hit)
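
    # hit layout: ((header_type, header_index, header_span),
    #              fragment_anchor or None, score, extras dict)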

    def merge(self, other):
        if self.book_id != other.book_id:
            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
        self.hits += other.hits
        if other.score > self.score:
            self.score = other.score
        return self

    def get_book(self):
        return catalogue.models.Book.objects.get(id=self.book_id)

    book = property(get_book)

    def process_hits(self):
        # fragment hits carry an anchor; section hits don't
        frags = filter(lambda r: r[1] is not None, self.hits)
        sect = filter(lambda r: r[1] is None, self.hits)
        # drop section hits already covered by a fragment hit
        sect = filter(lambda s: 0 == len(filter(
            lambda f: s[0][1] >= f[0][1] and s[0][1] < f[0][1] + f[0][2],
            frags)), sect)

        hits = []

        for s in sect:
            m = {'score': s[2],
                 'header_index': s[0][1]
                 }
            m.update(s[3])
            hits.append(m)

        for f in frags:
            frag = catalogue.models.Fragment.objects.get(anchor=f[1])
            m = {'score': f[2],
                 'fragment': frag,
                 'themes': frag.tags.filter(category='theme')
                 }
            m.update(f[3])
            hits.append(m)

        hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)

        print("--- %s" % hits)

        return hits

    def __unicode__(self):
        return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)

    @staticmethod
    def aggregate(*result_lists):
        books = {}
        for rl in result_lists:
            for r in rl:
                if r.book_id in books:
                    books[r.book_id].merge(r)
                else:
                    books[r.book_id] = r
        return books.values()

    def __cmp__(self, other):
        return cmp(self.score, other.score)


class Hint(object):
    """Narrows a search based on already-known tags and books."""
    def __init__(self, search):
        self.search = search
        self.book_tags = {}
        self.part_tags = []
        self._books = []

    def books(self, *books):
        self._books = books

    def tags(self, tags):
        for t in tags:
            if t.category in ['author', 'title', 'epoch', 'genre', 'kind']:
                lst = self.book_tags.get(t.category, [])
                lst.append(t)
                self.book_tags[t.category] = lst
            if t.category in ['theme']:
                self.part_tags.append(t)

    def tag_filter(self, tags, field='tags'):
        q = BooleanQuery()

        for tag in tags:
            toks = self.search.get_tokens(tag.name, field=field)
            tag_phrase = PhraseQuery()
            for tok in toks:
                tag_phrase.add(Term(field, tok))
            q.add(BooleanClause(tag_phrase, BooleanClause.Occur.MUST))

        return QueryWrapperFilter(q)

    def book_filter(self):
        tags = reduce(lambda a, b: a + b, self.book_tags.values(), [])
        if tags:
            return self.tag_filter(tags)
        else:
            return None

    def part_filter(self):
        fs = []
        if self.part_tags:
            fs.append(self.tag_filter(self.part_tags, field='themes'))

        if self._books != []:
            bf = BooleanFilter()
            for b in self._books:
                id_filter = NumericRangeFilter.newIntRange('book_id', b.id, b.id, True, True)
                bf.add(FilterClause(id_filter, BooleanClause.Occur.SHOULD))
            fs.append(bf)

        return MultiSearch.chain_filters(fs)

    def should_search_for_book(self):
        return self._books == []

    def just_search_in(self, all):
        """Holds the logic for deciding which indexes should be searched, given the hints we already have"""
        just = []
        for field in all:
            if field == 'authors' and 'author' in self.book_tags:
                continue
            if field == 'title' and self._books != []:
                continue
            if (field == 'themes' or field == 'themes_pl') and self.part_tags:
                continue
            just.append(field)
        return just


class MultiSearch(Search):
    """Class capable of IMDb-like searching"""
    def get_tokens(self, searched, field='content'):
        """returns tokens analyzed by the proper (per-field) analyzer
        argument can be: StringReader, string/unicode, or tokens. In the last case
        they will just be returned (so we can reuse tokens, if we don't change the analyzer)
        """
        if isinstance(searched, str) or isinstance(searched, unicode):
            searched = StringReader(searched)
        elif isinstance(searched, list):
            return searched

        toks = []
        tokens = self.analyzer.reusableTokenStream(field, searched)

        while tokens.incrementToken():
            cta = tokens.getAttribute(CharTermAttribute.class_)
            toks.append(cta.toString())
        return toks
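
    # e.g. (illustrative) get_tokens(u"Ala ma kota", field='SIMPLE')
    # -> [u'ala', u'ma', u'kota']; the SIMPLE/POLISH/KEYWORD pseudo-fields
    # registered in WLAnalyzer select an analyzer without naming a real field.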

    def fuzziness(self, fuzzy):
        if not fuzzy:
            return None
        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
            return fuzzy
        else:
            return 0.5

    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
        if fuzzy:
            phrase = MultiPhraseQuery()
            for t in tokens:
                term = Term(field, t)
                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
                fuzzterms = []

                while True:
                    # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
                    ft = fuzzterm.term()
                    if ft:
                        fuzzterms.append(ft)
                    if not fuzzterm.next():
                        break

                phrase.add(JArray('object')(fuzzterms, Term))
        else:
            phrase = PhraseQuery()
            phrase.setSlop(slop)
            for t in tokens:
                term = Term(field, t)
                phrase.add(term)
        return phrase

    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
        q = BooleanQuery()
        for t in tokens:
            term = Term(field, t)
            if fuzzy:
                term = FuzzyQuery(term, self.fuzziness(fuzzy))
            else:
                term = TermQuery(term)
            q.add(BooleanClause(term, modal))
        return q
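
    # e.g. make_term_query([u'ala', u'kota']) builds the boolean query
    # (content:ala content:kota) with SHOULD clauses, i.e. an OR;
    # modal=BooleanClause.Occur.MUST turns it into an AND.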

    # def content_query(self, query):
    #     return BlockJoinQuery(query, self.parent_filter,
    #                           BlockJoinQuery.ScoreMode.Total)

    def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
        fields_to_search = ['authors', 'title']
        only_in = None
        if hint:
            if not hint.should_search_for_book():
                return []
            fields_to_search = hint.just_search_in(fields_to_search)
            only_in = hint.book_filter()

        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]

        books = []
        for q in qrys:
            top = self.searcher.search(q,
                                       self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
                                       max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found))
        return books

    def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]

        flt = None
        if hint:
            flt = hint.part_filter()

        books = []
        for q in qrys:
            top = self.searcher.search(q,
                                       self.chain_filters([self.term_filter(Term('is_book', 'true'), inverse=True),
                                                           flt]),
                                       max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found, snippets=self.get_snippets(found, q)))
        return books

    def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None):
        books = []
        only_in = None
        if hint:
            only_in = hint.part_filter()

        # content-only query: themes x content
        q = BooleanQuery()

        tokens = self.get_tokens(searched)
        if hint is None or hint.just_search_in(['themes_pl']) != []:
            q.add(BooleanClause(self.make_term_query(tokens, field='themes_pl',
                                                     fuzzy=fuzzy), BooleanClause.Occur.MUST))

        q.add(BooleanClause(self.make_term_query(tokens, field='content',
                                                 fuzzy=fuzzy), BooleanClause.Occur.SHOULD))

        topDocs = self.searcher.search(q, only_in, max_results)
        for found in topDocs.scoreDocs:
            books.append(SearchResult(self.searcher, found))

        # query themes/content x author/title/tags
        # in_meta = BooleanQuery()
        in_content = BooleanQuery()

        for fld in ['themes', 'content', 'tags', 'authors', 'title']:
            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))

        topDocs = self.searcher.search(in_content, only_in, max_results)
        for found in topDocs.scoreDocs:
            books.append(SearchResult(self.searcher, found))

        return books

    def multisearch(self, query, max_results=50):
        """
        Search strategy:
        - (phrase) OR -> content
        - (keywords) -> authors
        """
        # queryreader = StringReader(query)
        # tokens = self.get_tokens(queryreader)

        # top_level = BooleanQuery()
        # Should = BooleanClause.Occur.SHOULD

        # phrase_level = BooleanQuery()
        # phrase_level.setBoost(1.3)

        # p_content = self.make_phrase(tokens, joined=True)
        # p_title = self.make_phrase(tokens, 'title')
        # p_author = self.make_phrase(tokens, 'author')

        # phrase_level.add(BooleanClause(p_content, Should))
        # phrase_level.add(BooleanClause(p_title, Should))
        # phrase_level.add(BooleanClause(p_author, Should))

        # kw_level = BooleanQuery()

        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
        # kw_level.add(j_themes, Should)
        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
        # j_con = self.make_term_query(tokens, joined=True)
        # kw_level.add(j_con, Should)

        # top_level.add(BooleanClause(phrase_level, Should))
        # top_level.add(BooleanClause(kw_level, Should))

    def book_search(self, query, filter=None, max_results=50, collector=None):
        tops = self.searcher.search(query, filter, max_results)

        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
            bks.append(b)
            print("%s (%d) -> %f" % (b, b.id, found.score))
        return bks

    def get_snippets(self, scoreDoc, query, field='content'):
        htmlFormatter = SimpleHTMLFormatter()
        highlighter = Highlighter(htmlFormatter, QueryScorer(query))

        stored = self.searcher.doc(scoreDoc.doc)

        # locate the content in the book's snippet file
        snippets = Snippets(stored.get('book_id')).open()
        try:
            text = snippets.get((int(stored.get('snippets_position')),
                                 int(stored.get('snippets_length'))))
        finally:
            snippets.close()

        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
        # highlighter.getBestTextFragments(tokenStream, text, False, 10)
        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")

        return snip

    @staticmethod
    def enum_to_array(enum):
        """
        Converts a Lucene TermEnum to an array of Terms, suitable for
        addition to queries
        """
        terms = []
        while True:
            t = enum.term()
            if t:
                terms.append(t)
            if not enum.next():
                break

        if terms:
            return JArray('object')(terms, Term)

    def search_tags(self, query, filter=None, max_results=40):
        tops = self.searcher.search(query, filter, max_results)

        tags = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
            tags.append(tag)
            print("%s (%d) -> %f" % (tag, tag.id, found.score))

        return tags

    def search_books(self, query, filter=None, max_results=10):
        bks = []
        tops = self.searcher.search(query, filter, max_results)
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return bks

    def create_prefix_phrase(self, toks, field):
        q = MultiPhraseQuery()
        for i in range(len(toks)):
            t = Term(field, toks[i])
            if i == len(toks) - 1:
                # expand the last token to all terms it prefixes
                pterms = MultiSearch.enum_to_array(PrefixTermEnum(self.searcher.getIndexReader(), t))
                if pterms:
                    q.add(pterms)
                else:
                    q.add(t)
            else:
                q.add(t)
        return q

    @staticmethod
    def term_filter(term, inverse=False):
        only_term = TermsFilter()
        only_term.addTerm(term)

        if inverse:
            neg = BooleanFilter()
            neg.add(FilterClause(only_term, BooleanClause.Occur.MUST_NOT))
            only_term = neg

        return only_term

    def hint_tags(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')
        top = BooleanQuery()

        for field in ['tag_name', 'tag_name_pl']:
            q = self.create_prefix_phrase(toks, field)
            top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))

        no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)

        return self.search_tags(top, no_book_cat, max_results=max_results)

    def hint_books(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')

        q = self.create_prefix_phrase(toks, 'title')

        return self.book_search(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
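
    # Autocomplete sketch (illustrative): hint_tags(u"rom") matches tags whose
    # last token starts with "rom" (e.g. "romantyzm"), excluding the "book"
    # tag category; hint_books() does the same over book titles.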

    @staticmethod
    def chain_filters(filters, op=ChainedFilter.AND):
        filters = filter(lambda x: x is not None, filters)
        if not filters:
            return None
        chf = ChainedFilter(JArray('object')(filters, Filter), op)
        return chf

    def filtered_categories(self, tags):
        cats = {}
        for t in tags:
            cats[t.category] = True
        return cats.keys()