# -*- coding: utf-8 -*-

from django.conf import settings
from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
    File, Field, Integer, \
    NumericField, Version, Document, JavaError, IndexSearcher, \
    QueryParser, PerFieldAnalyzerWrapper, \
    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
    HashSet, BooleanClause, Term, CharTermAttribute, \
    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, \
    FuzzyQuery, FuzzyTermEnum, PrefixTermEnum, Sort, \
    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
    BooleanFilter, FilterClause, QueryWrapperFilter, \
    initVM, CLASSPATH, JArray

JVM = initVM(CLASSPATH)

import sys
import os
import re
import errno
import atexit
import traceback

from librarian import dcparser
from librarian.parser import WLDocument
from catalogue import models
from multiprocessing.pool import ThreadPool
from threading import current_thread


class WLAnalyzer(PerFieldAnalyzerWrapper):
    def __init__(self):
        polish = PolishAnalyzer(Version.LUCENE_34)
        polish_gap = PolishAnalyzer(Version.LUCENE_34)
        # polish_gap.setPositionIncrementGap(999)

        simple = SimpleAnalyzer(Version.LUCENE_34)
        simple_gap = SimpleAnalyzer(Version.LUCENE_34)
        # simple_gap.setPositionIncrementGap(999)

        keyword = KeywordAnalyzer(Version.LUCENE_34)

        # not sure if needed: there's NOT_ANALYZED meaning basically the same
        PerFieldAnalyzerWrapper.__init__(self, polish)

        self.addAnalyzer("tags", simple_gap)
        self.addAnalyzer("technical_editors", simple)
        self.addAnalyzer("editors", simple)
        self.addAnalyzer("url", keyword)
        self.addAnalyzer("source_url", keyword)
        self.addAnalyzer("source_name", simple)
        self.addAnalyzer("publisher", simple)
        self.addAnalyzer("author", simple)
        self.addAnalyzer("is_book", keyword)

        self.addAnalyzer("themes", simple_gap)
        self.addAnalyzer("themes_pl", polish_gap)

        self.addAnalyzer("tag_name", simple_gap)
        self.addAnalyzer("tag_name_pl", polish_gap)

        self.addAnalyzer("KEYWORD", keyword)
        self.addAnalyzer("SIMPLE", simple)
        self.addAnalyzer("POLISH", polish)
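
# Rough usage sketch (illustrative only; `store` below stands for any Lucene
# Directory, e.g. IndexStore().store). Fields not registered above fall back
# to the default PolishAnalyzer.
#   analyzer = WLAnalyzer()
#   writer = IndexWriter(store, analyzer, IndexWriter.MaxFieldLength.LIMITED)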


class IndexStore(object):
    def __init__(self):
        self.make_index_dir()
        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))

    def make_index_dir(self):
        try:
            os.makedirs(settings.SEARCH_INDEX)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise


class IndexChecker(IndexStore):
    def __init__(self):
        IndexStore.__init__(self)

    def check(self):
        checker = CheckIndex(self.store)
        status = checker.checkIndex()
        return status


class Snippets(object):
    SNIPPET_DIR = "snippets"

    def __init__(self, book_id):
        try:
            os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
        self.book_id = book_id
        self.file = None

    def open(self, mode='r'):
        self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
        self.position = 0
        return self

    def add(self, snippet):
        txt = snippet.encode('utf-8')
        l = len(txt)
        self.file.write(txt)
        pos = (self.position, l)
        self.position += l
        return pos

    def get(self, pos):
        self.file.seek(pos[0], 0)
        return self.file.read(pos[1]).decode('utf-8')
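
# Snippet round-trip sketch (hypothetical book_id and text):
#   snips = Snippets(book_id).open('w')
#   pos = snips.add(u"some fragment text")     # -> (byte offset, byte length)
#   Snippets(book_id).open().get(pos)          # -> u"some fragment text"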


class Index(IndexStore):
    def __init__(self, analyzer=None):
        IndexStore.__init__(self)
        self.index = None
        if not analyzer:
            analyzer = WLAnalyzer()
        self.analyzer = analyzer

    def open(self, analyzer=None):
        if self.index:
            raise Exception("Index is already opened")
        self.index = IndexWriter(self.store, self.analyzer,
                                 IndexWriter.MaxFieldLength.LIMITED)

    def optimize(self):
        self.index.optimize()

    def close(self):
        try:
            self.index.optimize()
        except JavaError as je:
            print "Error during optimize phase, check index: %s" % je

        self.index.close()
        self.index = None

    def index_tags(self):
        q = NumericRangeQuery.newIntRange("tag_id", 0, Integer.MAX_VALUE, True, True)
        self.index.deleteDocuments(q)

        for tag in models.Tag.objects.all():
            doc = Document()
            doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(tag.id))
            doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
            doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
            self.index.addDocument(doc)

    def remove_book(self, book):
        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)

    def index_book(self, book, overwrite=True):
        if overwrite:
            self.remove_book(book)

        book_doc = self.create_book_doc(book)
        meta_fields = self.extract_metadata(book)
        for f in meta_fields.values():
            if isinstance(f, list):
                for elem in f:
                    book_doc.add(elem)
            else:
                book_doc.add(f)
        self.index.addDocument(book_doc)

        self.index_content(book, book_fields=[meta_fields['title'], meta_fields['author']])
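
    # Minimal indexing sketch (book: a catalogue.models.Book with an xml_file):
    #   index = Index()
    #   index.open()
    #   try:
    #       index.index_book(book)
    #   finally:
    #       index.close()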

    master_tags = [
        'dramat_wierszowany_l',
        'dramat_wierszowany_lp',
        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
        ]

    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']

    def create_book_doc(self, book):
        """
        Create a lucene document connected to the book.
        """
        doc = Document()
        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
        if book.parent is not None:
            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
        return doc

    def extract_metadata(self, book):
        fields = {}
        book_info = dcparser.parse(book.xml_file)

        print("extract metadata for book %s id=%d, thread %d" % (book.slug, book.id, current_thread().ident))

        fields['slug'] = Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)
        fields['tags'] = [Field("tags", t.name, Field.Store.NO, Field.Index.ANALYZED) for t in book.tags]
        fields['is_book'] = Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)

        for field in dcparser.BookInfo.FIELDS:
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we use validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    try:
                        fields[field.name] = Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)
                    except JavaError as je:
                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
                elif type_indicator == dcparser.as_person:
                    p = getattr(book_info, field.name)
                    if isinstance(p, dcparser.Person):
                        persons = unicode(p)
                    else:
                        persons = ', '.join(map(unicode, p))
                    fields[field.name] = Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)
                elif type_indicator == dcparser.as_date:
                    dt = getattr(book_info, field.name)
                    fields[field.name] = Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day),
                                               Field.Store.NO, Field.Index.NOT_ANALYZED)

        return fields

    def get_master(self, root):
        for master in root.iter():
            if master.tag in self.master_tags:
                return master

    def index_content(self, book, book_fields=[]):
        wld = WLDocument.from_file(book.xml_file.path)
        root = wld.edoc.getroot()

        master = self.get_master(root)
        if master is None:
            return

        def walker(node):
            yield node, None
            for child in list(node):
                for b, e in walker(child):
                    yield b, e
            yield None, node

        def fix_format(text):
            return re.sub("/$", "", text, flags=re.M)

        def add_part(snippets, **fields):
            doc = self.create_book_doc(book)
            for f in book_fields:
                doc.add(f)

            doc.add(NumericField('header_index', Field.Store.YES, True).setIntValue(fields["header_index"]))
            doc.add(NumericField("header_span", Field.Store.YES, True)
                    .setIntValue('header_span' in fields and fields['header_span'] or 1))
            doc.add(Field('header_type', fields["header_type"], Field.Store.YES, Field.Index.NOT_ANALYZED))

            doc.add(Field('content', fields["content"], Field.Store.NO, Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS_OFFSETS))

            snip_pos = snippets.add(fields["content"])
            doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
            doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[1]))

            if 'fragment_anchor' in fields:
                doc.add(Field("fragment_anchor", fields['fragment_anchor'],
                              Field.Store.YES, Field.Index.NOT_ANALYZED))

            if 'themes' in fields:
                for theme in fields['themes']:
                    doc.add(Field("themes", theme, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))
                    doc.add(Field("themes_pl", theme, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))

            return doc

        fragments = {}
        snippets = Snippets(book.id).open('w')

        for header, position in zip(list(master), range(len(master))):

            if header.tag in self.skip_header_tags:
                continue

            content = u' '.join([t for t in header.itertext()])
            content = fix_format(content)

            doc = add_part(snippets, header_index=position, header_type=header.tag, content=content)

            self.index.addDocument(doc)

            for start, end in walker(master):
                if start is not None and start.tag == 'begin':
                    fid = start.attrib['id'][1:]
                    fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
                    fragments[fid]['content'].append(start.tail)
                elif start is not None and start.tag == 'motyw':
                    fid = start.attrib['id'][1:]
                    fragments[fid]['themes'].append(start.text)
                    fragments[fid]['content'].append(start.tail)
                elif start is not None and start.tag == 'end':
                    fid = start.attrib['id'][1:]
                    if fid not in fragments:
                        continue  # a broken <end> node, skip it
                    frag = fragments[fid]

                    def jstr(l):
                        # debug helper: render a list, showing None items as u'(none)'
                        return u' '.join(map(
                            lambda x: x == None and u'(none)' or unicode(x),
                            l))

                    doc = add_part(snippets,
                                   header_type=frag['start_header'],
                                   header_index=frag['start_section'],
                                   header_span=position - frag['start_section'] + 1,
                                   fragment_anchor=fid,
                                   content=u' '.join(filter(lambda s: s is not None, frag['content'])),
                                   themes=frag['themes'])

                    self.index.addDocument(doc)
                elif start is not None:
                    for frag in fragments.values():
                        frag['content'].append(start.text)
                elif end is not None:
                    for frag in fragments.values():
                        frag['content'].append(end.tail)

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()


def log_exception_wrapper(f):
    def _wrap(*a):
        try:
            f(*a)
        except Exception as e:
            print("Error in indexing thread: %s" % e)
            traceback.print_exc()
            raise
    return _wrap


class ReusableIndex(Index):
    """
    Works like Index, but does not close/optimize the Lucene index
    until program exit (uses an atexit hook).
    This is useful for the importbooks command.

    If you cannot rely on atexit, use ReusableIndex.close_reusable() yourself.
    """
    index = None
    pool = None
    pool_jobs = None

    def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            print("opening index")
            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread())
            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)

    def index_book(self, *args, **kw):
        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
        ReusableIndex.pool_jobs.append(job)

    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
            print("wait for indexing to finish")
            for job in ReusableIndex.pool_jobs:
                job.wait()
                sys.stdout.write('.')
                sys.stdout.flush()
            ReusableIndex.pool.close()

            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None
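
# Batch-import sketch (as used by the importbooks command; `books` is any
# iterable of catalogue.models.Book):
#   idx = ReusableIndex()
#   idx.open()
#   for book in books:
#       idx.index_book(book)            # queued on the shared thread pool
#   ReusableIndex.close_reusable()      # or rely on the atexit hook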


class Search(IndexStore):
    def __init__(self, default_field="content"):
        IndexStore.__init__(self)
        self.analyzer = WLAnalyzer()  # PolishAnalyzer(Version.LUCENE_34)
        self.searcher = IndexSearcher(self.store, True)
        self.parser = QueryParser(Version.LUCENE_34, default_field,
                                  self.analyzer)

        self.parent_filter = TermsFilter()
        self.parent_filter.addTerm(Term("is_book", "true"))

    def query(self, query):
        return self.parser.parse(query)

    def wrapjoins(self, query, fields=[]):
        """
        This function modifies the query recursively, so that Term and
        Phrase queries matching the provided fields are wrapped in a
        BlockJoinQuery and thus delegated to child documents.
        """
        if BooleanQuery.instance_(query):
            qs = BooleanQuery.cast_(query)
            for clause in qs:
                clause = BooleanClause.cast_(clause)
                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
            return qs
        else:
            termset = HashSet()
            query.extractTerms(termset)
            for t in termset:
                if t.field() not in fields:
                    return query
            return BlockJoinQuery(query, self.parent_filter,
                                  BlockJoinQuery.ScoreMode.Total)

    def simple_search(self, query, max_results=50):
        """Returns (books, total_hits)"""
        tops = self.searcher.search(self.query(query), max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def search(self, query, max_results=50):
        query = self.query(query)
        query = self.wrapjoins(query, ["content", "themes"])

        tops = self.searcher.search(query, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def bsearch(self, query, max_results=50):
        q = self.query(query)
        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)

        tops = self.searcher.search(bjq, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
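
    # Query sketch (hypothetical query strings; parsed against the default
    # 'content' field with Polish analysis):
    #   s = Search()
    #   books, total = s.simple_search(u"Pan Tadeusz")
    #   books, total = s.search(u"dusza")   # also joins content/themes terms to child docs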

# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);

# while (tokenStream.incrementToken()) {
#     int startOffset = offsetAttribute.startOffset();
#     int endOffset = offsetAttribute.endOffset();
#     String term = charTermAttribute.toString();


class SearchResult(object):
    def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets_cb=None):
        if score:
            self.score = score
        else:
            self.score = scoreDocs.score

        self.hits = []
        self.snippets = []

        stored = searcher.doc(scoreDocs.doc)
        self.book_id = int(stored.get("book_id"))

        header_type = stored.get("header_type")
        sec = (header_type, int(stored.get("header_index")))
        header_span = stored.get('header_span')
        header_span = header_span is not None and int(header_span) or 1

        fragment = stored.get("fragment_anchor")

        hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets_cb': snippets_cb})

        self.hits.append(hit)

    def merge(self, other):
        if self.book_id != other.book_id:
            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
        self.hits += other.hits
        if other.score > self.score:
            self.score = other.score

    def add_snippets(self, snippets):
        self.snippets += snippets
        return self

    def get_book(self):
        return models.Book.objects.get(id=self.book_id)

    book = property(get_book)

    def get_parts(self):
        book = self.book

        def sections_covered(results):
            frags = filter(lambda r: r[1] is not None, results)
            sect = filter(lambda r: r[1] is None, results)
            sect = filter(lambda s: 0 == len(filter(
                lambda f: s[0][1] >= f[0][1] and s[0][1] < f[0][1] + f[0][2],
                frags)), sect)
            print "filtered, non overlapped sections: %s" % sect
            return frags + sect

        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
            + [{"fragment": book.fragments.get(anchor=f), '_score_key': f} for f in self.fragments]

        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
        print("bookid: %d parts: %s" % (self.book_id, parts))
        return parts

    parts = property(get_parts)

    def __unicode__(self):
        return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)

    @staticmethod
    def aggregate(*result_lists):
        books = {}
        for rl in result_lists:
            for r in rl:
                if r.book_id in books:
                    books[r.book_id].merge(r)
                    #print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
                else:
                    books[r.book_id] = r
        return books.values()

    def __cmp__(self, other):
        return cmp(self.score, other.score)


class MultiSearch(Search):
    """Class capable of IMDb-like searching"""

    def get_tokens(self, searched, field='content'):
        """Returns tokens analyzed by the proper (per-field) analyzer.
        The argument can be a StringReader, a string/unicode, or a list of tokens;
        in the last case the tokens are returned as-is (so they can be reused
        when the analyzer does not change).
        """
        if isinstance(searched, str) or isinstance(searched, unicode):
            searched = StringReader(searched)
        elif isinstance(searched, list):
            return searched

        tokens = self.analyzer.reusableTokenStream(field, searched)
        toks = []
        while tokens.incrementToken():
            cta = tokens.getAttribute(CharTermAttribute.class_)
            toks.append(cta.toString())
        return toks
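
    # Tokenization sketch (the SIMPLE/POLISH/KEYWORD pseudo-fields of WLAnalyzer
    # select the analyzer; hypothetical input):
    #   ms = MultiSearch()
    #   ms.get_tokens(u"Pan Tadeusz", field='SIMPLE')   # -> [u'pan', u'tadeusz']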

    def fuzziness(self, fuzzy):
        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
            return fuzzy
        else:
            return 0.5

    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
        if fuzzy:
            phrase = MultiPhraseQuery()
            for t in tokens:
                term = Term(field, t)
                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
                fuzzterms = []

                while True:
                    # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
                    ft = fuzzterm.term()
                    if ft:
                        fuzzterms.append(ft)
                    if not fuzzterm.next():
                        break

                if fuzzterms:
                    phrase.add(JArray('object')(fuzzterms, Term))
                else:
                    phrase.add(term)
        else:
            phrase = PhraseQuery()
            phrase.setSlop(slop)
            for t in tokens:
                term = Term(field, t)
                phrase.add(term)
        return phrase

    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
        q = BooleanQuery()
        for t in tokens:
            term = Term(field, t)
            if fuzzy:
                term = FuzzyQuery(term, self.fuzziness(fuzzy))
            else:
                term = TermQuery(term)
            q.add(BooleanClause(term, modal))
        return q
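
    # Query-building sketch (toks as returned by get_tokens above):
    #   phrase = ms.make_phrase(toks, field='title')
    #   anywhere = ms.make_term_query(toks, field='content',
    #                                 modal=BooleanClause.Occur.MUST)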

    def content_query(self, query):
        return BlockJoinQuery(query, self.parent_filter,
                              BlockJoinQuery.ScoreMode.Total)

    def search_perfect_book(self, searched, max_results=20, fuzzy=False):
        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in ['author', 'title']]

        books = []
        for q in qrys:
            top = self.searcher.search(q, max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found))
        return books

    def search_perfect_parts(self, searched, max_results=20, fuzzy=False):
        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]

        books = []
        for q in qrys:
            top = self.searcher.search(q, max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found).add_snippets(self.get_snippets(found, q)))

        return books

    def search_everywhere(self, searched, max_results=20, fuzzy=False):
        books = []

        # content only query : themes x content
        q = BooleanQuery()

        tokens = self.get_tokens(searched)
        q.add(BooleanClause(self.make_term_query(tokens, field='themes', fuzzy=fuzzy), BooleanClause.Occur.MUST))
        q.add(BooleanClause(self.make_term_query(tokens, field='content', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))

        topDocs = self.searcher.search(q, max_results)
        for found in topDocs.scoreDocs:
            books.append(SearchResult(self.searcher, found))

        # joined query themes/content x author/title/epochs/genres/kinds

        # in_meta = BooleanQuery()
        # in_content = BooleanQuery()

        # for fld in ['themes', 'content']:
        #     in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))

        # in_meta.add(BooleanClause(self.make_term_query(
        #     self.get_tokens(searched, field='author'), field='author', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))

        # for fld in ['title', 'epochs', 'genres', 'kinds']:
        #     in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=fuzzy), BooleanClause.Occur.SHOULD))

        # q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
        # in_content_join = self.content_query(in_content)
        # q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
        # # import pdb; pdb.set_trace()
        # collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)

        # self.searcher.search(q, collector)

        # top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)

        # for grp in top_groups.groups:
        #     for part in grp.scoreDocs:
        #         books.append(SearchResult(self.searcher, part, score=grp.maxScore))

        return books

    def multisearch(self, query, max_results=50):
        """
        - (phrase) OR -> content
        - (keywords) -> author
        """
        # queryreader = StringReader(query)
        # tokens = self.get_tokens(queryreader)

        # top_level = BooleanQuery()
        # Should = BooleanClause.Occur.SHOULD

        # phrase_level = BooleanQuery()
        # phrase_level.setBoost(1.3)

        # p_content = self.make_phrase(tokens, joined=True)
        # p_title = self.make_phrase(tokens, 'title')
        # p_author = self.make_phrase(tokens, 'author')

        # phrase_level.add(BooleanClause(p_content, Should))
        # phrase_level.add(BooleanClause(p_title, Should))
        # phrase_level.add(BooleanClause(p_author, Should))

        # kw_level = BooleanQuery()

        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
        # kw_level.add(j_themes, Should)
        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
        # j_con = self.make_term_query(tokens, joined=True)
        # kw_level.add(j_con, Should)

        # top_level.add(BooleanClause(phrase_level, Should))
        # top_level.add(BooleanClause(kw_level, Should))

    def book_search(self, query, filter=None, max_results=50, collector=None):
        tops = self.searcher.search(query, filter, max_results)
        # tops = self.searcher.search(p_content, max_results)

        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            b = models.Book.objects.get(id=doc.get("book_id"))
            bks.append(b)
            print "%s (%d) -> %f" % (b, b.id, found.score)
        return bks
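
    # book_search sketch (restricts hits to book-level documents):
    #   only_books = TermsFilter()
    #   only_books.addTerm(Term("is_book", "true"))
    #   ms.book_search(ms.query(u"lalka"), only_books)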

    def get_snippets(self, scoreDoc, query, field='content'):
        htmlFormatter = SimpleHTMLFormatter()
        highlighter = Highlighter(htmlFormatter, QueryScorer(query))

        stored = self.searcher.doc(scoreDoc.doc)

        # locate the raw text stored alongside the index in the snippet file
        snippets = Snippets(stored.get('book_id')).open()
        position = int(stored.get('snippets_position'))
        length = int(stored.get('snippets_length'))
        text = snippets.get((position, length))

        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
        # highlighter.getBestTextFragments(tokenStream, text, False, 10)
        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
        print('snips: %s' % snip)

        return snip

    @staticmethod
    def enum_to_array(enum):
        """
        Converts a lucene TermEnum to an array of Terms, suitable for
        adding to a query.
        """
        terms = []
        while True:
            t = enum.term()
            if t:
                terms.append(t)
            if not enum.next():
                break

        if terms:
            return JArray('object')(terms, Term)

    def search_tags(self, query, filter=None, max_results=40):
        tops = self.searcher.search(query, filter, max_results)

        tags = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            tag = models.Tag.objects.get(id=doc.get("tag_id"))
            tags.append(tag)
            print "%s (%d) -> %f" % (tag, tag.id, found.score)

        return tags

    def create_prefix_phrase(self, toks, field):
        q = MultiPhraseQuery()
        for i in range(len(toks)):
            t = Term(field, toks[i])
            if i == len(toks) - 1:
                pterms = MultiSearch.enum_to_array(PrefixTermEnum(self.searcher.getIndexReader(), t))
                if pterms:
                    q.add(pterms)
                else:
                    q.add(t)
            else:
                q.add(t)

        return q

    def hint_tags(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')
        top = BooleanQuery()

        for field in ['tag_name', 'tag_name_pl']:
            q = self.create_prefix_phrase(toks, field)
            top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))

        book_cat = TermsFilter()
        book_cat.addTerm(Term("tag_category", "book"))

        no_book_cat = BooleanFilter()
        no_book_cat.add(FilterClause(book_cat, BooleanClause.Occur.MUST_NOT))

        return self.search_tags(top, no_book_cat, max_results=max_results)
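
    # Autocomplete sketch (prefix match on the last token of the typed string;
    # tag hints exclude the 'book' category):
    #   ms.hint_tags(u"roman")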

    def hint_books(self, string, max_results=50):
        toks = self.get_tokens(string, field='SIMPLE')

        q = self.create_prefix_phrase(toks, 'title')
        only_books = TermsFilter()
        only_books.addTerm(Term("is_book", "true"))

        return self.book_search(q, only_books, max_results=max_results)

    def filter_by_tags(self, tags):
        q = BooleanQuery()

        for tag in tags:
            toks = self.get_tokens(tag.name, field='tags')
            tag_phrase = PhraseQuery()
            for tok in toks:
                tag_phrase.add(Term('tags', tok))
            q.add(BooleanClause(tag_phrase, BooleanClause.Occur.MUST))

        return QueryWrapperFilter(q)

    def filtered_categories(self, tags):
        cats = {}
        for t in tags:
            cats[t.category] = True
        return cats.keys()
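
    # Filtering sketch (tags: objects with .name and .category, e.g. catalogue tags):
    #   flt = ms.filter_by_tags(tags)
    #   ms.book_search(ms.query(u"szabla"), flt)
    #   ms.filtered_categories(tags)   # categories present among the given tags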