# -*- coding: utf-8 -*-

from django.conf import settings
from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
    NumericField, Version, Document, JavaError, IndexSearcher, \
    QueryParser, Term, PerFieldAnalyzerWrapper, \
    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
    HashSet, BooleanClause, CharTermAttribute, \
    PhraseQuery, StringReader, TermQuery, Sort, \
    initVM, CLASSPATH
JVM = initVM(CLASSPATH)

import sys
import os
import errno
import atexit
import traceback

from librarian import dcparser
from librarian.parser import WLDocument
import catalogue.models
from multiprocessing.pool import ThreadPool
from threading import current_thread


class WLAnalyzer(PerFieldAnalyzerWrapper):
    def __init__(self):
        polish = PolishAnalyzer(Version.LUCENE_34)
        simple = SimpleAnalyzer(Version.LUCENE_34)
        keyword = KeywordAnalyzer(Version.LUCENE_34)
        # not sure if needed: there's NOT_ANALYZED meaning basically the same

        PerFieldAnalyzerWrapper.__init__(self, polish)

        self.addAnalyzer("tags", simple)
        self.addAnalyzer("technical_editors", simple)
        self.addAnalyzer("editors", simple)
        self.addAnalyzer("url", keyword)
        self.addAnalyzer("source_url", keyword)
        self.addAnalyzer("source_name", simple)
        self.addAnalyzer("publisher", simple)
        self.addAnalyzer("author", simple)
        self.addAnalyzer("is_book", keyword)

        # self.addAnalyzer("fragment_anchor", keyword)


class IndexStore(object):
    def __init__(self):
        self.make_index_dir()
        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))

    def make_index_dir(self):
        try:
            os.makedirs(settings.SEARCH_INDEX)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise


class Index(IndexStore):
    def __init__(self, analyzer=None):
        IndexStore.__init__(self)
        self.index = None
        if not analyzer:
            analyzer = WLAnalyzer()
        self.analyzer = analyzer

    def open(self, analyzer=None):
        if self.index:
            raise Exception("Index is already opened")
        self.index = IndexWriter(self.store, self.analyzer,
                                 IndexWriter.MaxFieldLength.LIMITED)
        return self.index

    def close(self):
        self.index.optimize()
        self.index.close()
        self.index = None

    def remove_book(self, book):
        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)

    def index_book(self, book, overwrite=True):
        if overwrite:
            self.remove_book(book)

        doc = self.extract_metadata(book)
        parts = self.extract_content(book)
        # block indexing: child (part) documents first, parent (book) document last
        block = ArrayList().of_(Document)
        for p in parts:
            block.add(p)
        block.add(doc)
        self.index.addDocuments(block)
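
    # The block handed to addDocuments() above is ordered children-first,
    # parent-last:
    #
    #   [part_doc_1, ..., part_doc_N, book_doc]
    #
    # Lucene's block-join search (BlockJoinQuery, used by Search below)
    # relies on addDocuments() keeping such a block contiguous, with the
    # parent document stored last in the segment.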

    master_tags = [
        'dramat_wierszowany_l',
        'dramat_wierszowany_lp',
        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
        ]

    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']

    def create_book_doc(self, book):
        """
        Create a Lucene document connected to the book.
        """
        doc = Document()
        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
        if book.parent is not None:
            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
        return doc

    def extract_metadata(self, book):
        book_info = dcparser.parse(book.xml_file)

        print("extract metadata for book %s id=%d, thread %d" % (book.slug, book.id, current_thread().ident))

        doc = self.create_book_doc(book)
        doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
        # doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
        doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))

        for field in dcparser.BookInfo.FIELDS:
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we use the validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    try:
                        doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED))
                    except JavaError as je:
                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
                elif type_indicator == dcparser.as_person:
                    p = getattr(book_info, field.name)
                    if isinstance(p, dcparser.Person):
                        persons = unicode(p)
                    else:
                        persons = ', '.join(map(unicode, p))
                    doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED))
                elif type_indicator == dcparser.as_date:
                    dt = getattr(book_info, field.name)
                    doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day),
                                  Field.Store.NO, Field.Index.NOT_ANALYZED))
        return doc

    def get_master(self, root):
        for master in root.iter():
            if master.tag in self.master_tags:
                return master

    def extract_content(self, book):
        wld = WLDocument.from_file(book.xml_file.path)
        root = wld.edoc.getroot()

        # First we build a sequence of top-level items:
        # book_id
        # header_index - the 0-indexed position of the header element
        # content
        master = self.get_master(root)
        if master is None:
            return []

        header_docs = []
        for position, header in enumerate(master):
            if header.tag in self.skip_header_tags:
                continue
            doc = self.create_book_doc(book)
            doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
            doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
            content = u' '.join(header.itertext())
            doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED))
            header_docs.append(doc)

        def walker(node):
            # depth-first traversal yielding (node, None) on entering a node
            # and (None, node) on leaving it
            yield node, None
            for child in list(node):
                for b, e in walker(child):
                    yield b, e
            yield None, node

        # Then we create a document for each fragment:
        # fragment_anchor - the anchor
        # themes - list of themes [not indexed]
        fragment_docs = []
        # will contain fragment id -> {'content': [], 'themes': []}
        fragments = {}
        for start, end in walker(master):
            if start is not None and start.tag == 'begin':
                fid = start.attrib['id'][1:]
                fragments[fid] = {'content': [], 'themes': []}
                fragments[fid]['content'].append(start.tail)
            elif start is not None and start.tag == 'motyw':
                fid = start.attrib['id'][1:]
                fragments[fid]['themes'].append(start.text)
                fragments[fid]['content'].append(start.tail)
            elif start is not None and start.tag == 'end':
                fid = start.attrib['id'][1:]
                if fid not in fragments:
                    continue  # a broken <end> node, skip it
                frag = fragments.pop(fid)

                def jstr(l):
                    # join helper, rendering None as u'(none)'
                    return u' '.join(map(
                        lambda x: u'(none)' if x is None else unicode(x),
                        l))

                doc = self.create_book_doc(book)
                doc.add(Field("fragment_anchor", fid,
                              Field.Store.YES, Field.Index.NOT_ANALYZED))
                doc.add(Field("content",
                              u' '.join(filter(lambda s: s is not None, frag['content'])),
                              Field.Store.YES, Field.Index.ANALYZED))
                doc.add(Field("themes",
                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
                              Field.Store.NO, Field.Index.ANALYZED))

                fragment_docs.append(doc)
            elif start is not None:
                for frag in fragments.values():
                    frag['content'].append(start.text)
            elif end is not None:
                for frag in fragments.values():
                    frag['content'].append(end.tail)

        return header_docs + fragment_docs

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()
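
    # With both context-manager hooks in place, indexing can be written as
    # (sketch; `book` stands for a hypothetical catalogue.models.Book instance):
    #
    #   with Index() as index:
    #       index.index_book(book)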


def log_exception_wrapper(f):
    def _wrap(*a):
        try:
            f(*a)
        except Exception as e:
            print("Error in indexing thread: %s" % e)
            traceback.print_exc()
            raise
    return _wrap


class ReusableIndex(Index):
    """
    Works like Index, but does not close/optimize the Lucene index
    until program exit (uses an atexit hook).
    This is useful for the importbooks command.

    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
    """
    index = None
    pool = None
    pool_jobs = None

    def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            print("opening index")
            # worker threads must attach themselves to the JVM before touching Lucene
            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread())
            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)

    def index_book(self, *args, **kw):
        job = ReusableIndex.pool.apply_async(
            log_exception_wrapper(Index.index_book), (self,) + args, kw)
        ReusableIndex.pool_jobs.append(job)

    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
            print("waiting for indexing to finish")
            for job in ReusableIndex.pool_jobs:
                job.get()
                sys.stdout.write('.')
                sys.stdout.flush()
            ReusableIndex.pool.close()

            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None

    def close(self):
        # deliberately a no-op: the shared index is closed by close_reusable()
        pass
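

# ReusableIndex usage sketch (illustrative; `books` stands for a hypothetical
# iterable of catalogue.models.Book instances):
#
#   index = ReusableIndex()
#   index.open()                # reuses the shared writer when already open
#   for book in books:
#       index.index_book(book)  # queued on the thread pool
#   # the atexit hook (or an explicit ReusableIndex.close_reusable())
#   # waits for the queued jobs, then optimizes and closes the index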


class Search(IndexStore):
    def __init__(self, default_field="content"):
        IndexStore.__init__(self)
        self.analyzer = PolishAnalyzer(Version.LUCENE_34)
        ## self.analyzer = WLAnalyzer()
        self.searcher = IndexSearcher(self.store, True)
        self.parser = QueryParser(Version.LUCENE_34, default_field,
                                  self.analyzer)

        # filter matching only top-level (book) documents
        self.parent_filter = TermsFilter()
        self.parent_filter.addTerm(Term("is_book", "true"))

    def query(self, query):
        return self.parser.parse(query)

    def wrapjoins(self, query, fields=[]):
        """
        Modifies the query recursively, so that Term and Phrase queries
        matching the provided fields are wrapped in a BlockJoinQuery
        and thus delegated to child documents.
        """
        if BooleanQuery.instance_(query):
            qs = BooleanQuery.cast_(query)
            for clause in qs:
                clause = BooleanClause.cast_(clause)
                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
            return qs
        else:
            termset = HashSet()
            query.extractTerms(termset)
            for t in termset:
                t = Term.cast_(t)
                if t.field() not in fields:
                    return query
            return BlockJoinQuery(query, self.parent_filter,
                                  BlockJoinQuery.ScoreMode.Total)
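
    # Sketch of the effect (hypothetical query string; assumes an index
    # built by Index above):
    #
    #   s = Search()
    #   q = s.wrapjoins(s.query(u'content:pies AND author:mickiewicz'),
    #                   ["content", "themes"])
    #   # the content clause now scores child (part) documents and is joined
    #   # up to parent (book) documents; the author clause is left untouched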

    def simple_search(self, query, max_results=50):
        """Returns (books, total_hits)."""
        tops = self.searcher.search(self.query(query), max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def search(self, query, max_results=50):
        query = self.query(query)
        query = self.wrapjoins(query, ["content", "themes"])

        tops = self.searcher.search(query, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)

    def bsearch(self, query, max_results=50):
        q = self.query(query)
        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)

        tops = self.searcher.search(bjq, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
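
    # Search usage sketch (hypothetical query strings; requires a populated index):
    #
    #   s = Search()
    #   books, total = s.simple_search(u'pies')    # parent and child docs alike
    #   books, total = s.search(u'pies')           # content/themes joined to parents
    #   books, total = s.bsearch(u'content:pies')  # whole query joined to parents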


# Java equivalent of the token iteration used by MultiSearch.get_tokens() below:
#
# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
#
# while (tokenStream.incrementToken()) {
#     int startOffset = offsetAttribute.startOffset();
#     int endOffset = offsetAttribute.endOffset();
#     String term = charTermAttribute.toString();
# }


class SearchResult(object):
    def __init__(self, searcher, scoreDocs, score=None):
        if score:
            self.score = score
        else:
            self.score = scoreDocs.score

        self.fragments = []
        self.sections = []
        self.scores = {}

        stored = searcher.doc(scoreDocs.doc)
        self.book_id = int(stored.get("book_id"))

        fragment = stored.get("fragment_anchor")
        if fragment:
            self.fragments.append(fragment)
            self.scores[fragment] = scoreDocs.score

        header_type = stored.get("header_type")
        if header_type:
            sec = (header_type, int(stored.get("header_index")))
            self.sections.append(sec)
            self.scores[sec] = scoreDocs.score

    def get_book(self):
        return catalogue.models.Book.objects.get(id=self.book_id)

    book = property(get_book)

    def get_parts(self):
        book = self.book
        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
            + [{"fragment": book.fragments.get(anchor=f), '_score_key': f} for f in self.fragments]

        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
        print("bookid: %d parts: %s" % (self.book_id, parts))
        return parts

    parts = property(get_parts)

    def merge(self, other):
        if self.book_id != other.book_id:
            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
        self.fragments += other.fragments
        self.sections += other.sections
        self.scores.update(other.scores)
        if other.score > self.score:
            self.score = other.score
        return self

    def __unicode__(self):
        return u'SearchResult(book_id=%d, score=%f)' % (self.book_id, self.score)

    @staticmethod
    def aggregate(*result_lists):
        books = {}
        for rl in result_lists:
            for r in rl:
                if r.book_id in books:
                    books[r.book_id].merge(r)
                    # print(u"already have one with score %f, and this one has score %f" % (books[r.book_id].score, r.score))
                else:
                    books[r.book_id] = r
        return books.values()
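
    # Aggregation sketch (illustrative; `ms` stands for a hypothetical
    # MultiSearch instance, defined below):
    #
    #   tokens = ms.get_tokens(u'pan tadeusz')
    #   results = SearchResult.aggregate(ms.search_perfect_book(tokens),
    #                                    ms.search_perfect_parts(tokens))
    #   results.sort(reverse=True)  # __cmp__ below orders results by score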

    def __cmp__(self, other):
        return cmp(self.score, other.score)


class MultiSearch(Search):
    """Class capable of IMDb-like searching."""
    def get_tokens(self, queryreader):
        if isinstance(queryreader, str) or isinstance(queryreader, unicode):
            queryreader = StringReader(queryreader)
        queryreader.reset()
        tokens = self.analyzer.reusableTokenStream('content', queryreader)
        toks = []
        while tokens.incrementToken():
            cta = tokens.getAttribute(CharTermAttribute.class_)
            toks.append(cta.toString())
        return toks

    def make_phrase(self, tokens, field='content', slop=2):
        phrase = PhraseQuery()
        phrase.setSlop(slop)
        for t in tokens:
            term = Term(field, t)
            phrase.add(term)
        return phrase

    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD):
        q = BooleanQuery()
        for t in tokens:
            term = Term(field, t)
            q.add(BooleanClause(TermQuery(term), modal))
        return q
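
    # Query-building sketch (hypothetical input):
    #
    #   ms = MultiSearch()
    #   tokens = ms.get_tokens(u'pan tadeusz')
    #   phrase = ms.make_phrase(tokens, field='title')       # "pan tadeusz"~2
    #   terms = ms.make_term_query(tokens, field='content')  # pan OR tadeusz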

    def content_query(self, query):
        return BlockJoinQuery(query, self.parent_filter,
                              BlockJoinQuery.ScoreMode.Total)

    def search_perfect_book(self, tokens, max_results=20):
        qrys = [self.make_phrase(tokens, field=fld) for fld in ['author', 'title']]

        books = []
        for q in qrys:
            top = self.searcher.search(q, max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found))
        return books

    def search_perfect_parts(self, tokens, max_results=20):
        qrys = [self.make_phrase(tokens, field=fld) for fld in ['content']]

        books = []
        for q in qrys:
            top = self.searcher.search(q, max_results)
            for found in top.scoreDocs:
                books.append(SearchResult(self.searcher, found))
        return books

    def search_everywhere(self, tokens, max_results=20):
        q = BooleanQuery()
        in_meta = BooleanQuery()
        in_content = BooleanQuery()

        for fld in ['themes', 'content']:
            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))

        for fld in ['author', 'title', 'epochs', 'genres', 'kinds']:
            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))

        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
        in_content_join = self.content_query(in_content)
        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))

        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)

        self.searcher.search(q, collector)

        books = []
        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
        if top_groups:
            for grp in top_groups.groups:
                for part in grp.scoreDocs:
                    books.append(SearchResult(self.searcher, part, score=grp.maxScore))
        return books
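
    # search_everywhere usage sketch (hypothetical input; requires a populated index):
    #
    #   ms = MultiSearch()
    #   hits = ms.search_everywhere(ms.get_tokens(u'zemsta'))
    #   for r in SearchResult.aggregate(hits):
    #       print("%s: %f" % (r.book, r.score))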

    def multisearch(self, query, max_results=50):
        """
        Search strategy:
        - (phrase) OR -> content
                      -> title
                      -> author
        - (keywords)  -> author
                      -> themes
                      -> tags
                      -> content
        """
        # queryreader = StringReader(query)
        # tokens = self.get_tokens(queryreader)

        # top_level = BooleanQuery()
        # Should = BooleanClause.Occur.SHOULD

        # phrase_level = BooleanQuery()
        # phrase_level.setBoost(1.3)

        # p_content = self.make_phrase(tokens, joined=True)
        # p_title = self.make_phrase(tokens, 'title')
        # p_author = self.make_phrase(tokens, 'author')

        # phrase_level.add(BooleanClause(p_content, Should))
        # phrase_level.add(BooleanClause(p_title, Should))
        # phrase_level.add(BooleanClause(p_author, Should))

        # kw_level = BooleanQuery()

        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
        # kw_level.add(j_themes, Should)
        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
        # j_con = self.make_term_query(tokens, joined=True)
        # kw_level.add(j_con, Should)

        # top_level.add(BooleanClause(phrase_level, Should))
        # top_level.add(BooleanClause(kw_level, Should))

        return None

    def do_search(self, query, max_results=50, collector=None):
        tops = self.searcher.search(query, max_results)
        # tops = self.searcher.search(p_content, max_results)

        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
            bks.append(b)
            print("%s (%d) -> %f" % (b, b.id, found.score))
        return (bks, tops.totalHits)