# -*- coding: utf-8 -*-
from django.conf import settings
from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
    NumericField, Version, Document, JavaError, IndexSearcher, \
    QueryParser, Term, TermQuery, PerFieldAnalyzerWrapper, \
    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
    HashSet, BooleanClause, CharTermAttribute, \
    PhraseQuery, StringReader

import os
import errno
import atexit

from librarian import dcparser
from librarian.parser import WLDocument
import catalogue.models
from multiprocessing.pool import ThreadPool

class WLAnalyzer(PerFieldAnalyzerWrapper):
    def __init__(self):
        polish = PolishAnalyzer(Version.LUCENE_34)
        simple = SimpleAnalyzer(Version.LUCENE_34)
        keyword = KeywordAnalyzer(Version.LUCENE_34)
        # not sure if needed: there's NOT_ANALYZED, which means basically the same

        PerFieldAnalyzerWrapper.__init__(self, polish)

        self.addAnalyzer("tags", simple)
        self.addAnalyzer("technical_editors", simple)
        self.addAnalyzer("editors", simple)
        self.addAnalyzer("url", keyword)
        self.addAnalyzer("source_url", keyword)
        self.addAnalyzer("source_name", simple)
        self.addAnalyzer("publisher", simple)
        self.addAnalyzer("author", simple)
        self.addAnalyzer("is_book", keyword)

        # self.addAnalyzer("fragment_anchor", keyword)
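
    # For illustration (a sketch, not part of the original code): with this
    # wrapper a field like "author" goes through SimpleAnalyzer, so
    # u"Adam Mickiewicz" is indexed as the terms [u'adam', u'mickiewicz'],
    # while "url" goes through KeywordAnalyzer and is kept as a single term;
    # all remaining fields (e.g. "content") fall back to PolishAnalyzer
    # with its stemming.
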
class IndexStore(object):
    def __init__(self):
        self.make_index_dir()
        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))

    def make_index_dir(self):
        try:
            os.makedirs(settings.SEARCH_INDEX)
        except OSError as exc:
            # an already existing index directory is fine
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise

class Index(IndexStore):
    def __init__(self, analyzer=None):
        IndexStore.__init__(self)
        self.index = None
        if not analyzer:
            analyzer = WLAnalyzer()
        self.analyzer = analyzer

    def open(self, analyzer=None):
        if self.index:
            raise Exception("Index is already opened")
        self.index = IndexWriter(self.store, self.analyzer,
                                 IndexWriter.MaxFieldLength.LIMITED)
        return self.index

    def close(self):
        self.index.optimize()
        self.index.close()
        self.index = None
    def remove_book(self, book):
        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
        self.index.deleteDocuments(q)
    def index_book(self, book, overwrite=True):
        if overwrite:
            self.remove_book(book)

        doc = self.extract_metadata(book)
        parts = self.extract_content(book)
        block = ArrayList().of_(Document)

        # Lucene's block join requires the whole block to be indexed
        # together, with the parent (book) document added last.
        for p in parts:
            block.add(p)
        block.add(doc)
        self.index.addDocuments(block)
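
    # Example usage (a sketch, assuming a Django context with
    # settings.SEARCH_INDEX configured and the catalogue populated):
    #
    #   with Index() as index:
    #       for book in catalogue.models.Book.objects.all():
    #           index.index_book(book)
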
    master_tags = [
        'opowiadanie', 'powiesc',
        'dramat_wierszowany_l',
        'dramat_wierszowany_lp',
        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
        'wywiad',
        ]

    skip_header_tags = ['autor_utworu', 'nazwa_utworu']
    def create_book_doc(self, book):
        """
        Create a Lucene document referring to the book.
        """
        doc = Document()
        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
        if book.parent is not None:
            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
        return doc
    def extract_metadata(self, book):
        book_info = dcparser.parse(book.xml_file)

        doc = self.create_book_doc(book)
        doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
        doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
        doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))

        for field in dcparser.BookInfo.FIELDS:
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we use the validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    if field.multiple:
                        s = ', '.join(s)
                    try:
                        doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED))
                    except JavaError as je:
                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
                elif type_indicator == dcparser.as_person:
                    p = getattr(book_info, field.name)
                    if isinstance(p, dcparser.Person):
                        persons = unicode(p)
                    else:
                        persons = ', '.join(map(unicode, p))
                    doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED))
                elif type_indicator == dcparser.as_date:
                    dt = getattr(book_info, field.name)
                    doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day),
                                  Field.Store.NO, Field.Index.NOT_ANALYZED))
        return doc
    def get_master(self, root):
        for master in root.iter():
            if master.tag in self.master_tags:
                return master
    def extract_content(self, book):
        wld = WLDocument.from_file(book.xml_file.path)
        root = wld.edoc.getroot()

        # First we build a document for each top-level item of the master,
        # with: book_id, header_index (the 0-indexed position of the header
        # element), header_type and content.
        master = self.get_master(root)
        if master is None:
            return []

        header_docs = []
        for position, header in enumerate(master):
            if header.tag in self.skip_header_tags:
                continue
            doc = self.create_book_doc(book)
            doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
            doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
            content = u' '.join(header.itertext())
            doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
            header_docs.append(doc)
        def walker(node):
            # depth-first traversal yielding (node, None) on entering
            # a node and (None, node) on leaving it
            yield node, None
            for child in list(node):
                for b, e in walker(child):
                    yield b, e
            yield None, node
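
        # For a tree like <a><b/><c/></a>, walker(a) yields, in order:
        # (a, None), (b, None), (None, b), (c, None), (None, c), (None, a),
        # i.e. one opening and one closing event for every node.
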
        # Then we create a document for each fragment, with:
        # fragment_anchor - the anchor
        # themes - list of themes [not indexed]
        fragment_docs = []
        # will contain: fragment id -> {'content': [], 'themes': []}
        fragments = {}
        for start, end in walker(master):
            if start is not None and start.tag == 'begin':
                fid = start.attrib['id'][1:]
                fragments[fid] = {'content': [], 'themes': []}
                fragments[fid]['content'].append(start.tail)
            elif start is not None and start.tag == 'motyw':
                fid = start.attrib['id'][1:]
                fragments[fid]['themes'].append(start.text)
                fragments[fid]['content'].append(start.tail)
            elif start is not None and start.tag == 'end':
                fid = start.attrib['id'][1:]
                if fid not in fragments:
                    continue  # a broken <end> node, skip it
                frag = fragments[fid]
                del fragments[fid]

                def jstr(l):
                    # debugging helper: join a list, marking None items
                    return u' '.join(map(
                        lambda x: u'(none)' if x is None else unicode(x),
                        l))

                doc = self.create_book_doc(book)
                doc.add(Field("fragment_anchor", fid,
                              Field.Store.YES, Field.Index.NOT_ANALYZED))
                doc.add(Field("content",
                              u' '.join(filter(lambda s: s is not None, frag['content'])),
                              Field.Store.NO, Field.Index.ANALYZED))
                doc.add(Field("themes",
                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
                              Field.Store.NO, Field.Index.ANALYZED))

                fragment_docs.append(doc)
            elif start is not None:
                for frag in fragments.values():
                    frag['content'].append(start.text)
            elif end is not None:
                for frag in fragments.values():
                    frag['content'].append(end.tail)

        return header_docs + fragment_docs
    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()

class ReusableIndex(Index):
    """
    Works like Index, but does not close/optimize the Lucene index
    until program exit (uses an atexit hook).
    This is useful for the importbooks command.

    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
    """
    index = None
    pool = None
    pool_jobs = None
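
    # Example (a sketch of how an import command might use it):
    #
    #   idx = ReusableIndex()
    #   idx.open()
    #   for book in books:
    #       idx.index_book(book)   # queued on the shared thread pool
    #   # the shared writer is optimized and closed by the atexit hook
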
    def open(self, analyzer=None, threads=4):
        if ReusableIndex.index is not None:
            self.index = ReusableIndex.index
        else:
            ReusableIndex.pool = ThreadPool(threads)
            ReusableIndex.pool_jobs = []
            Index.open(self, analyzer)
            ReusableIndex.index = self.index
            atexit.register(ReusableIndex.close_reusable)
    def index_book(self, *args, **kw):
        # index in the background; Index.index_book is an unbound method,
        # so self must be passed along explicitly
        job = ReusableIndex.pool.apply_async(Index.index_book, (self,) + args, kw)
        ReusableIndex.pool_jobs.append(job)
    @staticmethod
    def close_reusable():
        if ReusableIndex.index is not None:
            for job in ReusableIndex.pool_jobs:
                job.wait()
            ReusableIndex.pool.close()

            ReusableIndex.index.optimize()
            ReusableIndex.index.close()
            ReusableIndex.index = None

    def close(self):
        # closing is deferred to close_reusable()
        pass

class Search(IndexStore):
    def __init__(self, default_field="content"):
        IndexStore.__init__(self)
        self.analyzer = PolishAnalyzer(Version.LUCENE_34)
        ## self.analyzer = WLAnalyzer()
        self.searcher = IndexSearcher(self.store, True)
        self.parser = QueryParser(Version.LUCENE_34, default_field,
                                  self.analyzer)

        # filter marking book (parent) documents for block joins
        self.parent_filter = TermsFilter()
        self.parent_filter.addTerm(Term("is_book", "true"))
    def query(self, query):
        return self.parser.parse(query)
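
    # The parser accepts standard Lucene query syntax, e.g. (a sketch):
    #
    #   self.query(u"lokomotywa")                  # default field: content
    #   self.query(u"author:Tuwim AND lokomotywa")
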
    def wrapjoins(self, query, fields=[]):
        """
        Recursively modify the query so that contained Term and Phrase
        queries matching the provided fields are wrapped in a
        BlockJoinQuery and thus delegated to child documents.
        """
        if BooleanQuery.instance_(query):
            qs = BooleanQuery.cast_(query)
            for clause in qs:
                clause = BooleanClause.cast_(clause)
                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
            return qs
        else:
            termset = HashSet()
            query.extractTerms(termset)
            for t in termset:
                t = Term.cast_(t)
                if t.field() not in fields:
                    return query
            return BlockJoinQuery(query, self.parent_filter,
                                  BlockJoinQuery.ScoreMode.Total)
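
    # For instance (a sketch): given the parsed query
    #   content:lokomotywa AND author:tuwim
    # with fields=["content"], the content:lokomotywa part gets wrapped in a
    # BlockJoinQuery, so it matches child (header/fragment) documents and
    # joins up to the parent book, while author:tuwim is matched directly
    # against the book document.
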
    def simple_search(self, query, max_results=50):
        """Returns (books, total_hits)."""
        tops = self.searcher.search(self.query(query), max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
    def search(self, query, max_results=50):
        query = self.query(query)
        query = self.wrapjoins(query, ["content", "themes"])

        tops = self.searcher.search(query, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
    def bsearch(self, query, max_results=50):
        q = self.query(query)
        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)

        tops = self.searcher.search(bjq, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
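
    # The three entry points compared (a sketch):
    #
    #   s = Search()
    #   s.simple_search(u"lokomotywa")  # flat query over all documents
    #   s.search(u"lokomotywa")         # content/themes clauses joined to children
    #   s.bsearch(u"lokomotywa")        # whole query block-joined, Avg child score
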
# Reference: token stream iteration in Java Lucene:
#
# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
#
# while (tokenStream.incrementToken()) {
#     int startOffset = offsetAttribute.startOffset();
#     int endOffset = offsetAttribute.endOffset();
#     String term = charTermAttribute.toString();
# }

class MultiSearch(Search):
    """Class capable of IMDb-like searching."""
    def get_tokens(self, queryreader):
        if isinstance(queryreader, (str, unicode)):
            queryreader = StringReader(queryreader)
        queryreader.reset()
        tokens = self.analyzer.reusableTokenStream('content', queryreader)
        toks = []
        while tokens.incrementToken():
            cta = tokens.getAttribute(CharTermAttribute.class_)
            # copy the term out: the attribute object is reused
            # between calls to incrementToken()
            toks.append(cta.toString())
        return toks
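
    # For example (a sketch; the exact output depends on PolishAnalyzer's
    # stemming and stopword list):
    #
    #   self.get_tokens(u"Pana Tadeusza")  # -> something like [u'pan', u'tadeusz']
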
    def make_phrase(self, tokens, field='content', joined=False):
        phrase = PhraseQuery()
        for t in tokens:
            term = Term(field, t)
            phrase.add(term)
        if joined:
            phrase = self.content_query(phrase)
        return phrase
    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, joined=False):
        q = BooleanQuery()
        for t in tokens:
            term = Term(field, t)
            # BooleanClause expects a Query, so wrap the bare Term
            q.add(BooleanClause(TermQuery(term), modal))
        if joined:
            q = self.content_query(q)
        return q
    def content_query(self, query):
        # wrap a child-level (content) query so it scores parent book documents
        return BlockJoinQuery(query, self.parent_filter,
                              BlockJoinQuery.ScoreMode.Total)
    def multisearch(self, query, max_results=50):
        """
        Search strategy:
        - (phrase) OR  -> content
                       -> title
                       -> author
        - (keywords)   -> author
                       -> themes
                       -> tags
                       -> content
        """
        queryreader = StringReader(query)
        tokens = self.get_tokens(queryreader)
        top_level = BooleanQuery()
        Should = BooleanClause.Occur.SHOULD

        phrase_level = BooleanQuery()

        p_content = self.make_phrase(tokens, joined=True)
        p_title = self.make_phrase(tokens, 'title')
        p_author = self.make_phrase(tokens, 'author')

        phrase_level.add(BooleanClause(p_content, Should))
        phrase_level.add(BooleanClause(p_title, Should))
        phrase_level.add(BooleanClause(p_author, Should))

        kw_level = BooleanQuery()

        kw_level.add(self.make_term_query(tokens, 'author'), Should)
        kw_level.add(self.make_term_query(tokens, 'themes', joined=True), Should)
        kw_level.add(self.make_term_query(tokens, 'tags'), Should)
        kw_level.add(self.make_term_query(tokens, joined=True), Should)

        top_level.add(BooleanClause(phrase_level, Should))
        top_level.add(BooleanClause(kw_level, Should))
        tops = self.searcher.search(top_level, max_results)
        bks = []
        for found in tops.scoreDocs:
            doc = self.searcher.doc(found.doc)
            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
        return (bks, tops.totalHits)
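
    # Example usage (a sketch):
    #
    #   ms = MultiSearch()
    #   books, total = ms.multisearch(u"pan tadeusz soplicowo")
    #   # books: catalogue.models.Book instances, ranked so that phrase
    #   # matches in content/title/author outscore bare keyword matches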