SimpleAnalyzer, PolishAnalyzer, ArrayList, \
KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
BlockJoinQuery, BlockJoinCollector, TermsFilter, \
- HashSet, BooleanClause, Term
+ HashSet, BooleanClause, Term, CharTermAttribute, \
+    PhraseQuery, StringReader, TermQuery
# KeywordAnalyzer
+import sys
import os
import errno
from librarian import dcparser
from librarian.parser import WLDocument
-from catalogue.models import Book
+import catalogue.models
+from multiprocessing.pool import ThreadPool
+import atexit
class WLAnalyzer(PerFieldAnalyzerWrapper):
def index_book(self, book, overwrite=True):
if overwrite:
self.remove_book(book)
+
doc = self.extract_metadata(book)
parts = self.extract_content(book)
'wywiad'
]
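+    # Header elements skipped when indexing content: 'autor_utworu' (author of
+    # the work) and 'nazwa_utworu' (title of the work) are assumed to be covered
+    # already by the metadata fields, so extract_content() skips them below.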
+ skip_header_tags = ['autor_utworu', 'nazwa_utworu']
+
def create_book_doc(self, book):
"""
Create a lucene document connected to the book
if master.tag in self.master_tags:
return master
+
def extract_content(self, book):
wld = WLDocument.from_file(book.xml_file.path)
root = wld.edoc.getroot()
# header_index - the 0-indexed position of header element.
# content
master = self.get_master(root)
+ if master is None:
+ return []
+
header_docs = []
for header, position in zip(list(master), range(len(master))):
- print("header %s @%d" % (header, position))
+ if header.tag in self.skip_header_tags:
+ continue
doc = self.create_book_doc(book)
doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
        # will contain (fragment id -> {'content': [], 'themes': []})
fragments = {}
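+        # walker() presumably yields (opening, closing) element pairs in document
+        # order; <begin>/<end> marker nodes delimit the themed fragments handled below.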
for start, end in walker(master):
- print("%s %s" % (start, end))
if start is not None and start.tag == 'begin':
fid = start.attrib['id'][1:]
fragments[fid] = {'content': [], 'themes': []}
fragments[fid]['content'].append(start.tail)
elif start is not None and start.tag == 'end':
fid = start.attrib['id'][1:]
+ if fid not in fragments:
+ continue # a broken <end> node, skip it
frag = fragments[fid]
del fragments[fid]
- print("Fragment %s complete, themes: %s contents: %s" % (fid, frag['themes'], frag['content']))
+
+                def jstr(l):
+                    return u' '.join(
+                        u'(none)' if x is None else unicode(x)
+                        for x in l)
doc = self.create_book_doc(book)
- doc.add(Field("fragment_anchor", fid, Field.Store.YES, Field.Index.NOT_ANALYZED))
- doc.add(Field("content", u' '.join(filter(lambda s: s is not None, frag['content'])), Field.Store.NO, Field.Index.ANALYZED))
- doc.add(Field("themes", u' '.join(frag['themes']), Field.Store.NO, Field.Index.ANALYZED))
+ doc.add(Field("fragment_anchor", fid,
+ Field.Store.YES, Field.Index.NOT_ANALYZED))
+ doc.add(Field("content",
+ u' '.join(filter(lambda s: s is not None, frag['content'])),
+ Field.Store.NO, Field.Index.ANALYZED))
+ doc.add(Field("themes",
+ u' '.join(filter(lambda s: s is not None, frag['themes'])),
+ Field.Store.NO, Field.Index.ANALYZED))
+
fragment_docs.append(doc)
elif start is not None:
for frag in fragments.values():
self.close()
+class ReusableIndex(Index):
+ """
+    Works like Index, but does not close/optimize the Lucene index
+    until program exit (it uses an atexit hook). This is useful for
+    the importbooks command.
+
+    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
+ """
+ index = None
+ pool = None
+ pool_jobs = None
+
+ def open(self, analyzer=None, threads=4):
+ if ReusableIndex.index is not None:
+ self.index = ReusableIndex.index
+ else:
+ ReusableIndex.pool = ThreadPool(threads)
+ ReusableIndex.pool_jobs = []
+ Index.open(self, analyzer)
+ ReusableIndex.index = self.index
+ atexit.register(ReusableIndex.close_reusable)
+
+ def index_book(self, *args, **kw):
+        job = ReusableIndex.pool.apply_async(Index.index_book, (self,) + args, kw)
+ ReusableIndex.pool_jobs.append(job)
+
+ @staticmethod
+ def close_reusable():
+ if ReusableIndex.index is not None:
+ for job in ReusableIndex.pool_jobs:
+ job.wait()
+ ReusableIndex.pool.close()
+
+ ReusableIndex.index.optimize()
+ ReusableIndex.index.close()
+ ReusableIndex.index = None
+
+ def close(self):
+ pass
+
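+# A minimal usage sketch of ReusableIndex (hypothetical `books` iterable, e.g.
+# catalogue.models.Book.objects.all()); index_book() jobs are queued on the
+# thread pool, and the index is optimized and closed only once, at process
+# exit via the atexit hook, or earlier via an explicit close_reusable() call:
+#
+#     index = ReusableIndex()
+#     index.open()
+#     for book in books:
+#         index.index_book(book)
+#     index.close()                      # no-op: the index stays open
+#     ReusableIndex.close_reusable()     # optional, if atexit cannot be relied on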
+
class Search(IndexStore):
def __init__(self, default_field="content"):
IndexStore.__init__(self)
bks = []
for found in tops.scoreDocs:
doc = self.searcher.doc(found.doc)
- bks.append(Book.objects.get(id=doc.get("book_id")))
+ bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
return (bks, tops.totalHits)
def search(self, query, max_results=50):
bks = []
for found in tops.scoreDocs:
doc = self.searcher.doc(found.doc)
- bks.append(Book.objects.get(id=doc.get("book_id")))
+ bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
return (bks, tops.totalHits)
def bsearch(self, query, max_results=50):
q = self.query(query)
- f = TermsFilter()
- f.addTerm(Term("is_book", "true"))
- bjq = BlockJoinQuery(q, f, BlockJoinQuery.ScoreMode.Avg)
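+        # self.parent_filter is assumed to be set up elsewhere (e.g. in
+        # Search.__init__) as a filter selecting the "parent" book documents,
+        # much like the is_book:true TermsFilter removed above.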
+ bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
tops = self.searcher.search(bjq, max_results)
bks = []
for found in tops.scoreDocs:
doc = self.searcher.doc(found.doc)
- bks.append(Book.objects.get(id=doc.get("book_id")))
+ bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+ return (bks, tops.totalHits)
+
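+# Reference: the standard Lucene (Java) token-stream idiom that
+# MultiSearch.get_tokens() below reproduces through PyLucene: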
+# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
+# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
+# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
+
+# while (tokenStream.incrementToken()) {
+# int startOffset = offsetAttribute.startOffset();
+# int endOffset = offsetAttribute.endOffset();
+# String term = charTermAttribute.toString();
+# }
+
+
+class MultiSearch(Search):
+ """Class capable of IMDb-like searching"""
+    def get_tokens(self, queryreader):
+        """Return the list of token strings produced by the content analyzer."""
+        if isinstance(queryreader, (str, unicode)):
+            queryreader = StringReader(queryreader)
+        queryreader.reset()
+        tokens = self.analyzer.reusableTokenStream('content', queryreader)
+        toks = []
+        while tokens.incrementToken():
+            cta = tokens.getAttribute(CharTermAttribute.class_)
+            toks.append(cta.toString())
+        return toks
+
+ def make_phrase(self, tokens, field='content', joined=False):
+ phrase = PhraseQuery()
+ for t in tokens:
+ term = Term(field, t)
+ phrase.add(term)
+ if joined:
+ phrase = self.content_query(phrase)
+ return phrase
+
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, joined=False):
+        q = BooleanQuery()
+        for t in tokens:
+            term = Term(field, t)
+            q.add(BooleanClause(TermQuery(term), modal))
+        if joined:
+            q = self.content_query(q)
+        return q
+
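+    # content_query() wraps a query over child (header/fragment) documents in
+    # a BlockJoinQuery, so matches are joined up to the parent book document
+    # selected by self.parent_filter.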
+ def content_query(self, query):
+ return BlockJoinQuery(query, self.parent_filter,
+ BlockJoinQuery.ScoreMode.Total)
+
+    def multisearch(self, query, max_results=50):
+ """
+ Search strategy:
+ - (phrase) OR -> content
+ -> title
+ -> author
+ - (keywords) -> author
+ -> motyw
+ -> tags
+ -> content
+ """
+ queryreader = StringReader(query)
+ tokens = self.get_tokens(queryreader)
+
+ top_level = BooleanQuery()
+ Should = BooleanClause.Occur.SHOULD
+
+ phrase_level = BooleanQuery()
+
+ p_content = self.make_phrase(tokens, joined=True)
+ p_title = self.make_phrase(tokens, 'title')
+ p_author = self.make_phrase(tokens, 'author')
+
+ phrase_level.add(BooleanClause(p_content, Should))
+ phrase_level.add(BooleanClause(p_title, Should))
+ phrase_level.add(BooleanClause(p_author, Should))
+
+ kw_level = BooleanQuery()
+
+ kw_level.add(self.make_term_query(tokens, 'author'), Should)
+ kw_level.add(self.make_term_query(tokens, 'themes', joined=True), Should)
+ kw_level.add(self.make_term_query(tokens, 'tags'), Should)
+ kw_level.add(self.make_term_query(tokens, joined=True), Should)
+
+ top_level.add(BooleanClause(phrase_level, Should))
+ top_level.add(BooleanClause(kw_level, Should))
+
+ tops = self.searcher.search(top_level, max_results)
+ bks = []
+ for found in tops.scoreDocs:
+ doc = self.searcher.doc(found.doc)
+ bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
return (bks, tops.totalHits)
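+
+# A minimal sketch of the intended MultiSearch usage (hypothetical query
+# string; assumes the index has already been built with Index/ReusableIndex):
+#
+#     search = MultiSearch()
+#     books, total = search.multisearch(u'lalka')
+#     for book in books:
+#         print book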