X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/756585089fb09cd0e25856b55510e69a52112e5f..0f1d9c82677e949f5333447343ae72f45d4841be:/apps/search/index.py?ds=sidebyside diff --git a/apps/search/index.py b/apps/search/index.py index af3dd9a60..85bcb616d 100644 --- a/apps/search/index.py +++ b/apps/search/index.py @@ -1,105 +1,268 @@ # -*- coding: utf-8 -*- from django.conf import settings -from lucene import SimpleFSDirectory, IndexWriter, File, Field, \ - NumericField, Version, Document, JavaError, IndexSearcher, \ - QueryParser, PerFieldAnalyzerWrapper, \ - SimpleAnalyzer, PolishAnalyzer, ArrayList, \ - KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \ - BlockJoinQuery, BlockJoinCollector, TermsFilter, \ - HashSet, BooleanClause, Term, CharTermAttribute, \ - PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, BlockJoinQuery, \ - FuzzyQuery, FuzzyTermEnum, Sort, Integer, \ - SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \ - initVM, CLASSPATH, JArray - # KeywordAnalyzer -JVM = initVM(CLASSPATH) -import sys + import os +import re import errno from librarian import dcparser from librarian.parser import WLDocument +from lxml import etree import catalogue.models -from multiprocessing.pool import ThreadPool -from threading import current_thread -import atexit +from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook +from itertools import chain import traceback +import logging +log = logging.getLogger('search') +import sunburnt +import custom +import operator +log = logging.getLogger('search') -class WLAnalyzer(PerFieldAnalyzerWrapper): - def __init__(self): - polish = PolishAnalyzer(Version.LUCENE_34) - simple = SimpleAnalyzer(Version.LUCENE_34) - keyword = KeywordAnalyzer(Version.LUCENE_34) - # not sure if needed: there's NOT_ANALYZED meaning basically the same - - PerFieldAnalyzerWrapper.__init__(self, polish) - - self.addAnalyzer("tags", simple) - self.addAnalyzer("technical_editors", simple) - self.addAnalyzer("editors", simple) - self.addAnalyzer("url", keyword) - self.addAnalyzer("source_url", keyword) - self.addAnalyzer("source_name", simple) - self.addAnalyzer("publisher", simple) - self.addAnalyzer("author", simple) - self.addAnalyzer("is_book", keyword) - - self.addAnalyzer("KEYWORD", keyword) - self.addAnalyzer("SIMPLE", simple) - self.addAnalyzer("NATURAL", polish) +class SolrIndex(object): + def __init__(self, mode=None): + self.index = custom.CustomSolrInterface(settings.SOLR, mode=mode) -class IndexStore(object): - def __init__(self): - self.make_index_dir() - self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX)) +class Snippets(object): + """ + This class manages snippet files for indexed object (book) + the snippets are concatenated together, and their positions and + lengths are kept in lucene index fields. 
+ """ + SNIPPET_DIR = "snippets" - def make_index_dir(self): + def __init__(self, book_id, revision=None): try: - os.makedirs(settings.SEARCH_INDEX) + os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR)) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise + self.book_id = book_id + self.revision = revision + self.file = None + @property + def path(self): + if self.revision: fn = "%d.%d" % (self.book_id, self.revision) + else: fn = "%d" % self.book_id -class Index(IndexStore): - def __init__(self, analyzer=None): - IndexStore.__init__(self) - self.index = None - if not analyzer: - analyzer = WLAnalyzer() - self.analyzer = analyzer + return os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, fn) - def open(self, analyzer=None): - if self.index: - raise Exception("Index is already opened") - self.index = IndexWriter(self.store, self.analyzer,\ - IndexWriter.MaxFieldLength.LIMITED) - return self.index + def open(self, mode='r'): + """ + Open the snippet file. Call .close() afterwards. + """ + if not 'b' in mode: + mode += 'b' + + if 'w' in mode: + if os.path.exists(self.path): + self.revision = 1 + while True: + if not os.path.exists(self.path): + break + self.revision += 1 + + self.file = open(self.path, mode) + self.position = 0 + return self + + def add(self, snippet): + """ + Append a snippet (unicode) to the snippet file. + Return a (position, length) tuple + """ + txt = snippet.encode('utf-8') + l = len(txt) + self.file.write(txt) + pos = (self.position, l) + self.position += l + return pos + + def get(self, pos): + """ + Given a tuple of (position, length) return an unicode + of the snippet stored there. + """ + self.file.seek(pos[0], 0) + txt = self.file.read(pos[1]).decode('utf-8') + return txt def close(self): - self.index.optimize() - self.index.close() - self.index = None + """Close snippet file""" + if self.file: + self.file.close() + + def remove(self): + self.revision = None + try: + os.unlink(self.path) + self.revision = 0 + while True: + self.revision += 1 + os.unlink(self.path) + except OSError: + pass - def remove_book(self, book): - q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True) - self.index.deleteDocuments(q) - def index_book(self, book, overwrite=True): +class Index(SolrIndex): + """ + Class indexing books. + """ + def __init__(self): + super(Index, self).__init__(mode='rw') + + def delete_query(self, *queries): + """ + index.delete(queries=...) doesn't work, so let's reimplement it + using deletion of list of uids. + """ + uids = set() + for q in queries: + if isinstance(q, sunburnt.search.LuceneQuery): + q = self.index.query(q) + q.field_limiter.update(['uid']) + st = 0 + rows = 100 + while True: + ids = q.paginate(start=st, rows=rows).execute() + if not len(ids): + break + for res in ids: + uids.add(res['uid']) + st += rows + if uids: + self.index.delete(uids) + return True + else: + return False + + def index_tags(self, *tags, **kw): + """ + Re-index global tag list. + Removes all tags from index, then index them again. + Indexed fields include: id, name (with and without polish stems), category + """ + log.debug("Indexing tags") + remove_only = kw.get('remove_only', False) + # first, remove tags from index. 
+ if tags: + tag_qs = [] + for tag in tags: + q_id = self.index.Q(tag_id=tag.id) + + if isinstance(tag, PDCounterAuthor): + q_cat = self.index.Q(tag_category='pd_author') + elif isinstance(tag, PDCounterBook): + q_cat = self.index.Q(tag_category='pd_book') + else: + q_cat = self.index.Q(tag_category=tag.category) + + q_id_cat = self.index.Q(q_id & q_cat) + tag_qs.append(q_id_cat) + self.delete_query(tag_qs) + else: # all + q = self.index.Q(tag_id__any=True) + self.delete_query(q) + + if not remove_only: + # then add them [all or just one passed] + if not tags: + tags = chain(catalogue.models.Tag.objects.exclude(category='set'), \ + PDCounterAuthor.objects.all(), \ + PDCounterBook.objects.all()) + + for tag in tags: + if isinstance(tag, PDCounterAuthor): + doc = { + "tag_id": int(tag.id), + "tag_name": tag.name, + "tag_name_pl": tag.name, + "tag_category": 'pd_author', + "is_pdcounter": True, + "uid": "tag%d_pd_a" % tag.id + } + elif isinstance(tag, PDCounterBook): + doc = { + "tag_id": int(tag.id), + "tag_name": tag.title, + "tag_name_pl": tag.title, + "tag_category": 'pd_book', + "is_pdcounter": True, + "uid": "tag%d_pd_b" % tag.id + } + else: + doc = { + "tag_id": int(tag.id), + "tag_name": tag.name, + "tag_name_pl": tag.name, + "tag_category": tag.category, + "is_pdcounter": False, + "uid": "tag%d" % tag.id + } + self.index.add(doc) + + def create_book_doc(self, book): + """ + Create a lucene document referring book id. + """ + doc = { + 'book_id': int(book.id), + } + if book.parent is not None: + doc["parent_id"] = int(book.parent.id) + return doc + + def remove_book(self, book_or_id, remove_snippets=True): + """Removes a book from search index. + book - Book instance.""" + if isinstance(book_or_id, catalogue.models.Book): + book_id = book_or_id.id + else: + book_id = book_or_id + + self.delete_query(self.index.Q(book_id=book_id)) + + if remove_snippets: + snippets = Snippets(book_id) + snippets.remove() + + def index_book(self, book, book_info=None, overwrite=True): + """ + Indexes the book. + Creates a lucene document for extracted metadata + and calls self.index_content() to index the contents of the book. 
+ """ if overwrite: - self.remove_book(book) + # we don't remove snippets, since they might be still needed by + # threads using not reopened index + self.remove_book(book, remove_snippets=False) + + book_doc = self.create_book_doc(book) + meta_fields = self.extract_metadata(book, book_info, dc_only=['source_name', 'authors', 'translators', 'title']) + # let's not index it - it's only used for extracting publish date + if 'source_name' in meta_fields: + del meta_fields['source_name'] + + for n, f in meta_fields.items(): + book_doc[n] = f - doc = self.extract_metadata(book) - parts = self.extract_content(book) - block = ArrayList().of_(Document) + book_doc['uid'] = "book%s" % book_doc['book_id'] + self.index.add(book_doc) + del book_doc + book_fields = { + 'title': meta_fields['title'], + 'authors': meta_fields['authors'], + 'published_date': meta_fields['published_date'] + } - for p in parts: - block.add(p) - block.add(doc) - self.index.addDocuments(block) + if 'translators' in meta_fields: + book_fields['translators'] = meta_fields['translators'] + + self.index_content(book, book_fields=book_fields) master_tags = [ 'opowiadanie', @@ -107,33 +270,39 @@ class Index(IndexStore): 'dramat_wierszowany_l', 'dramat_wierszowany_lp', 'dramat_wspolczesny', 'liryka_l', 'liryka_lp', - 'wywiad' + 'wywiad', ] - skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne'] + ignore_content_tags = [ + 'uwaga', 'extra', + 'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu', + 'didaskalia', + 'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc', + ] - def create_book_doc(self, book): + footnote_tags = ['pa', 'pt', 'pr', 'pe'] + + skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF'] + + published_date_re = re.compile("([0-9]+)[\]. 
]*$") + + def extract_metadata(self, book, book_info=None, dc_only=None): """ - Create a lucene document connected to the book + Extract metadata from book and returns a map of fields keyed by fieldname """ - doc = Document() - doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id)) - if book.parent is not None: - doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id)) - return doc + fields = {} - def extract_metadata(self, book): - book_info = dcparser.parse(book.xml_file) + if book_info is None: + book_info = dcparser.parse(open(book.xml_file.path)) - print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident)) - - doc = self.create_book_doc(book) - doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)) - doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED)) - doc.add(Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)) + fields['slug'] = book.slug + fields['tags'] = [t.name for t in book.tags] + fields['is_book'] = True # validator, name for field in dcparser.BookInfo.FIELDS: + if dc_only and field.name not in dc_only: + continue if hasattr(book_info, field.name): if not getattr(book_info, field.name): continue @@ -143,312 +312,376 @@ class Index(IndexStore): s = getattr(book_info, field.name) if field.multiple: s = ', '.join(s) - try: - doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)) - except JavaError as je: - raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args)) + fields[field.name] = s elif type_indicator == dcparser.as_person: p = getattr(book_info, field.name) if isinstance(p, dcparser.Person): persons = unicode(p) else: persons = ', '.join(map(unicode, p)) - doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)) + fields[field.name] = persons elif type_indicator == dcparser.as_date: dt = getattr(book_info, field.name) - doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)) - return doc + fields[field.name] = dt + + # get published date + pd = None + if hasattr(book_info, 'source_name') and book_info.source_name: + match = self.published_date_re.search(book_info.source_name) + if match is not None: + pd = str(match.groups()[0]) + if not pd: pd = "" + fields["published_date"] = pd + + return fields + + # def add_gaps(self, fields, fieldname): + # """ + # Interposes a list of fields with gap-fields, which are indexed spaces and returns it. + # This allows for doing phrase queries which do not overlap the gaps (when slop is 0). + # """ + # def gap(): + # while True: + # yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED) + # return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1] def get_master(self, root): + """ + Returns the first master tag from an etree. + """ for master in root.iter(): if master.tag in self.master_tags: return master - def extract_content(self, book): - wld = WLDocument.from_file(book.xml_file.path) + def index_content(self, book, book_fields={}): + """ + Walks the book XML and extract content from it. + Adds parts for each header tag and for each fragment. + """ + wld = WLDocument.from_file(book.xml_file.path, parse_dublincore=False) root = wld.edoc.getroot() - # first we build a sequence of top-level items. - # book_id - # header_index - the 0-indexed position of header element. 
- # content master = self.get_master(root) if master is None: return [] - header_docs = [] - for header, position in zip(list(master), range(len(master))): - if header.tag in self.skip_header_tags: - continue - doc = self.create_book_doc(book) - doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position)) - doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED)) - content = u' '.join([t for t in header.itertext()]) - doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED)) - header_docs.append(doc) - - def walker(node): - yield node, None - for child in list(node): - for b, e in walker(child): - yield b, e - yield None, node - return - - # Then we create a document for each fragments - # fragment_anchor - the anchor - # themes - list of themes [not indexed] - fragment_docs = [] - # will contain (framgent id -> { content: [], themes: [] } - fragments = {} - for start, end in walker(master): - if start is not None and start.tag == 'begin': - fid = start.attrib['id'][1:] - fragments[fid] = {'content': [], 'themes': []} - fragments[fid]['content'].append(start.tail) - elif start is not None and start.tag == 'motyw': - fid = start.attrib['id'][1:] - fragments[fid]['themes'].append(start.text) - fragments[fid]['content'].append(start.tail) - elif start is not None and start.tag == 'end': - fid = start.attrib['id'][1:] - if fid not in fragments: - continue # a broken node, skip it - frag = fragments[fid] - del fragments[fid] - - def jstr(l): - return u' '.join(map( - lambda x: x == None and u'(none)' or unicode(x), - l)) - - doc = self.create_book_doc(book) - doc.add(Field("fragment_anchor", fid, - Field.Store.YES, Field.Index.NOT_ANALYZED)) - doc.add(Field("content", - u' '.join(filter(lambda s: s is not None, frag['content'])), - Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)) - doc.add(Field("themes", - u' '.join(filter(lambda s: s is not None, frag['themes'])), - Field.Store.NO, Field.Index.ANALYZED)) - - fragment_docs.append(doc) - elif start is not None: - for frag in fragments.values(): - frag['content'].append(start.text) - elif end is not None: - for frag in fragments.values(): - frag['content'].append(end.tail) - - return header_docs + fragment_docs - - def __enter__(self): - self.open() - return self - - def __exit__(self, type, value, tb): - self.close() - - -def log_exception_wrapper(f): - def _wrap(*a): - try: - f(*a) - except Exception, e: - print("Error in indexing thread: %s" % e) - traceback.print_exc() - raise e - return _wrap - - -class ReusableIndex(Index): - """ - Works like index, but does not close/optimize Lucene index - until program exit (uses atexit hook). - This is usefull for importbooks command. - - if you cannot rely on atexit, use ReusableIndex.close_reusable() yourself. 
- """ - index = None - pool = None - pool_jobs = None - - def open(self, analyzer=None, threads=4): - if ReusableIndex.index is not None: - self.index = ReusableIndex.index - else: - print("opening index") - ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread() ) - ReusableIndex.pool_jobs = [] - Index.open(self, analyzer) - ReusableIndex.index = self.index - atexit.register(ReusableIndex.close_reusable) - - def index_book(self, *args, **kw): - job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw) - ReusableIndex.pool_jobs.append(job) - - @staticmethod - def close_reusable(): - if ReusableIndex.index is not None: - print("wait for indexing to finish") - for job in ReusableIndex.pool_jobs: - job.get() - sys.stdout.write('.') - sys.stdout.flush() - print("done.") - ReusableIndex.pool.close() - - ReusableIndex.index.optimize() - ReusableIndex.index.close() - ReusableIndex.index = None - - def close(self): - pass + def walker(node, ignore_tags=[]): + if node.tag not in ignore_tags: + yield node, None, None + if node.text is not None: + yield None, node.text, None + for child in list(node): + for b, t, e in walker(child): + yield b, t, e + yield None, None, node -class Search(IndexStore): - def __init__(self, default_field="content"): - IndexStore.__init__(self) - self.analyzer = WLAnalyzer() #PolishAnalyzer(Version.LUCENE_34) - ## self.analyzer = WLAnalyzer() - self.searcher = IndexSearcher(self.store, True) - self.parser = QueryParser(Version.LUCENE_34, default_field, - self.analyzer) + if node.tail is not None: + yield None, node.tail, None + return - self.parent_filter = TermsFilter() - self.parent_filter.addTerm(Term("is_book", "true")) + def fix_format(text): + # separator = [u" ", u"\t", u".", u";", u","] + if isinstance(text, list): + # need to join it first + text = filter(lambda s: s is not None, content) + text = u' '.join(text) + # for i in range(len(text)): + # if i > 0: + # if text[i][0] not in separator\ + # and text[i - 1][-1] not in separator: + # text.insert(i, u" ") + + return re.sub("(?m)/$", "", text) + + def add_part(snippets, **fields): + doc = self.create_book_doc(book) + for n, v in book_fields.items(): + doc[n] = v - def query(self, query): - return self.parser.parse(query) + doc['header_index'] = fields["header_index"] + doc['header_span'] = 'header_span' in fields and fields['header_span'] or 1 + doc['header_type'] = fields['header_type'] - def wrapjoins(self, query, fields=[]): - """ - This functions modifies the query in a recursive way, - so Term and Phrase Queries contained, which match - provided fields are wrapped in a BlockJoinQuery, - and so delegated to children documents. 
- """ - if BooleanQuery.instance_(query): - qs = BooleanQuery.cast_(query) - for clause in qs: - clause = BooleanClause.cast_(clause) - clause.setQuery(self.wrapjoins(clause.getQuery(), fields)) - return qs - else: - termset = HashSet() - query.extractTerms(termset) - for t in termset: - t = Term.cast_(t) - if t.field() not in fields: - return query - return BlockJoinQuery(query, self.parent_filter, - BlockJoinQuery.ScoreMode.Total) + doc['text'] = fields['text'] - def simple_search(self, query, max_results=50): - """Returns (books, total_hits) - """ + # snippets + snip_pos = snippets.add(fields["text"]) - tops = self.searcher.search(self.query(query), max_results) - bks = [] - for found in tops.scoreDocs: - doc = self.searcher.doc(found.doc) - bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id"))) - return (bks, tops.totalHits) + doc['snippets_position'] = snip_pos[0] + doc['snippets_length'] = snip_pos[1] + if snippets.revision: + doc["snippets_revision"] = snippets.revision - def search(self, query, max_results=50): - query = self.query(query) - query = self.wrapjoins(query, ["content", "themes"]) + if 'fragment_anchor' in fields: + doc["fragment_anchor"] = fields['fragment_anchor'] - tops = self.searcher.search(query, max_results) - bks = [] - for found in tops.scoreDocs: - doc = self.searcher.doc(found.doc) - bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id"))) - return (bks, tops.totalHits) + if 'themes' in fields: + doc['themes'] = fields['themes'] + doc['uid'] = "part%s%s%s" % (doc['header_index'], + doc['header_span'], + doc.get('fragment_anchor', '')) + return doc - def bsearch(self, query, max_results=50): - q = self.query(query) - bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg) + def give_me_utf8(s): + if isinstance(s, unicode): + return s.encode('utf-8') + else: + return s - tops = self.searcher.search(bjq, max_results) - bks = [] - for found in tops.scoreDocs: - doc = self.searcher.doc(found.doc) - bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id"))) - return (bks, tops.totalHits) + fragments = {} + snippets = Snippets(book.id).open('w') + try: + for header, position in zip(list(master), range(len(master))): -# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader); -# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class); -# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class); + if header.tag in self.skip_header_tags: + continue + if header.tag is etree.Comment: + continue -# while (tokenStream.incrementToken()) { -# int startOffset = offsetAttribute.startOffset(); -# int endOffset = offsetAttribute.endOffset(); -# String term = charTermAttribute.toString(); -# } + # section content + content = [] + footnote = [] + + def all_content(text): + for frag in fragments.values(): + frag['text'].append(text) + content.append(text) + handle_text = [all_content] + + for start, text, end in walker(header, ignore_tags=self.ignore_content_tags): + # handle footnotes + if start is not None and start.tag in self.footnote_tags: + footnote = [] + + def collect_footnote(t): + footnote.append(t) + + handle_text.append(collect_footnote) + elif end is not None and footnote is not [] and end.tag in self.footnote_tags: + handle_text.pop() + doc = add_part(snippets, header_index=position, header_type=header.tag, + text=u''.join(footnote), + is_footnote=True) + self.index.add(doc) + footnote = [] + + # handle fragments and themes. 
+ if start is not None and start.tag == 'begin': + fid = start.attrib['id'][1:] + fragments[fid] = {'text': [], 'themes': [], 'start_section': position, 'start_header': header.tag} + + # themes for this fragment + elif start is not None and start.tag == 'motyw': + fid = start.attrib['id'][1:] + handle_text.append(None) + if start.text is not None: + fragments[fid]['themes'] += map(unicode.strip, map(unicode, (start.text.split(',')))) + elif end is not None and end.tag == 'motyw': + handle_text.pop() + + elif start is not None and start.tag == 'end': + fid = start.attrib['id'][1:] + if fid not in fragments: + continue # a broken node, skip it + frag = fragments[fid] + if frag['themes'] == []: + continue # empty themes list. + del fragments[fid] + + doc = add_part(snippets, + header_type=frag['start_header'], + header_index=frag['start_section'], + header_span=position - frag['start_section'] + 1, + fragment_anchor=fid, + text=fix_format(frag['text']), + themes=frag['themes']) + self.index.add(doc) + + # Collect content. + + if text is not None and handle_text is not []: + hdl = handle_text[-1] + if hdl is not None: + hdl(text) + + # in the end, add a section text. + doc = add_part(snippets, header_index=position, + header_type=header.tag, text=fix_format(content)) + + self.index.add(doc) + + finally: + snippets.close() class SearchResult(object): - def __init__(self, searcher, scoreDocs, score=None, highlight_query=None): - if score: - self.score = score + def __init__(self, doc, how_found=None, query=None, query_terms=None): + # self.search = search + self.boost = 1.0 + self._hits = [] + self._processed_hits = None # processed hits + self.snippets = [] + self.query_terms = query_terms + + if 'score' in doc: + self._score = doc['score'] else: - self.score = scoreDocs.score + self._score = 0 - self.fragments = [] - self.scores = {} - self.sections = [] + self.book_id = int(doc["book_id"]) - stored = searcher.doc(scoreDocs.doc) - self.book_id = int(stored.get("book_id")) + try: + self.published_date = int(doc.get("published_date")) + except ValueError: + self.published_date = 0 + + # content hits + header_type = doc.get("header_type", None) + # we have a content hit in some header of fragment + if header_type is not None: + sec = (header_type, int(doc["header_index"])) + header_span = doc['header_span'] + header_span = header_span is not None and int(header_span) or 1 + fragment = doc.get("fragment_anchor", None) + snippets_pos = (doc['snippets_position'], doc['snippets_length']) + snippets_rev = doc.get('snippets_revision', None) + + hit = (sec + (header_span,), fragment, self._score, { + 'how_found': how_found, + 'snippets_pos': snippets_pos, + 'snippets_revision': snippets_rev, + 'themes': doc.get('themes', []), + 'themes_pl': doc.get('themes_pl', []) + }) + + self._hits.append(hit) - fragment = stored.get("fragment_anchor") - if fragment: - self.fragments.append(fragment) - self.scores[fragment] = scoreDocs.score + def __unicode__(self): + return u"" % \ + (self.book_id, len(self._hits), self._processed_hits and len(self._processed_hits) or -1, self._score, len(self.snippets)) - header_type = stored.get("header_type") - if header_type: - sec = (header_type, int(stored.get("header_index"))) - self.sections.append(sec) - self.scores[sec] = scoreDocs.score + def __str__(self): + return unicode(self).encode('utf-8') - self.snippets = [] + @property + def score(self): + return self._score * self.boost - def add_snippets(self, snippets): - self.snippets += snippets + def merge(self, other): + 
if self.book_id != other.book_id: + raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id)) + self._hits += other._hits + if other.score > self.score: + self._score = other._score return self def get_book(self): - return catalogue.models.Book.objects.get(id=self.book_id) + if hasattr(self, '_book'): + return self._book + self._book = catalogue.models.Book.objects.get(id=self.book_id) + return self._book book = property(get_book) - def get_parts(self): - book = self.book - parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \ - + [{"fragment": book.fragments.get(anchor=f), '_score_key':f} for f in self.fragments] + POSITION = 0 + FRAGMENT = 1 + POSITION_INDEX = 1 + POSITION_SPAN = 2 + SCORE = 2 + OTHER = 3 + + @property + def hits(self): + if self._processed_hits is not None: + return self._processed_hits + + # to sections and fragments + frags = filter(lambda r: r[self.FRAGMENT] is not None, self._hits) + + sect = filter(lambda r: r[self.FRAGMENT] is None, self._hits) + + # sections not covered by fragments + sect = filter(lambda s: 0 == len(filter( + lambda f: s[self.POSITION][self.POSITION_INDEX] >= f[self.POSITION][self.POSITION_INDEX] + and s[self.POSITION][self.POSITION_INDEX] < f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN], + frags)), sect) + + hits = [] + + def remove_duplicates(lst, keyfn, compare): + els = {} + for e in lst: + eif = keyfn(e) + if eif in els: + if compare(els[eif], e) >= 1: + continue + els[eif] = e + return els.values() + + # remove fragments with duplicated fid's and duplicated snippets + frags = remove_duplicates(frags, lambda f: f[self.FRAGMENT], lambda a, b: cmp(a[self.SCORE], b[self.SCORE])) + # frags = remove_duplicates(frags, lambda f: f[OTHER]['snippet_pos'] and f[OTHER]['snippet_pos'] or f[FRAGMENT], + # lambda a, b: cmp(a[SCORE], b[SCORE])) + + # remove duplicate sections + sections = {} + + for s in sect: + si = s[self.POSITION][self.POSITION_INDEX] + # skip existing + if si in sections: + if sections[si]['score'] >= s[self.SCORE]: + continue - parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']])) - print("bookid: %d parts: %s" % (self.book_id, parts)) - return parts + m = {'score': s[self.SCORE], + 'section_number': s[self.POSITION][self.POSITION_INDEX] + 1, + } + m.update(s[self.OTHER]) + sections[si] = m - parts = property(get_parts) + hits = sections.values() - def merge(self, other): - if self.book_id != other.book_id: - raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id)) - self.fragments += other.fragments - self.sections += other.sections - self.snippets += other.snippets - self.scores.update(other.scores) - if other.score > self.score: - self.score = other.score - return self + for f in frags: + try: + frag = catalogue.models.Fragment.objects.get(anchor=f[self.FRAGMENT], book__id=self.book_id) + except catalogue.models.Fragment.DoesNotExist: + # stale index + continue + # Figure out if we were searching for a token matching some word in theme name. 
+ themes = frag.tags.filter(category='theme') + themes_hit = set() + if self.query_terms is not None: + for i in range(0, len(f[self.OTHER]['themes'])): + tms = f[self.OTHER]['themes'][i].split(r' +') + f[self.OTHER]['themes_pl'][i].split(' ') + tms = map(unicode.lower, tms) + for qt in self.query_terms: + if qt in tms: + themes_hit.add(f[self.OTHER]['themes'][i]) + break + + def theme_by_name(n): + th = filter(lambda t: t.name == n, themes) + if th: + return th[0] + else: + return None + themes_hit = filter(lambda a: a is not None, map(theme_by_name, themes_hit)) - def __unicode__(self): - return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score) + m = {'score': f[self.SCORE], + 'fragment': frag, + 'section_number': f[self.POSITION][self.POSITION_INDEX] + 1, + 'themes': themes, + 'themes_hit': themes_hit + } + m.update(f[self.OTHER]) + hits.append(m) + + hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True) + + self._processed_hits = hits + + return hits @staticmethod def aggregate(*result_lists): @@ -457,211 +690,251 @@ class SearchResult(object): for r in rl: if r.book_id in books: books[r.book_id].merge(r) - #print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score)) else: books[r.book_id] = r return books.values() def __cmp__(self, other): - return cmp(self.score, other.score) - - -class MultiSearch(Search): - """Class capable of IMDb-like searching""" - def get_tokens(self, searched, field='content'): - """returns tokens analyzed by a proper (for a field) analyzer - argument can be: StringReader, string/unicode, or tokens. In the last case - they will just be returned (so we can reuse tokens, if we don't change the analyzer) - """ - if isinstance(searched, str) or isinstance(searched, unicode): - searched = StringReader(searched) - elif isinstance(searched, list): - return searched - - searched.reset() - tokens = self.analyzer.reusableTokenStream(field, searched) - toks = [] - while tokens.incrementToken(): - cta = tokens.getAttribute(CharTermAttribute.class_) - toks.append(cta.toString()) - return toks - - def fuzziness(self, fuzzy): - if not fuzzy: - return None - if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0: - return fuzzy + c = cmp(self.score, other.score) + if c == 0: + # this is inverted, because earlier date is better + return cmp(other.published_date, self.published_date) else: - return 0.5 + return c - def make_phrase(self, tokens, field='content', slop=2, fuzzy=False): - if fuzzy: - phrase = MultiPhraseQuery() - for t in tokens: - term = Term(field, t) - fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy)) - fuzzterms = [] + def __len__(self): + return len(self.hits) + + def snippet_pos(self, idx=0): + return self.hits[idx]['snippets_pos'] + + def snippet_revision(self, idx=0): + try: + return self.hits[idx]['snippets_revision'] + except: + return None + + +class Search(SolrIndex): + """ + Search facilities. + """ + def __init__(self, default_field="text"): + super(Search, self).__init__(mode='r') + + + def make_term_query(self, query, field='text', modal=operator.or_): + """ + Returns term queries joined by boolean query. + modal - applies to boolean query + fuzzy - should the query by fuzzy. 
+ """ + if query is None: query = '' + q = self.index.Q() + q = reduce(modal, map(lambda s: self.index.Q(**{field: s}), + query.split(r" ")), q) - while True: - # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8')) - ft = fuzzterm.term() - if ft: - fuzzterms.append(ft) - if not fuzzterm.next(): break - if fuzzterms: - phrase.add(JArray('object')(fuzzterms, Term)) - else: - phrase.add(term) - else: - phrase = PhraseQuery() - phrase.setSlop(slop) - for t in tokens: - term = Term(field, t) - phrase.add(term) - return phrase - - def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False): - q = BooleanQuery() - for t in tokens: - term = Term(field, t) - if fuzzy: - term = FuzzyQuery(term, self.fuzziness(fuzzy)) - else: - term = TermQuery(term) - q.add(BooleanClause(term, modal)) return q - def content_query(self, query): - return BlockJoinQuery(query, self.parent_filter, - BlockJoinQuery.ScoreMode.Total) + def search_phrase(self, searched, field='text', book=False, + filters=None, + snippets=False): + if filters is None: filters = [] + if book: filters.append(self.index.Q(is_book=True)) - def search_perfect_book(self, searched, max_results=20, fuzzy=False): - qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in ['author', 'title']] + q = self.index.query(**{field: searched}) + q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True) + res = q.execute() + return [SearchResult(found, how_found=u'search_phrase') for found in res] - books = [] - for q in qrys: - top = self.searcher.search(q, max_results) - for found in top.scoreDocs: - books.append(SearchResult(self.searcher, found)) - return books + def search_some(self, searched, fields, book=True, + filters=None, snippets=True, query_terms=None): + assert isinstance(fields, list) + if filters is None: filters = [] + if book: filters.append(self.index.Q(is_book=True)) - def search_perfect_parts(self, searched, max_results=20, fuzzy=False): - qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']] + query = self.index.Q() - books = [] - for q in qrys: - top = self.searcher.search(q, max_results) - for found in top.scoreDocs: - books.append(SearchResult(self.searcher, found).add_snippets(self.get_snippets(found, q))) + for fld in fields: + query = self.index.Q(query | self.make_term_query(searched, fld)) - return books + query = self.index.query(query) + query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True) + res = query.execute() + return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res] - def search_everywhere(self, searched, max_results=20, fuzzy=False): - books = [] + def search_everywhere(self, searched, query_terms=None): + """ + Tries to use search terms to match different fields of book (or its parts). + E.g. one word can be an author survey, another be a part of the title, and the rest + are some words from third chapter. 
+ """ + books = [] # content only query : themes x content - q = BooleanQuery() - - tokens = self.get_tokens(searched) - q.add(BooleanClause(self.make_term_query(tokens, field='themes', fuzzy=fuzzy), BooleanClause.Occur.MUST)) - q.add(BooleanClause(self.make_term_query(tokens, field='content', fuzzy=fuzzy), BooleanClause.Occur.SHOULD)) + q = self.make_term_query(searched, 'text') + q_themes = self.make_term_query(searched, 'themes_pl') - topDocs = self.searcher.search(q, max_results) - for found in topDocs.scoreDocs: - books.append(SearchResult(self.searcher, found)) + query = self.index.query(q).query(q_themes).field_limit(score=True, all_fields=True) + res = query.execute() - # joined query themes/content x author/title/epochs/genres/kinds - q = BooleanQuery() - in_meta = BooleanQuery() - in_content = BooleanQuery() + for found in res: + books.append(SearchResult(found, how_found='search_everywhere_themesXcontent', query_terms=query_terms)) - for fld in ['themes', 'content']: - in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD)) + # query themes/content x author/title/tags + in_content = self.index.Q() + in_meta = self.index.Q() - in_meta.add(BooleanClause(self.make_term_query(self.get_tokens(searched, field='author'), field='author', fuzzy=fuzzy), BooleanClause.Occur.SHOULD)) + for fld in ['themes_pl', 'text']: + in_content |= self.make_term_query(searched, field=fld) - for fld in ['title', 'epochs', 'genres', 'kinds']: - in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=fuzzy), BooleanClause.Occur.SHOULD)) + for fld in ['tags', 'authors', 'title']: + in_meta |= self.make_term_query(searched, field=fld) - q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST)) - in_content_join = self.content_query(in_content) - q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST)) - # import pdb; pdb.set_trace() - collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True) + q = in_content & in_meta + res = self.index.query(q).field_limit(score=True, all_fields=True).execute() - self.searcher.search(q, collector) + for found in res: + books.append(SearchResult(found, how_found='search_everywhere', query_terms=query_terms)) - top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True) - if top_groups: - for grp in top_groups.groups: - for part in grp.scoreDocs: - books.append(SearchResult(self.searcher, part, score=grp.maxScore)) return books - def multisearch(self, query, max_results=50): + def get_snippets(self, searchresult, query, field='text', num=1): """ - Search strategy: - - (phrase) OR -> content - -> title - -> author - - (keywords) -> author - -> motyw - -> tags - -> content + Returns a snippet for found scoreDoc. 
""" - # queryreader = StringReader(query) - # tokens = self.get_tokens(queryreader) + maxnum = len(searchresult) + if num is None or num < 0 or num > maxnum: + num = maxnum + book_id = searchresult.book_id + revision = searchresult.snippet_revision() + snippets = Snippets(book_id, revision=revision) + snips = [None] * maxnum + try: + snippets.open() + idx = 0 + while idx < maxnum and num > 0: + position, length = searchresult.snippet_pos(idx) + if position is None or length is None: + continue + text = snippets.get((int(position), + int(length))) + snip = self.index.highlight(text=text, field=field, q=query) + snips[idx] = snip + if snip: + num -= 1 + idx += 1 + + except IOError, e: + log.error("Cannot open snippet file for book id = %d [rev=%s], %s" % (book_id, revision, e)) + return [] + finally: + snippets.close() - # top_level = BooleanQuery() - # Should = BooleanClause.Occur.SHOULD + # remove verse end markers.. + snips = map(lambda s: s and s.replace("/\n", "\n"), snips) - # phrase_level = BooleanQuery() - # phrase_level.setBoost(1.3) + searchresult.snippets = snips - # p_content = self.make_phrase(tokens, joined=True) - # p_title = self.make_phrase(tokens, 'title') - # p_author = self.make_phrase(tokens, 'author') + return snips + + def hint_tags(self, query, pdcounter=True, prefix=True): + """ + Return auto-complete hints for tags + using prefix search. + """ + q = self.index.Q() + query = query.strip() + for field in ['tag_name', 'tag_name_pl']: + if prefix: + q |= self.index.Q(**{field: query + "*"}) + else: + q |= self.make_term_query(query, field=field) + qu = self.index.query(q).exclude(tag_category="book") - # phrase_level.add(BooleanClause(p_content, Should)) - # phrase_level.add(BooleanClause(p_title, Should)) - # phrase_level.add(BooleanClause(p_author, Should)) + return self.search_tags(qu, pdcounter=pdcounter) - # kw_level = BooleanQuery() + def search_tags(self, query, filters=None, pdcounter=False): + """ + Search for Tag objects using query. + """ + if not filters: filters = [] + if not pdcounter: + filters.append(~self.index.Q(is_pdcounter=True)) + res = self.apply_filters(query, filters).execute() + + tags = [] + pd_tags = [] + + for doc in res: + is_pdcounter = doc.get('is_pdcounter', False) + category = doc.get('tag_category') + try: + if is_pdcounter == True: + if category == 'pd_author': + tag = PDCounterAuthor.objects.get(id=doc.get('tag_id')) + elif category == 'pd_book': + tag = PDCounterBook.objects.get(id=doc.get('tag_id')) + tag.category = 'pd_book' # make it look more lik a tag. + else: + print ("Warning. 
cannot get pdcounter tag_id=%d from db; cat=%s" % (int(doc.get('tag_id')), category)).encode('utf-8') + pd_tags.append(tag) + else: + tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id")) + tags.append(tag) - # kw_level.add(self.make_term_query(tokens, 'author'), Should) - # j_themes = self.make_term_query(tokens, 'themes', joined=True) - # kw_level.add(j_themes, Should) - # kw_level.add(self.make_term_query(tokens, 'tags'), Should) - # j_con = self.make_term_query(tokens, joined=True) - # kw_level.add(j_con, Should) + except catalogue.models.Tag.DoesNotExist: pass + except PDCounterAuthor.DoesNotExist: pass + except PDCounterBook.DoesNotExist: pass - # top_level.add(BooleanClause(phrase_level, Should)) - # top_level.add(BooleanClause(kw_level, Should)) + tags_slugs = set(map(lambda t: t.slug, tags)) + tags = tags + filter(lambda t: not t.slug in tags_slugs, pd_tags) - return None + log.debug('search_tags: %s' % tags) + return tags - def do_search(self, query, max_results=50, collector=None): - tops = self.searcher.search(query, max_results) - #tops = self.searcher.search(p_content, max_results) + def hint_books(self, query, prefix=True): + """ + Returns auto-complete hints for book titles + Because we do not index 'pseudo' title-tags. + Prefix search. + """ + q = self.index.Q() + query = query.strip() + if prefix: + q |= self.index.Q(title=query + "*") + else: + q |= self.make_term_query(query, field='title') + qu = self.index.query(q) + only_books = self.index.Q(is_book=True) + return self.search_books(qu, [only_books]) + def search_books(self, query, filters=None, max_results=10): + """ + Searches for Book objects using query + """ bks = [] - for found in tops.scoreDocs: - doc = self.searcher.doc(found.doc) - b = catalogue.models.Book.objects.get(id=doc.get("book_id")) - bks.append(b) - print "%s (%d) -> %f" % (b, b.id, found.score) - return (bks, tops.totalHits) - - def get_snippets(self, scoreDoc, query, field='content'): - htmlFormatter = SimpleHTMLFormatter() - highlighter = Highlighter(htmlFormatter, QueryScorer(query)) - - stored = self.searcher.doc(scoreDoc.doc) - text = stored.get(field) - tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer) - # highlighter.getBestTextFragments(tokenStream, text, False, 10) - snip = highlighter.getBestFragments(tokenStream, text, 3, "...") - print('snips: %s' % snip) - - return [snip] + bks_found = set() + query = query.query(is_book=True) + res = self.apply_filters(query, filters).field_limit(['book_id']) + for r in res: + try: + bid = r['book_id'] + if not bid in bks_found: + bks.append(catalogue.models.Book.objects.get(id=bid)) + bks_found.add(bid) + except catalogue.models.Book.DoesNotExist: pass + return bks + + + @staticmethod + def apply_filters(query, filters): + """ + Apply filters to a query + """ + if filters is None: filters = [] + filters = filter(lambda x: x is not None, filters) + for f in filters: + query = query.query(f) + return query
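
A minimal, hypothetical sketch of how the new Solr-backed indexer might be driven (for example from a management command). It assumes settings.SOLR points at a running Solr core whose schema matches the fields used above, that the module is importable as search.index, and that committing is left to the caller via the underlying sunburnt interface; none of this is shown in the diff itself.

    # Hypothetical usage sketch, not taken from the diff above.
    import catalogue.models
    from search.index import Index      # import path assumed from apps/search/index.py

    idx = Index()                        # opens CustomSolrInterface in 'rw' mode (settings.SOLR assumed)
    for book in catalogue.models.Book.objects.all():
        idx.index_book(book)             # deletes the old docs (keeping snippet files), then re-adds
    idx.index_tags()                     # re-index the whole tag list, including PDCounter entries
    idx.index.commit()                   # assumes the sunburnt interface exposes commit()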
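
On the read side, a similarly hedged sketch: run a phrase query and a broader "everywhere" query, merge per-book hits with SearchResult.aggregate(), and pull highlighted snippets back out of the snippet files. What exactly highlight() accepts for q is defined in custom.CustomSolrInterface, which is not part of this diff, so passing the raw query string here is an assumption.

    # Hypothetical usage sketch, not taken from the diff above.
    from search.index import Search, SearchResult   # import path assumed

    search = Search()
    hits = SearchResult.aggregate(
        search.search_phrase(u"lalka", book=True),   # example query string
        search.search_everywhere(u"lalka"))

    for result in sorted(hits, reverse=True):        # SearchResult.__cmp__ sorts by score, then by date
        print result.book, result.score
        search.get_snippets(result, u"lalka", num=3) # fills result.snippets from the snippet store
        for snip in result.snippets:
            if snip:
                print snip                           # highlighted fragment, verse-end markers stripped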
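
The snippet store itself is one flat file per book (plus numbered revisions), with each part's (position, length) kept in Solr fields; a quick round trip, assuming settings.SEARCH_INDEX is writable and using an arbitrary example book id:

    # Hypothetical round trip through the Snippets store, not taken from the diff above.
    from search.index import Snippets   # import path assumed

    snips = Snippets(book_id=123).open('w')    # picks a fresh revision if a file already exists
    try:
        pos = snips.add(u"Example snippet text for part one.")   # -> (position, length)
    finally:
        snips.close()

    reader = Snippets(book_id=123, revision=snips.revision).open()
    try:
        print reader.get(pos)                  # -> the original unicode text
    finally:
        reader.close()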