# -*- coding: utf-8 -*-
from django.conf import settings
-from lucene import SimpleFSDirectory, IndexWriter, CheckIndex, \
- File, Field, Integer, \
- NumericField, Version, Document, JavaError, IndexSearcher, \
- QueryParser, PerFieldAnalyzerWrapper, \
- SimpleAnalyzer, PolishAnalyzer, ArrayList, \
- KeywordAnalyzer, NumericRangeQuery, NumericRangeFilter, BooleanQuery, \
- BlockJoinQuery, BlockJoinCollector, Filter, TermsFilter, ChainedFilter, \
- HashSet, BooleanClause, Term, CharTermAttribute, \
- PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, \
- FuzzyQuery, FuzzyTermEnum, PrefixTermEnum, Sort, Integer, \
- SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
- BooleanFilter, TermsFilter, FilterClause, QueryWrapperFilter, \
- initVM, CLASSPATH, JArray, JavaError
- # KeywordAnalyzer
-
-# Initialize jvm
-JVM = initVM(CLASSPATH)
-
-import sys
+
import os
import re
import errno
from librarian import dcparser
from librarian.parser import WLDocument
+from lxml import etree
import catalogue.models
-from multiprocessing.pool import ThreadPool
-from threading import current_thread
-import atexit
+from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
+from itertools import chain
import traceback
+import logging
+log = logging.getLogger('search')
+import sunburnt
+import custom
+import operator
-class WLAnalyzer(PerFieldAnalyzerWrapper):
- def __init__(self):
- polish = PolishAnalyzer(Version.LUCENE_34)
- # polish_gap.setPositionIncrementGap(999)
-
- simple = SimpleAnalyzer(Version.LUCENE_34)
- # simple_gap.setPositionIncrementGap(999)
-
- keyword = KeywordAnalyzer(Version.LUCENE_34)
-
- # not sure if needed: there's NOT_ANALYZED meaning basically the same
-
- PerFieldAnalyzerWrapper.__init__(self, polish)
-
- self.addAnalyzer("tags", simple)
- self.addAnalyzer("technical_editors", simple)
- self.addAnalyzer("editors", simple)
- self.addAnalyzer("url", keyword)
- self.addAnalyzer("source_url", keyword)
- self.addAnalyzer("source_name", simple)
- self.addAnalyzer("publisher", simple)
- self.addAnalyzer("author", simple)
- self.addAnalyzer("is_book", keyword)
-
- self.addAnalyzer("themes", simple)
- self.addAnalyzer("themes_pl", polish)
-
- self.addAnalyzer("tag_name", simple)
- self.addAnalyzer("tag_name_pl", polish)
-
- self.addAnalyzer("KEYWORD", keyword)
- self.addAnalyzer("SIMPLE", simple)
- self.addAnalyzer("POLISH", polish)
-
-
-class IndexStore(object):
- def __init__(self):
- self.make_index_dir()
- self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))
-
- def make_index_dir(self):
- try:
- os.makedirs(settings.SEARCH_INDEX)
- except OSError as exc:
- if exc.errno == errno.EEXIST:
- pass
- else: raise
-
-
-class IndexChecker(IndexStore):
- def __init__(self):
- IndexStore.__init__(self)
-
- def check(self):
- checker = CheckIndex(self.store)
- status = checker.checkIndex()
- return status
+class SolrIndex(object):
+ def __init__(self, mode=None):
+ self.index = custom.CustomSolrInterface(settings.SOLR, mode=mode)
class Snippets(object):
+ """
+    This class manages the snippet file for an indexed object (book).
+    The snippets are concatenated together, and their positions and
+    lengths are kept in the search index fields.
+ """
SNIPPET_DIR = "snippets"
- def __init__(self, book_id):
+ def __init__(self, book_id, revision=None):
try:
os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else: raise
self.book_id = book_id
+ self.revision = revision
self.file = None
+ @property
+ def path(self):
+ if self.revision: fn = "%d.%d" % (self.book_id, self.revision)
+ else: fn = "%d" % self.book_id
+
+ return os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, fn)
+
def open(self, mode='r'):
+ """
+ Open the snippet file. Call .close() afterwards.
+ """
if not 'b' in mode:
mode += 'b'
- self.file = open(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, str(self.book_id)), mode)
+
+ if 'w' in mode:
+ if os.path.exists(self.path):
+ self.revision = 1
+ while True:
+ if not os.path.exists(self.path):
+ break
+ self.revision += 1
+
+ self.file = open(self.path, mode)
self.position = 0
return self
def add(self, snippet):
+ """
+ Append a snippet (unicode) to the snippet file.
+ Return a (position, length) tuple
+ """
txt = snippet.encode('utf-8')
l = len(txt)
self.file.write(txt)
pos = (self.position, l)
self.position += l
- print "Snip<%s>%s</s>" %(pos, txt)
return pos
def get(self, pos):
+ """
+        Given a (position, length) tuple, return the unicode
+        snippet stored there.
+ """
self.file.seek(pos[0], 0)
txt = self.file.read(pos[1]).decode('utf-8')
- print "got from snippets %d bytes from %s:" % (len(txt), pos)
return txt
def close(self):
+ """Close snippet file"""
self.file.close()
+ def remove(self):
+ self.revision = None
+ try:
+ os.unlink(self.path)
+ self.revision = 0
+ while True:
+ self.revision += 1
+ os.unlink(self.path)
+ except OSError:
+ pass
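+
+    # A minimal usage sketch (illustrative only, not part of the original code;
+    # assumes a book with id 1 and a writable settings.SEARCH_INDEX):
+    #
+    #   snips = Snippets(1).open('w')
+    #   try:
+    #       pos = snips.add(u"Fragment of the text...")   # -> (position, length)
+    #   finally:
+    #       snips.close()
+    #
+    #   snips = Snippets(1, revision=snips.revision).open()
+    #   try:
+    #       print snips.get(pos)                          # -> u"Fragment of the text..."
+    #   finally:
+    #       snips.close()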
-class Index(IndexStore):
- def __init__(self, analyzer=None):
- IndexStore.__init__(self)
- self.index = None
- if not analyzer:
- analyzer = WLAnalyzer()
- self.analyzer = analyzer
- def open(self, analyzer=None):
- if self.index:
- raise Exception("Index is already opened")
- self.index = IndexWriter(self.store, self.analyzer,\
- IndexWriter.MaxFieldLength.LIMITED)
- return self.index
+class Index(SolrIndex):
+ """
+ Class indexing books.
+ """
+ def __init__(self):
+ super(Index, self).__init__(mode='rw')
+
+ def delete_query(self, *queries):
+ """
+        index.delete(queries=...) doesn't work, so we reimplement it
+        by deleting a list of uids instead.
+ """
+ uids = set()
+ for q in queries:
+ if isinstance(q, sunburnt.search.LuceneQuery):
+ q = self.index.query(q)
+ q.field_limiter.update(['uid'])
+ st = 0
+ rows = 100
+ while True:
+ ids = q.paginate(start=st, rows=rows).execute()
+ if not len(ids):
+ break
+ for res in ids:
+ uids.add(res['uid'])
+ st += rows
+ # print "Will delete %s" % ','.join([x for x in uids])
+ if uids:
+ self.index.delete(uids)
+ return True
+ else:
+ return False
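+
+    # Hedged usage sketch for delete_query(): the field names follow the schema
+    # used in this module, and commit() is assumed to be provided by sunburnt:
+    #
+    #   index = Index()
+    #   index.delete_query(index.index.Q(book_id=1), index.index.Q(tag_id=2))
+    #   index.index.commit()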
+
+ def index_tags(self, *tags, **kw):
+ """
+ Re-index global tag list.
+        Removes all tags from the index, then indexes them again.
+ Indexed fields include: id, name (with and without polish stems), category
+ """
+ log.debug("Indexing tags")
+ remove_only = kw.get('remove_only', False)
+ # first, remove tags from index.
+ if tags:
+ tag_qs = []
+ for tag in tags:
+ q_id = self.index.Q(tag_id=tag.id)
+
+ if isinstance(tag, PDCounterAuthor):
+ q_cat = self.index.Q(tag_category='pd_author')
+ elif isinstance(tag, PDCounterBook):
+ q_cat = self.index.Q(tag_category='pd_book')
+ else:
+ q_cat = self.index.Q(tag_category=tag.category)
+
+ q_id_cat = self.index.Q(q_id & q_cat)
+ tag_qs.append(q_id_cat)
+            self.delete_query(*tag_qs)
+ else: # all
+ q = self.index.Q(tag_id__any=True)
+ self.delete_query(q)
+
+ if not remove_only:
+ # then add them [all or just one passed]
+ if not tags:
+ tags = chain(catalogue.models.Tag.objects.exclude(category='set'), \
+ PDCounterAuthor.objects.all(), \
+ PDCounterBook.objects.all())
+
+ for tag in tags:
+ if isinstance(tag, PDCounterAuthor):
+ doc = {
+ "tag_id": int(tag.id),
+ "tag_name": tag.name,
+ "tag_name_pl": tag.name,
+ "tag_category": 'pd_author',
+ "is_pdcounter": True,
+ "uid": "tag%d_pd_a" % tag.id
+ }
+ elif isinstance(tag, PDCounterBook):
+ doc = {
+ "tag_id": int(tag.id),
+ "tag_name": tag.title,
+ "tag_name_pl": tag.title,
+ "tag_category": 'pd_book',
+ "is_pdcounter": True,
+ "uid": "tag%d_pd_b" % tag.id
+ }
+ else:
+ doc = {
+ "tag_id": int(tag.id),
+ "tag_name": tag.name,
+ "tag_name_pl": tag.name,
+ "tag_category": tag.category,
+ "is_pdcounter": False,
+ "uid": "tag%d" % tag.id
+ }
+ self.index.add(doc)
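+
+    # For example (an illustrative sketch; the slug value is hypothetical):
+    #
+    #   index = Index()
+    #   tag = catalogue.models.Tag.objects.get(slug='romantyzm')
+    #   index.index_tags(tag)                    # re-index a single tag
+    #   index.index_tags(tag, remove_only=True)  # or just drop it from the index
+    #   index.index_tags()                       # re-index the whole tag list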
- def optimize(self):
- self.index.optimize()
+ def create_book_doc(self, book):
+ """
+        Create a base document dict with the book id (and parent id, if any).
+ """
+ doc = {
+ 'book_id': int(book.id),
+ }
+ if book.parent is not None:
+ doc["parent_id"] = int(book.parent.id)
+ return doc
- def close(self):
- try:
- self.index.optimize()
- except JavaError, je:
- print "Error during optimize phase, check index: %s" % je
-
- self.index.close()
- self.index = None
-
- def index_tags(self):
- q = NumericRangeQuery.newIntRange("tag_id", 0, Integer.MAX_VALUE, True, True)
- self.index.deleteDocuments(q)
-
- for tag in catalogue.models.Tag.objects.all():
- doc = Document()
- doc.add(NumericField("tag_id", Field.Store.YES, True).setIntValue(tag.id))
- doc.add(Field("tag_name", tag.name, Field.Store.NO, Field.Index.ANALYZED))
- doc.add(Field("tag_name_pl", tag.name, Field.Store.NO, Field.Index.ANALYZED))
- doc.add(Field("tag_category", tag.category, Field.Store.NO, Field.Index.NOT_ANALYZED))
- self.index.addDocument(doc)
-
- def remove_book(self, book):
- q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
- self.index.deleteDocuments(q)
-
- def index_book(self, book, overwrite=True):
+ def remove_book(self, book_or_id, remove_snippets=True):
+ """Removes a book from search index.
+ book - Book instance."""
+ if isinstance(book_or_id, catalogue.models.Book):
+ book_id = book_or_id.id
+ else:
+ book_id = book_or_id
+
+ self.delete_query(self.index.Q(book_id=book_id))
+
+ if remove_snippets:
+ snippets = Snippets(book_id)
+ snippets.remove()
+
+ def index_book(self, book, book_info=None, overwrite=True):
+ """
+ Indexes the book.
+        Creates a search document from the extracted metadata
+ and calls self.index_content() to index the contents of the book.
+ """
if overwrite:
- self.remove_book(book)
+            # we don't remove snippets, since they might still be needed by
+            # threads using an index that has not been reopened yet
+ self.remove_book(book, remove_snippets=False)
book_doc = self.create_book_doc(book)
- meta_fields = self.extract_metadata(book)
- for f in meta_fields.values():
- if isinstance(f, list) or isinstance(f, tuple):
- for elem in f:
- book_doc.add(elem)
- else:
- book_doc.add(f)
+ meta_fields = self.extract_metadata(book, book_info, dc_only=['source_name', 'authors', 'translators', 'title'])
+ # let's not index it - it's only used for extracting publish date
+ if 'source_name' in meta_fields:
+ del meta_fields['source_name']
- self.index.addDocument(book_doc)
+ for n, f in meta_fields.items():
+ book_doc[n] = f
+
+ book_doc['uid'] = "book%s" % book_doc['book_id']
+ self.index.add(book_doc)
del book_doc
+ book_fields = {
+ 'title': meta_fields['title'],
+ 'authors': meta_fields['authors'],
+ 'published_date': meta_fields['published_date']
+ }
+
+ if 'translators' in meta_fields:
+ book_fields['translators'] = meta_fields['translators']
- self.index_content(book, book_fields=[meta_fields['title'], meta_fields['author']])
+ self.index_content(book, book_fields=book_fields)
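+
+    # A typical (illustrative) indexing sequence for a single book; the commit()
+    # call assumes the underlying sunburnt interface provides it:
+    #
+    #   index = Index()
+    #   book = catalogue.models.Book.objects.get(id=1)
+    #   index.index_book(book)
+    #   index.index.commit()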
master_tags = [
'opowiadanie',
'dramat_wierszowany_l',
'dramat_wierszowany_lp',
'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
- 'wywiad'
+ 'wywiad',
]
- skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
+ ignore_content_tags = [
+ 'uwaga', 'extra',
+ 'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu',
+ 'didaskalia',
+ 'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc',
+ ]
- def create_book_doc(self, book):
+ footnote_tags = ['pa', 'pt', 'pr', 'pe']
+
+ skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
+
+ published_date_re = re.compile("([0-9]+)[\]. ]*$")
+
+ def extract_metadata(self, book, book_info=None, dc_only=None):
"""
- Create a lucene document connected to the book
+        Extracts metadata from the book and returns a dict of fields keyed by field name.
"""
- doc = Document()
- doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
- if book.parent is not None:
- doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
- return doc
-
- def extract_metadata(self, book):
fields = {}
- book_info = dcparser.parse(book.xml_file)
- print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident))
+ if book_info is None:
+ book_info = dcparser.parse(open(book.xml_file.path))
- fields['slug'] = Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)
- fields['tags'] = self.add_gaps([Field("tags", t.name, Field.Store.NO, Field.Index.ANALYZED) for t in book.tags], 'tags')
- fields['is_book'] = Field("is_book", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED)
+ fields['slug'] = book.slug
+ fields['tags'] = [t.name for t in book.tags]
+ fields['is_book'] = True
# validator, name
for field in dcparser.BookInfo.FIELDS:
+ if dc_only and field.name not in dc_only:
+ continue
            if hasattr(book_info, field.name):
                if not getattr(book_info, field.name):
                    continue
                # since no type information is available, we rely on the validator
                type_indicator = field.validator
                if type_indicator == dcparser.as_unicode:
                    s = getattr(book_info, field.name)
                    if field.multiple:
                        s = ', '.join(s)
- try:
- fields[field.name] = Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED)
- except JavaError as je:
- raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
+ fields[field.name] = s
elif type_indicator == dcparser.as_person:
p = getattr(book_info, field.name)
if isinstance(p, dcparser.Person):
persons = unicode(p)
else:
persons = ', '.join(map(unicode, p))
- fields[field.name] = Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED)
+ fields[field.name] = persons
elif type_indicator == dcparser.as_date:
dt = getattr(book_info, field.name)
- fields[field.name] = Field(field.name, "%04d%02d%02d" %\
- (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)
+ fields[field.name] = dt
+
+ # get published date
+ pd = None
+ if hasattr(book_info, 'source_name') and book_info.source_name:
+ match = self.published_date_re.search(book_info.source_name)
+ if match is not None:
+ pd = str(match.groups()[0])
+ if not pd: pd = ""
+ fields["published_date"] = pd
+
return fields
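+
+    # The published_date_re defined above pulls the trailing year out of the
+    # dc source_name string, e.g. (illustrative value):
+    #
+    #   m = Index.published_date_re.search(u"Czytelnik, Warszawa 1990")
+    #   m.groups()[0]   # -> u'1990'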
+ # def add_gaps(self, fields, fieldname):
+ # """
+ # Interposes a list of fields with gap-fields, which are indexed spaces and returns it.
+ # This allows for doing phrase queries which do not overlap the gaps (when slop is 0).
+ # """
+ # def gap():
+ # while True:
+ # yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
+ # return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
+
def get_master(self, root):
+ """
+ Returns the first master tag from an etree.
+ """
for master in root.iter():
if master.tag in self.master_tags:
return master
- def add_gaps(self, fields, fieldname):
- def gap():
- while True:
- yield Field(fieldname, ' ', Field.Store.NO, Field.Index.NOT_ANALYZED)
- return reduce(lambda a, b: a + b, zip(fields, gap()))[0:-1]
-
- def index_content(self, book, book_fields=[]):
- wld = WLDocument.from_file(book.xml_file.path)
+ def index_content(self, book, book_fields={}):
+ """
+        Walks the book XML and extracts content from it.
+ Adds parts for each header tag and for each fragment.
+ """
+ wld = WLDocument.from_file(book.xml_file.path, parse_dublincore=False)
root = wld.edoc.getroot()
master = self.get_master(root)
if master is None:
return []
- def walker(node):
- yield node, None
- for child in list(node):
- for b, e in walker(child):
- yield b, e
- yield None, node
+ def walker(node, ignore_tags=[]):
+
+ if node.tag not in ignore_tags:
+ yield node, None, None
+ if node.text is not None:
+ yield None, node.text, None
+                for child in list(node):
+                    for b, t, e in walker(child, ignore_tags):
+                        yield b, t, e
+ yield None, None, node
+
+ if node.tail is not None:
+ yield None, node.tail, None
return
def fix_format(text):
- return re.sub("/$", "", text, flags=re.M)
+ # separator = [u" ", u"\t", u".", u";", u","]
+ if isinstance(text, list):
+ # need to join it first
+                text = filter(lambda s: s is not None, text)
+ text = u' '.join(text)
+ # for i in range(len(text)):
+ # if i > 0:
+ # if text[i][0] not in separator\
+ # and text[i - 1][-1] not in separator:
+ # text.insert(i, u" ")
+
+ return re.sub("(?m)/$", "", text)
def add_part(snippets, **fields):
doc = self.create_book_doc(book)
- for f in book_fields:
- doc.add(f)
+ for n, v in book_fields.items():
+ doc[n] = v
+
+ doc['header_index'] = fields["header_index"]
+ doc['header_span'] = 'header_span' in fields and fields['header_span'] or 1
+ doc['header_type'] = fields['header_type']
- doc.add(NumericField('header_index', Field.Store.YES, True).setIntValue(fields["header_index"]))
- doc.add(NumericField("header_span", Field.Store.YES, True)\
- .setIntValue('header_span' in fields and fields['header_span'] or 1))
- doc.add(Field('header_type', fields["header_type"], Field.Store.YES, Field.Index.NOT_ANALYZED))
+ doc['text'] = fields['text']
- doc.add(Field('content', fields["content"], Field.Store.NO, Field.Index.ANALYZED, \
- Field.TermVector.WITH_POSITIONS_OFFSETS))
+ # snippets
+ snip_pos = snippets.add(fields["text"])
- snip_pos = snippets.add(fields["content"])
- doc.add(NumericField("snippets_position", Field.Store.YES, True).setIntValue(snip_pos[0]))
- doc.add(NumericField("snippets_length", Field.Store.YES, True).setIntValue(snip_pos[1]))
+ doc['snippets_position'] = snip_pos[0]
+ doc['snippets_length'] = snip_pos[1]
+ if snippets.revision:
+ doc["snippets_revision"] = snippets.revision
if 'fragment_anchor' in fields:
- doc.add(Field("fragment_anchor", fields['fragment_anchor'],
- Field.Store.YES, Field.Index.NOT_ANALYZED))
+ doc["fragment_anchor"] = fields['fragment_anchor']
if 'themes' in fields:
- themes, themes_pl = zip(*[
- (Field("themes", theme, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS),
- Field("themes_pl", theme, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS))
- for theme in fields['themes']])
-
- themes = self.add_gaps(themes, 'themes')
- themes_pl = self.add_gaps(themes_pl, 'themes_pl')
-
- for t in themes:
- doc.add(t)
- for t in themes_pl:
- doc.add(t)
-
+ doc['themes'] = fields['themes']
+ doc['uid'] = "part%s%s%s" % (doc['header_index'],
+ doc['header_span'],
+ doc.get('fragment_anchor', ''))
return doc
+ def give_me_utf8(s):
+ if isinstance(s, unicode):
+ return s.encode('utf-8')
+ else:
+ return s
+
fragments = {}
snippets = Snippets(book.id).open('w')
        try:
            # walk the direct children of the master tag, keeping track of their position
            for position, header in enumerate(master):
                if header.tag in self.skip_header_tags:
                    continue
+ if header.tag is etree.Comment:
+ continue
- content = u' '.join([t for t in header.itertext()])
- content = fix_format(content)
-
- doc = add_part(snippets, header_index=position, header_type=header.tag, content=content)
-
- self.index.addDocument(doc)
-
- for start, end in walker(header):
+ # section content
+ content = []
+ footnote = []
+
+ def all_content(text):
+ for frag in fragments.values():
+ frag['text'].append(text)
+ content.append(text)
+ handle_text = [all_content]
+
+ for start, text, end in walker(header, ignore_tags=self.ignore_content_tags):
+ # handle footnotes
+ if start is not None and start.tag in self.footnote_tags:
+ footnote = []
+
+ def collect_footnote(t):
+ footnote.append(t)
+
+ handle_text.append(collect_footnote)
+                    elif end is not None and end.tag in self.footnote_tags:
+ handle_text.pop()
+ doc = add_part(snippets, header_index=position, header_type=header.tag,
+ text=u''.join(footnote),
+ is_footnote=True)
+ self.index.add(doc)
+ #print "@ footnote text: %s" % footnote
+ footnote = []
+
+ # handle fragments and themes.
if start is not None and start.tag == 'begin':
fid = start.attrib['id'][1:]
- fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
- fragments[fid]['content'].append(start.tail)
+ fragments[fid] = {'text': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
+
+ # themes for this fragment
elif start is not None and start.tag == 'motyw':
fid = start.attrib['id'][1:]
+ handle_text.append(None)
if start.text is not None:
- fragments[fid]['themes'] += map(unicode.strip, start.text.split(','))
- fragments[fid]['content'].append(start.tail)
+ fragments[fid]['themes'] += map(unicode.strip, map(unicode, (start.text.split(','))))
+ elif end is not None and end.tag == 'motyw':
+ handle_text.pop()
+
elif start is not None and start.tag == 'end':
fid = start.attrib['id'][1:]
                    if fid not in fragments:
                        continue  # broken fragment end without a matching begin, skip it

                    frag = fragments[fid]
                    if frag['themes'] == []:
                        continue  # empty themes list.
                    del fragments[fid]
- def jstr(l):
- return u' '.join(map(
- lambda x: x == None and u'(none)' or unicode(x),
- l))
-
doc = add_part(snippets,
header_type=frag['start_header'],
header_index=frag['start_section'],
header_span=position - frag['start_section'] + 1,
fragment_anchor=fid,
- content=u' '.join(filter(lambda s: s is not None, frag['content'])),
+ text=fix_format(frag['text']),
themes=frag['themes'])
+ #print '@ FRAG %s' % frag['content']
+ self.index.add(doc)
- self.index.addDocument(doc)
- elif start is not None:
- for frag in fragments.values():
- frag['content'].append(start.text)
- elif end is not None:
- for frag in fragments.values():
- frag['content'].append(end.tail)
- finally:
- snippets.close()
-
-
- def __enter__(self):
- self.open()
- return self
-
- def __exit__(self, type, value, tb):
- self.close()
-
-
-def log_exception_wrapper(f):
- def _wrap(*a):
- try:
- f(*a)
- except Exception, e:
- print("Error in indexing thread: %s" % e)
- traceback.print_exc()
- raise e
- return _wrap
-
-
-class ReusableIndex(Index):
- """
- Works like index, but does not close/optimize Lucene index
- until program exit (uses atexit hook).
- This is usefull for importbooks command.
-
- if you cannot rely on atexit, use ReusableIndex.close_reusable() yourself.
- """
- index = None
- pool = None
- pool_jobs = None
-
- def open(self, analyzer=None, threads=4):
- if ReusableIndex.index is not None:
- self.index = ReusableIndex.index
- else:
- print("opening index")
- ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread() )
- ReusableIndex.pool_jobs = []
- Index.open(self, analyzer)
- ReusableIndex.index = self.index
- atexit.register(ReusableIndex.close_reusable)
-
- def index_book(self, *args, **kw):
- job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
- ReusableIndex.pool_jobs.append(job)
-
- @staticmethod
- def close_reusable():
- if ReusableIndex.index is not None:
- print("wait for indexing to finish")
- for job in ReusableIndex.pool_jobs:
- job.get()
- sys.stdout.write('.')
- sys.stdout.flush()
- print("done.")
- ReusableIndex.pool.close()
-
- ReusableIndex.index.optimize()
- ReusableIndex.index.close()
- ReusableIndex.index = None
-
- def close(self):
- pass
-
-
-class Search(IndexStore):
- def __init__(self, default_field="content"):
- IndexStore.__init__(self)
- self.analyzer = WLAnalyzer() #PolishAnalyzer(Version.LUCENE_34)
- ## self.analyzer = WLAnalyzer()
- self.searcher = IndexSearcher(self.store, True)
- self.parser = QueryParser(Version.LUCENE_34, default_field,
- self.analyzer)
-
- self.parent_filter = TermsFilter()
- self.parent_filter.addTerm(Term("is_book", "true"))
-
- def query(self, query):
- return self.parser.parse(query)
-
- def wrapjoins(self, query, fields=[]):
- """
- This functions modifies the query in a recursive way,
- so Term and Phrase Queries contained, which match
- provided fields are wrapped in a BlockJoinQuery,
- and so delegated to children documents.
- """
- if BooleanQuery.instance_(query):
- qs = BooleanQuery.cast_(query)
- for clause in qs:
- clause = BooleanClause.cast_(clause)
- clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
- return qs
- else:
- termset = HashSet()
- query.extractTerms(termset)
- for t in termset:
- t = Term.cast_(t)
- if t.field() not in fields:
- return query
- return BlockJoinQuery(query, self.parent_filter,
- BlockJoinQuery.ScoreMode.Total)
-
- def simple_search(self, query, max_results=50):
- """Returns (books, total_hits)
- """
-
- tops = self.searcher.search(self.query(query), max_results)
- bks = []
- for found in tops.scoreDocs:
- doc = self.searcher.doc(found.doc)
- bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
- return (bks, tops.totalHits)
-
- def search(self, query, max_results=50):
- query = self.query(query)
- query = self.wrapjoins(query, ["content", "themes"])
+ # Collect content.
- tops = self.searcher.search(query, max_results)
- bks = []
- for found in tops.scoreDocs:
- doc = self.searcher.doc(found.doc)
- bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
- return (bks, tops.totalHits)
-
- def bsearch(self, query, max_results=50):
- q = self.query(query)
- bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
+                    if text is not None and handle_text:
+ hdl = handle_text[-1]
+ if hdl is not None:
+ hdl(text)
- tops = self.searcher.search(bjq, max_results)
- bks = []
- for found in tops.scoreDocs:
- doc = self.searcher.doc(found.doc)
- bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
- return (bks, tops.totalHits)
+ # in the end, add a section text.
+ doc = add_part(snippets, header_index=position,
+ header_type=header.tag, text=fix_format(content))
+ #print '@ CONTENT: %s' % fix_format(content)
-# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
-# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
-# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
+ self.index.add(doc)
-# while (tokenStream.incrementToken()) {
-# int startOffset = offsetAttribute.startOffset();
-# int endOffset = offsetAttribute.endOffset();
-# String term = charTermAttribute.toString();
-# }
+ finally:
+ snippets.close()
class SearchResult(object):
- def __init__(self, searcher, scoreDocs, score=None, how_found=None, snippets=None):
+ def __init__(self, doc, how_found=None, query=None, query_terms=None):
+ # self.search = search
+ self.boost = 1.0
+ self._hits = []
+ self._processed_hits = None # processed hits
self.snippets = []
+ self.query_terms = query_terms
- if score:
- self.score = score
+ if 'score' in doc:
+ self._score = doc['score']
else:
- self.score = scoreDocs.score
-
- self.hits = []
+ self._score = 0
- stored = searcher.doc(scoreDocs.doc)
- self.book_id = int(stored.get("book_id"))
-
- header_type = stored.get("header_type")
- if not header_type:
- return
+ self.book_id = int(doc["book_id"])
- sec = (header_type, int(stored.get("header_index")))
- header_span = stored.get('header_span')
- header_span = header_span is not None and int(header_span) or 1
+ try:
+ self.published_date = int(doc.get("published_date"))
+            except (ValueError, TypeError):
+ self.published_date = 0
+
+ # content hits
+ header_type = doc.get("header_type", None)
+ # we have a content hit in some header of fragment
+ if header_type is not None:
+ sec = (header_type, int(doc["header_index"]))
+ header_span = doc['header_span']
+ header_span = header_span is not None and int(header_span) or 1
+ fragment = doc.get("fragment_anchor", None)
+ snippets_pos = (doc['snippets_position'], doc['snippets_length'])
+ snippets_rev = doc.get('snippets_revision', None)
+
+ hit = (sec + (header_span,), fragment, self._score, {
+ 'how_found': how_found,
+ 'snippets_pos': snippets_pos,
+ 'snippets_revision': snippets_rev,
+ 'themes': doc.get('themes', []),
+ 'themes_pl': doc.get('themes_pl', [])
+ })
+
+ self._hits.append(hit)
- fragment = stored.get("fragment_anchor")
+ def __unicode__(self):
+ return u"<SR id=%d %d(%d) hits score=%f %d snippets>" % \
+ (self.book_id, len(self._hits), self._processed_hits and len(self._processed_hits) or -1, self._score, len(self.snippets))
- hit = (sec + (header_span,), fragment, scoreDocs.score, {'how_found': how_found, 'snippets': snippets})
+ def __str__(self):
+ return unicode(self).encode('utf-8')
- self.hits.append(hit)
+ @property
+ def score(self):
+ return self._score * self.boost
def merge(self, other):
if self.book_id != other.book_id:
            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
- self.hits += other.hits
+ self._hits += other._hits
if other.score > self.score:
- self.score = other.score
+ self._score = other._score
return self
def get_book(self):
- return catalogue.models.Book.objects.get(id=self.book_id)
+ if hasattr(self, '_book'):
+ return self._book
+ self._book = catalogue.models.Book.objects.get(id=self.book_id)
+ return self._book
book = property(get_book)
- def process_hits(self):
- frags = filter(lambda r: r[1] is not None, self.hits)
- sect = filter(lambda r: r[1] is None, self.hits)
+ POSITION = 0
+ FRAGMENT = 1
+ POSITION_INDEX = 1
+ POSITION_SPAN = 2
+ SCORE = 2
+ OTHER = 3
+
+ @property
+ def hits(self):
+ if self._processed_hits is not None:
+ return self._processed_hits
+
+ # to sections and fragments
+ frags = filter(lambda r: r[self.FRAGMENT] is not None, self._hits)
+
+ sect = filter(lambda r: r[self.FRAGMENT] is None, self._hits)
+
+ # sections not covered by fragments
sect = filter(lambda s: 0 == len(filter(
- lambda f: s[0][1] >= f[0][1] and s[0][1] < f[0][1] + f[0][2],
+ lambda f: s[self.POSITION][self.POSITION_INDEX] >= f[self.POSITION][self.POSITION_INDEX]
+ and s[self.POSITION][self.POSITION_INDEX] < f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN],
frags)), sect)
hits = []
+ def remove_duplicates(lst, keyfn, compare):
+ els = {}
+ for e in lst:
+ eif = keyfn(e)
+ if eif in els:
+ if compare(els[eif], e) >= 1:
+ continue
+ els[eif] = e
+ return els.values()
+
+ # remove fragments with duplicated fid's and duplicated snippets
+ frags = remove_duplicates(frags, lambda f: f[self.FRAGMENT], lambda a, b: cmp(a[self.SCORE], b[self.SCORE]))
+ # frags = remove_duplicates(frags, lambda f: f[OTHER]['snippet_pos'] and f[OTHER]['snippet_pos'] or f[FRAGMENT],
+ # lambda a, b: cmp(a[SCORE], b[SCORE]))
+
+ # remove duplicate sections
+ sections = {}
+
for s in sect:
- m = {'score': s[2],
- 'header_index': s[0][1]
+ si = s[self.POSITION][self.POSITION_INDEX]
+ # skip existing
+ if si in sections:
+ if sections[si]['score'] >= s[self.SCORE]:
+ continue
+
+ m = {'score': s[self.SCORE],
+ 'section_number': s[self.POSITION][self.POSITION_INDEX] + 1,
}
- m.update(s[3])
- hits.append(m)
+ m.update(s[self.OTHER])
+ sections[si] = m
+
+ hits = sections.values()
for f in frags:
- frag = catalogue.models.Fragment.objects.get(anchor=f[1])
- m = {'score': f[2],
+ try:
+ frag = catalogue.models.Fragment.objects.get(anchor=f[self.FRAGMENT], book__id=self.book_id)
+ except catalogue.models.Fragment.DoesNotExist:
+ # stale index
+ continue
+ # Figure out if we were searching for a token matching some word in theme name.
+ themes = frag.tags.filter(category='theme')
+ themes_hit = set()
+ if self.query_terms is not None:
+ for i in range(0, len(f[self.OTHER]['themes'])):
+                    tms = f[self.OTHER]['themes'][i].split() + f[self.OTHER]['themes_pl'][i].split()
+ tms = map(unicode.lower, tms)
+ for qt in self.query_terms:
+ if qt in tms:
+ themes_hit.add(f[self.OTHER]['themes'][i])
+ break
+
+ def theme_by_name(n):
+ th = filter(lambda t: t.name == n, themes)
+ if th:
+ return th[0]
+ else:
+ return None
+ themes_hit = filter(lambda a: a is not None, map(theme_by_name, themes_hit))
+
+ m = {'score': f[self.SCORE],
'fragment': frag,
- 'themes': frag.tags.filter(category='theme')
+ 'section_number': f[self.POSITION][self.POSITION_INDEX] + 1,
+ 'themes': themes,
+ 'themes_hit': themes_hit
}
- m.update(f[3])
+ m.update(f[self.OTHER])
hits.append(m)
hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
- print("--- %s" % hits)
+ self._processed_hits = hits
return hits
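+
+    # Each processed hit is a plain dict; roughly (values are illustrative):
+    #
+    #   {'score': 1.5, 'section_number': 3, 'how_found': 'search_phrase',
+    #    'snippets_pos': (120, 45), 'themes': [...], ...}
+    #
+    # and fragment hits additionally carry 'fragment' and 'themes_hit'.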
- def __unicode__(self):
- return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
-
@staticmethod
def aggregate(*result_lists):
        books = {}
        for rl in result_lists:
            for r in rl:
if r.book_id in books:
books[r.book_id].merge(r)
- #print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
else:
books[r.book_id] = r
return books.values()
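+
+    # Aggregation sketch (illustrative; the two result lists are assumed to come
+    # from separate queries such as search_phrase() and search_everywhere()):
+    #
+    #   results = SearchResult.aggregate(phrase_results, everywhere_results)
+    #   results.sort(reverse=True)   # __cmp__ orders by score, then published_date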
def __cmp__(self, other):
- return cmp(self.score, other.score)
-
-
-class Hint(object):
- def __init__(self, search):
- self.search = search
- self.book_tags = {}
- self.part_tags = []
- self._book = None
-
- def book(self, book):
- self._book = book
-
- def tags(self, tags):
- for t in tags:
- if t.category in ['author', 'title', 'epoch', 'genre', 'kind']:
- lst = self.book_tags.get(t.category, [])
- lst.append(t)
- self.book_tags[t.category] = lst
- if t.category in ['theme']:
- self.part_tags.append(t)
-
- def tag_filter(self, tags, field='tags'):
- q = BooleanQuery()
-
- for tag in tags:
- toks = self.search.get_tokens(tag.name, field=field)
- tag_phrase = PhraseQuery()
- for tok in toks:
- tag_phrase.add(Term(field, tok))
- q.add(BooleanClause(tag_phrase, BooleanClause.Occur.MUST))
-
- return QueryWrapperFilter(q)
-
- def book_filter(self):
- tags = reduce(lambda a, b: a + b, self.book_tags.values(), [])
- if tags:
- return self.tag_filter(tags)
- else:
- return None
-
- def part_filter(self):
- fs = []
- if self.part_tags:
- fs.append(self.tag_filter(self.part_tags, field='themes'))
- if self._book is not None:
- fs.append(NumericRangeFilter.newIntRange('book_id', self._book.id, self._book.id, True, True))
- return MultiSearch.chain_filters(fs)
-
- def should_search_for_book(self):
- return self._book is None
-
- def just_search_in(self, all):
- """Holds logic to figure out which indexes should be search, when we have some hinst already"""
- some = []
- for field in all:
- if field == 'author' and 'author' in self.book_tags:
- continue
- if field == 'title' and self._book is not None:
- continue
- if (field == 'themes' or field == 'themes_pl') and self.part_tags:
- continue
- some.append(field)
- return some
-
-
-class MultiSearch(Search):
- """Class capable of IMDb-like searching"""
- def get_tokens(self, searched, field='content'):
- """returns tokens analyzed by a proper (for a field) analyzer
- argument can be: StringReader, string/unicode, or tokens. In the last case
- they will just be returned (so we can reuse tokens, if we don't change the analyzer)
- """
- if isinstance(searched, str) or isinstance(searched, unicode):
- searched = StringReader(searched)
- elif isinstance(searched, list):
- return searched
-
- searched.reset()
- tokens = self.analyzer.reusableTokenStream(field, searched)
- toks = []
- while tokens.incrementToken():
- cta = tokens.getAttribute(CharTermAttribute.class_)
- toks.append(cta.toString())
- return toks
-
- def fuzziness(self, fuzzy):
- if not fuzzy:
- return None
- if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
- return fuzzy
+ c = cmp(self.score, other.score)
+ if c == 0:
+ # this is inverted, because earlier date is better
+ return cmp(other.published_date, self.published_date)
else:
- return 0.5
+ return c
- def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
- if fuzzy:
- phrase = MultiPhraseQuery()
- for t in tokens:
- term = Term(field, t)
- fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
- fuzzterms = []
-
- while True:
- # print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
- ft = fuzzterm.term()
- if ft:
- fuzzterms.append(ft)
- if not fuzzterm.next(): break
- if fuzzterms:
- phrase.add(JArray('object')(fuzzterms, Term))
- else:
- phrase.add(term)
- else:
- phrase = PhraseQuery()
- phrase.setSlop(slop)
- for t in tokens:
- term = Term(field, t)
- phrase.add(term)
- return phrase
-
- def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
- q = BooleanQuery()
- for t in tokens:
- term = Term(field, t)
- if fuzzy:
- term = FuzzyQuery(term, self.fuzziness(fuzzy))
- else:
- term = TermQuery(term)
- q.add(BooleanClause(term, modal))
- return q
-
- def content_query(self, query):
- return BlockJoinQuery(query, self.parent_filter,
- BlockJoinQuery.ScoreMode.Total)
-
- def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
- fields_to_search = ['author', 'title']
- only_in = None
- if hint:
- if not hint.should_search_for_book():
- return []
- fields_to_search = hint.just_search_in(fields_to_search)
- only_in = hint.book_filter()
-
- qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]
-
- books = []
- for q in qrys:
- top = self.searcher.search(q,
- self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
- max_results)
- for found in top.scoreDocs:
- books.append(SearchResult(self.searcher, found))
- return books
+ def __len__(self):
+ return len(self.hits)
- def search_perfect_parts(self, searched, max_results=20, fuzzy=False, hint=None):
- qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]
+ def snippet_pos(self, idx=0):
+ return self.hits[idx]['snippets_pos']
- flt = None
- if hint:
- flt = hint.part_filter()
+ def snippet_revision(self, idx=0):
+ try:
+ return self.hits[idx]['snippets_revision']
+ except:
+ return None
- books = []
- for q in qrys:
- top = self.searcher.search(q,
- self.chain_filters([self.term_filter(Term('is_book', 'true'), inverse=True),
- flt
- ]),
- max_results)
- for found in top.scoreDocs:
- books.append(SearchResult(self.searcher, found, snippets=self.get_snippets(found, q)))
- return books
+class Search(SolrIndex):
+ """
+ Search facilities.
+ """
+ def __init__(self, default_field="text"):
+ super(Search, self).__init__(mode='r')
- def search_everywhere(self, searched, max_results=20, fuzzy=False, hint=None):
- books = []
- only_in = None
- if hint:
- only_in = hint.part_filter()
+ def make_term_query(self, query, field='text', modal=operator.or_):
+ """
+        Returns term queries for each word of the query,
+        joined into one boolean query.
+        modal - the operator used to join the term queries (OR by default).
+ """
+ if query is None: query = ''
+ q = self.index.Q()
+ q = reduce(modal, map(lambda s: self.index.Q(**{field: s}),
+ query.split(r" ")), q)
- # content only query : themes x content
- q = BooleanQuery()
+ return q
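+
+    # E.g. (sketch) of the boolean queries built here:
+    #
+    #   search = Search()
+    #   q_any = search.make_term_query(u"dwa slowa")                       # text:dwa OR text:slowa
+    #   q_all = search.make_term_query(u"dwa slowa", modal=operator.and_)  # text:dwa AND text:slowa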
- tokens = self.get_tokens(searched)
- if hint is None or hint.just_search_in(['themes_pl']) != []:
- q.add(BooleanClause(self.make_term_query(tokens, field='themes_pl',
- fuzzy=fuzzy), BooleanClause.Occur.MUST))
+ def search_phrase(self, searched, field='text', book=False,
+ filters=None,
+ snippets=False):
+ if filters is None: filters = []
+ if book: filters.append(self.index.Q(is_book=True))
- q.add(BooleanClause(self.make_term_query(tokens, field='content',
- fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+ q = self.index.query(**{field: searched})
+ q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True)
+ res = q.execute()
+ return [SearchResult(found, how_found=u'search_phrase') for found in res]
- topDocs = self.searcher.search(q, only_in, max_results)
- for found in topDocs.scoreDocs:
- books.append(SearchResult(self.searcher, found))
+ def search_some(self, searched, fields, book=True,
+ filters=None, snippets=True, query_terms=None):
+ assert isinstance(fields, list)
+ if filters is None: filters = []
+ if book: filters.append(self.index.Q(is_book=True))
- # query themes/content x author/title/tags
- q = BooleanQuery()
- in_meta = BooleanQuery()
- in_content = BooleanQuery()
+ query = self.index.Q()
- for fld in ['themes', 'content', 'tags', 'author', 'title']:
- in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))
+ for fld in fields:
+ query = self.index.Q(query | self.make_term_query(searched, fld))
- topDocs = self.searcher.search(q, only_in, max_results)
- for found in topDocs.scoreDocs:
- books.append(SearchResult(self.searcher, found))
+ query = self.index.query(query)
+ query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True)
+ res = query.execute()
+ return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res]
- return books
-
- def multisearch(self, query, max_results=50):
+ def search_everywhere(self, searched, query_terms=None):
"""
- Search strategy:
- - (phrase) OR -> content
- -> title
- -> author
- - (keywords) -> author
- -> motyw
- -> tags
- -> content
+ Tries to use search terms to match different fields of book (or its parts).
+ E.g. one word can be an author survey, another be a part of the title, and the rest
+ are some words from third chapter.
"""
- # queryreader = StringReader(query)
- # tokens = self.get_tokens(queryreader)
-
- # top_level = BooleanQuery()
- # Should = BooleanClause.Occur.SHOULD
-
- # phrase_level = BooleanQuery()
- # phrase_level.setBoost(1.3)
-
- # p_content = self.make_phrase(tokens, joined=True)
- # p_title = self.make_phrase(tokens, 'title')
- # p_author = self.make_phrase(tokens, 'author')
-
- # phrase_level.add(BooleanClause(p_content, Should))
- # phrase_level.add(BooleanClause(p_title, Should))
- # phrase_level.add(BooleanClause(p_author, Should))
+ books = []
+ # content only query : themes x content
+ q = self.make_term_query(searched, 'text')
+ q_themes = self.make_term_query(searched, 'themes_pl')
- # kw_level = BooleanQuery()
+ query = self.index.query(q).query(q_themes).field_limit(score=True, all_fields=True)
+ res = query.execute()
- # kw_level.add(self.make_term_query(tokens, 'author'), Should)
- # j_themes = self.make_term_query(tokens, 'themes', joined=True)
- # kw_level.add(j_themes, Should)
- # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
- # j_con = self.make_term_query(tokens, joined=True)
- # kw_level.add(j_con, Should)
+ for found in res:
+ books.append(SearchResult(found, how_found='search_everywhere_themesXcontent', query_terms=query_terms))
- # top_level.add(BooleanClause(phrase_level, Should))
- # top_level.add(BooleanClause(kw_level, Should))
+ # query themes/content x author/title/tags
+ in_content = self.index.Q()
+ in_meta = self.index.Q()
- return None
+ for fld in ['themes_pl', 'text']:
+ in_content |= self.make_term_query(searched, field=fld)
- def book_search(self, query, filter=None, max_results=50, collector=None):
- tops = self.searcher.search(query, filter, max_results)
- #tops = self.searcher.search(p_content, max_results)
+ for fld in ['tags', 'authors', 'title']:
+ in_meta |= self.make_term_query(searched, field=fld)
- bks = []
- for found in tops.scoreDocs:
- doc = self.searcher.doc(found.doc)
- b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
- bks.append(b)
- print "%s (%d) -> %f" % (b, b.id, found.score)
- return bks
+ q = in_content & in_meta
+ res = self.index.query(q).field_limit(score=True, all_fields=True).execute()
- def get_snippets(self, scoreDoc, query, field='content'):
- htmlFormatter = SimpleHTMLFormatter()
- highlighter = Highlighter(htmlFormatter, QueryScorer(query))
+ for found in res:
+ books.append(SearchResult(found, how_found='search_everywhere', query_terms=query_terms))
- stored = self.searcher.doc(scoreDoc.doc)
+ return books
- # locate content.
- snippets = Snippets(stored.get('book_id')).open()
+ def get_snippets(self, searchresult, query, field='text', num=1):
+ """
+        Returns highlighted snippets for a search result.
+ """
+ maxnum = len(searchresult)
+ if num is None or num < 0 or num > maxnum:
+ num = maxnum
+ book_id = searchresult.book_id
+ revision = searchresult.snippet_revision()
+ snippets = Snippets(book_id, revision=revision)
+ snips = [None] * maxnum
try:
- text = snippets.get((int(stored.get('snippets_position')),
- int(stored.get('snippets_length'))))
+ snippets.open()
+ idx = 0
+ while idx < maxnum and num > 0:
+ position, length = searchresult.snippet_pos(idx)
+                if position is None or length is None:
+                    idx += 1
+                    continue
+ text = snippets.get((int(position),
+ int(length)))
+ snip = self.index.highlight(text=text, field=field, q=query)
+ snips[idx] = snip
+ if snip:
+ num -= 1
+ idx += 1
+
+ except IOError, e:
+ log.error("Cannot open snippet file for book id = %d [rev=%d], %s" % (book_id, revision, e))
+ return []
finally:
snippets.close()
- tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
- # highlighter.getBestTextFragments(tokenStream, text, False, 10)
- # import pdb; pdb.set_trace()
- snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
- print('snips: %s' % snip)
+ # remove verse end markers..
+ snips = map(lambda s: s and s.replace("/\n", "\n"), snips)
- return [snip]
+ searchresult.snippets = snips
- @staticmethod
- def enum_to_array(enum):
+ return snips
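+
+    # Hedged usage sketch: fetch a highlighted snippet for each result of a
+    # phrase search (the query string is illustrative):
+    #
+    #   search = Search()
+    #   for result in search.search_phrase(u"lato w Paryzu"):
+    #       snippets = search.get_snippets(result, u"lato w Paryzu", num=1)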
+
+ def hint_tags(self, query, pdcounter=True, prefix=True):
"""
- Converts a lucene TermEnum to array of Terms, suitable for
- addition to queries
+ Return auto-complete hints for tags
+ using prefix search.
"""
- terms = []
-
- while True:
- t = enum.term()
- if t:
- terms.append(t)
- if not enum.next(): break
+ q = self.index.Q()
+ query = query.strip()
+ for field in ['tag_name', 'tag_name_pl']:
+ if prefix:
+ q |= self.index.Q(**{field: query + "*"})
+ else:
+ q |= self.make_term_query(query, field=field)
+ qu = self.index.query(q).exclude(tag_category="book")
- if terms:
- return JArray('object')(terms, Term)
+ return self.search_tags(qu, pdcounter=pdcounter)
- def search_tags(self, query, filter=None, max_results=40):
- tops = self.searcher.search(query, filter, max_results)
+ def search_tags(self, query, filters=None, pdcounter=False):
+ """
+ Search for Tag objects using query.
+ """
+ if not filters: filters = []
+ if not pdcounter:
+ filters.append(~self.index.Q(is_pdcounter=True))
+ res = self.apply_filters(query, filters).execute()
tags = []
- for found in tops.scoreDocs:
- doc = self.searcher.doc(found.doc)
- tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
- tags.append(tag)
- print "%s (%d) -> %f" % (tag, tag.id, found.score)
-
- return tags
-
- def create_prefix_phrase(self, toks, field):
- q = MultiPhraseQuery()
- for i in range(len(toks)):
- t = Term(field, toks[i])
- if i == len(toks) - 1:
- pterms = MultiSearch.enum_to_array(PrefixTermEnum(self.searcher.getIndexReader(), t))
- if pterms:
- q.add(pterms)
+ for doc in res:
+ is_pdcounter = doc.get('is_pdcounter', False)
+ category = doc.get('tag_category')
+ try:
+                if is_pdcounter:
+ if category == 'pd_author':
+ tag = PDCounterAuthor.objects.get(id=doc.get('tag_id'))
+ elif category == 'pd_book':
+ tag = PDCounterBook.objects.get(id=doc.get('tag_id'))
+                        tag.category = 'pd_book'  # make it look more like a tag.
+ else:
+ print "Warning. cannot get pdcounter tag_id=%d from db; cat=%s" % (int(doc.get('tag_id')), category)
else:
- q.add(t)
- else:
- q.add(t)
- return q
-
- @staticmethod
- def term_filter(term, inverse=False):
- only_term = TermsFilter()
- only_term.addTerm(term)
-
- if inverse:
- neg = BooleanFilter()
- neg.add(FilterClause(only_term, BooleanClause.Occur.MUST_NOT))
- only_term = neg
-
- return only_term
+ tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
+ # don't add the pdcounter tag if same tag already exists
- def hint_tags(self, string, max_results=50):
- toks = self.get_tokens(string, field='SIMPLE')
- top = BooleanQuery()
+ tags.append(tag)
- for field in ['tag_name', 'tag_name_pl']:
- q = self.create_prefix_phrase(toks, field)
- top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))
-
- no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)
+ except catalogue.models.Tag.DoesNotExist: pass
+ except PDCounterAuthor.DoesNotExist: pass
+ except PDCounterBook.DoesNotExist: pass
- return self.search_tags(top, no_book_cat, max_results=max_results)
+ log.debug('search_tags: %s' % tags)
- def hint_books(self, string, max_results=50):
- toks = self.get_tokens(string, field='SIMPLE')
+ return tags
- q = self.create_prefix_phrase(toks, 'title')
+ def hint_books(self, query, prefix=True):
+ """
+        Returns auto-complete hints for book titles,
+        needed because we do not index 'pseudo' title-tags.
+        Uses prefix search.
+ """
+ q = self.index.Q()
+ query = query.strip()
+ if prefix:
+ q |= self.index.Q(title=query + "*")
+ else:
+ q |= self.make_term_query(query, field='title')
+ qu = self.index.query(q)
+ only_books = self.index.Q(is_book=True)
+ return self.search_books(qu, [only_books])
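+
+    # Illustrative calls (return values depend on what is actually indexed):
+    #
+    #   search = Search()
+    #   search.hint_tags(u"mick")    # tag / author auto-complete, prefix match
+    #   search.hint_books(u"pan t")  # book title auto-complete, prefix match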
- return self.book_search(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
+ def search_books(self, query, filters=None, max_results=10):
+ """
+ Searches for Book objects using query
+ """
+ bks = []
+ bks_found = set()
+ query = query.query(is_book=True)
+ res = self.apply_filters(query, filters).field_limit(['book_id'])
+ for r in res:
+ try:
+ bid = r['book_id']
+ if not bid in bks_found:
+ bks.append(catalogue.models.Book.objects.get(id=bid))
+ bks_found.add(bid)
+ except catalogue.models.Book.DoesNotExist: pass
+ return bks
+
@staticmethod
- def chain_filters(filters, op=ChainedFilter.AND):
+ def apply_filters(query, filters):
+ """
+ Apply filters to a query
+ """
+ if filters is None: filters = []
filters = filter(lambda x: x is not None, filters)
- if not filters:
- return None
- chf = ChainedFilter(JArray('object')(filters, Filter), op)
- return chf
-
- def filtered_categories(self, tags):
- cats = {}
- for t in tags:
- cats[t.category] = True
- return cats.keys()
-
- def hint(self):
- return Hint(self)
+ for f in filters:
+ query = query.query(f)
+ return query
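+
+    # For example (sketch), combining a base query with optional filters,
+    # where None entries are simply skipped:
+    #
+    #   search = Search()
+    #   q = search.index.query(text=u"wolnosc")
+    #   q = Search.apply_filters(q, [search.index.Q(is_book=True), None])
+    #   results = q.execute()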