import os
import re
-import errno
from librarian import dcparser
from librarian.parser import WLDocument
from lxml import etree
import catalogue.models
from pdcounter.models import Author as PDCounterAuthor, BookStub as PDCounterBook
from itertools import chain
-import traceback
-import logging
-log = logging.getLogger('search')
import sunburnt
import custom
import operator
+import logging
+from wolnelektury.utils import makedirs
log = logging.getLogger('search')
+
class SolrIndex(object):
def __init__(self, mode=None):
self.index = custom.CustomSolrInterface(settings.SOLR, mode=mode)
SNIPPET_DIR = "snippets"
def __init__(self, book_id, revision=None):
- try:
- os.makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
- except OSError as exc:
- if exc.errno == errno.EEXIST:
- pass
- else: raise
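+ # the makedirs helper replaces the try/except above: create the directory, ignoring it if it already exists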
+ makedirs(os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR))
self.book_id = book_id
self.revision = revision
self.file = None
+ self.position = None
@property
def path(self):
- if self.revision: fn = "%d.%d" % (self.book_id, self.revision)
- else: fn = "%d" % self.book_id
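+ # snippet files are named "<book_id>", or "<book_id>.<revision>" when a revision is given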
+ if self.revision:
+ fn = "%d.%d" % (self.book_id, self.revision)
+ else:
+ fn = "%d" % self.book_id
return os.path.join(settings.SEARCH_INDEX, self.SNIPPET_DIR, fn)
"""
Open the snippet file. Call .close() afterwards.
"""
- if not 'b' in mode:
+ if 'b' not in mode:
mode += 'b'
if 'w' in mode:
if not remove_only:
# then add them [all or just one passed]
if not tags:
- tags = chain(catalogue.models.Tag.objects.exclude(category='set'), \
- PDCounterAuthor.objects.all(), \
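+ # index all catalogue tags except sets, plus PDCounter authors and books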
+ tags = chain(
+ catalogue.models.Tag.objects.exclude(category='set'),
+ PDCounterAuthor.objects.all(),
PDCounterBook.objects.all())
for tag in tags:
"""
Create a Lucene document referring to the book id.

"""
- doc = {
- 'book_id': int(book.id),
- }
+ doc = {'book_id': int(book.id)}
if book.parent is not None:
- doc["parent_id"] = int(book.parent.id)
+ doc['parent_id'] = int(book.parent.id)
return doc
def remove_book(self, book_or_id, remove_snippets=True):
self.remove_book(book, remove_snippets=False)
book_doc = self.create_book_doc(book)
- meta_fields = self.extract_metadata(book, book_info, dc_only=['source_name', 'authors', 'translators', 'title'])
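+ # epochs, kinds and genres are now extracted too, so they can be copied onto the book document below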
+ meta_fields = self.extract_metadata(book, book_info, dc_only=[
+ 'source_name', 'authors', 'translators', 'title', 'epochs', 'kinds', 'genres'])
# let's not index it - it's only used for extracting publish date
if 'source_name' in meta_fields:
del meta_fields['source_name']
'published_date': meta_fields['published_date']
}
- if 'translators' in meta_fields:
- book_fields['translators'] = meta_fields['translators']
+ for tag_name in ('translators', 'epochs', 'kinds', 'genres'):
+ if tag_name in meta_fields:
+ book_fields[tag_name] = meta_fields[tag_name]
self.index_content(book, book_fields=book_fields)
]
ignore_content_tags = [
- 'uwaga', 'extra',
+ 'uwaga', 'extra', 'nota_red',
'zastepnik_tekstu', 'sekcja_asterysk', 'separator_linia', 'zastepnik_wersu',
'didaskalia',
'naglowek_aktu', 'naglowek_sceny', 'naglowek_czesc',
footnote_tags = ['pa', 'pt', 'pr', 'pe']
- skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
+ skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne',
+ '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
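+ # captures a trailing number (the publication year) in source_name, allowing stray "]", "." or spaces after it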
published_date_re = re.compile("([0-9]+)[\]. ]*$")
book_info = dcparser.parse(open(book.xml_file.path))
fields['slug'] = book.slug
- fields['tags'] = [t.name for t in book.tags]
+ fields['tags'] = [t.name for t in book.tags]
fields['is_book'] = True
# validator, name
match = self.published_date_re.search(book_info.source_name)
if match is not None:
pd = str(match.groups()[0])
- if not pd: pd = ""
+ if not pd:
+ pd = ""
fields["published_date"] = pd
return fields
if master.tag in self.master_tags:
return master
- def index_content(self, book, book_fields={}):
+ def index_content(self, book, book_fields):
"""
Walks the book XML and extracts content from it.
Adds parts for each header tag and for each fragment.
if master is None:
return []
- def walker(node, ignore_tags=[]):
-
- if node.tag not in ignore_tags:
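+ # generator yielding (start_node, text, end_node) triples over the subtree, skipping ignored content tags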
+ def walker(node):
+ if node.tag not in self.ignore_content_tags:
yield node, None, None
if node.text is not None:
yield None, node.text, None
return
def fix_format(text):
- # separator = [u" ", u"\t", u".", u";", u","]
+ # separator = [u" ", u"\t", u".", u";", u","]
if isinstance(text, list):
# need to join it first
- text = filter(lambda s: s is not None, content)
+ text = filter(lambda s: s is not None, text)
if 'themes' in fields:
doc['themes'] = fields['themes']
- doc['uid'] = "part%s%s%s" % (doc['header_index'],
- doc['header_span'],
- doc.get('fragment_anchor', ''))
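+ # prefix the uid with the book id so part uids cannot collide between books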
+ doc['uid'] = "part%s-%s-%s-%s" % (
+ book.id, doc['header_index'], doc['header_span'], doc.get('fragment_anchor', ''))
return doc
- def give_me_utf8(s):
- if isinstance(s, unicode):
- return s.encode('utf-8')
- else:
- return s
-
fragments = {}
snippets = Snippets(book.id).open('w')
try:
content.append(text)
handle_text = [all_content]
- for start, text, end in walker(header, ignore_tags=self.ignore_content_tags):
+ for start, text, end in walker(header):
# handle footnotes
if start is not None and start.tag in self.footnote_tags:
footnote = []
# handle fragments and themes.
if start is not None and start.tag == 'begin':
fid = start.attrib['id'][1:]
- fragments[fid] = {'text': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
+ fragments[fid] = {
+ 'text': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
# themes for this fragment
elif start is not None and start.tag == 'motyw':
fid = start.attrib['id'][1:]
- handle_text.append(None)
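+ # push a no-op handler: motyw holds theme names, which are collected below and must not end up in the body text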
+ handle_text.append(lambda text: None)
if start.text is not None:
fragments[fid]['themes'] += map(unicode.strip, map(unicode, (start.text.split(','))))
elif end is not None and end.tag == 'motyw':
if fid not in fragments:
continue # a broken <end> node, skip it
frag = fragments[fid]
- if frag['themes'] == []:
+ if not frag['themes']:
continue # empty themes list.
del fragments[fid]
- if text is not None and handle_text is not []:
+ if text is not None and handle_text:
hdl = handle_text[-1]
- if hdl is not None:
- hdl(text)
+ hdl(text)
# in the end, add a section text.
doc = add_part(snippets, header_index=position,
class SearchResult(object):
- def __init__(self, doc, how_found=None, query=None, query_terms=None):
- # self.search = search
+ def __init__(self, doc, how_found=None, query_terms=None):
self.boost = 1.0
self._hits = []
self._processed_hits = None # processed hits
self.snippets = []
self.query_terms = query_terms
+ self._book = None
if 'score' in doc:
self._score = doc['score']
def __unicode__(self):
return u"<SR id=%d %d(%d) hits score=%f %d snippets>" % \
- (self.book_id, len(self._hits), self._processed_hits and len(self._processed_hits) or -1, self._score, len(self.snippets))
+ (self.book_id, len(self._hits),
+ len(self._processed_hits) if self._processed_hits else -1,
+ self._score, len(self.snippets))
def __str__(self):
return unicode(self).encode('utf-8')
return self
def get_book(self):
- if hasattr(self, '_book'):
+ if self._book is not None:
return self._book
self._book = catalogue.models.Book.objects.get(id=self.book_id)
return self._book
# sections not covered by fragments
sect = filter(lambda s: 0 == len(filter(
- lambda f: s[self.POSITION][self.POSITION_INDEX] >= f[self.POSITION][self.POSITION_INDEX]
- and s[self.POSITION][self.POSITION_INDEX] < f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN],
- frags)), sect)
-
- hits = []
+ lambda f: f[self.POSITION][self.POSITION_INDEX] <= s[self.POSITION][self.POSITION_INDEX] <
+ f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN], frags)), sect)
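+ # deduplicate hits by key, keeping the element preferred by compare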
def remove_duplicates(lst, keyfn, compare):
els = {}
m.update(f[self.OTHER])
hits.append(m)
- hits.sort(lambda a, b: cmp(a['score'], b['score']), reverse=True)
+ hits.sort(key=lambda h: h['score'], reverse=True)
self._processed_hits = hits
def snippet_revision(self, idx=0):
try:
return self.hits[idx]['snippets_revision']
- except:
+ except (IndexError, KeyError):
return None
def __init__(self, default_field="text"):
super(Search, self).__init__(mode='r')
-
def make_term_query(self, query, field='text', modal=operator.or_):
"""
Returns term queries joined by boolean query.
modal - applies to boolean query
- fuzzy - should the query by fuzzy.
"""
- if query is None: query = ''
+ if query is None:
+ query = ''
q = self.index.Q()
- q = reduce(modal, map(lambda s: self.index.Q(**{field: s}),
- query.split(r" ")), q)
+ q = reduce(modal, map(lambda s: self.index.Q(**{field: s}), query.split(" ")), q)
return q
- def search_phrase(self, searched, field='text', book=False,
- filters=None,
- snippets=False):
- if filters is None: filters = []
- if book: filters.append(self.index.Q(is_book=True))
-
- q = self.index.query(**{field: searched})
- q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True)
- res = q.execute()
- return [SearchResult(found, how_found=u'search_phrase') for found in res]
-
- def search_some(self, searched, fields, book=True,
- filters=None, snippets=True, query_terms=None):
- assert isinstance(fields, list)
- if filters is None: filters = []
- if book: filters.append(self.index.Q(is_book=True))
-
- query = self.index.Q()
-
- for fld in fields:
- query = self.index.Q(query | self.make_term_query(searched, fld))
-
- query = self.index.query(query)
+ def search_words(self, words, fields, book=True):
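+ # one filter per word: a word may match any of the given fields (OR),
+ # but every word must match somewhere, since apply_filters ANDs the filters together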
+ filters = []
+ for word in words:
+ word_filter = None
+ for field in fields:
+ q = self.index.Q(**{field: word})
+ if word_filter is None:
+ word_filter = q
+ else:
+ word_filter |= q
+ filters.append(word_filter)
+ if book:
+ query = self.index.query(is_book=True)
+ else:
+ query = self.index.query()
query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True)
- res = query.execute()
- return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res]
-
-
- def search_everywhere(self, searched, query_terms=None):
- """
- Tries to use search terms to match different fields of book (or its parts).
- E.g. one word can be an author survey, another be a part of the title, and the rest
- are some words from third chapter.
- """
- books = []
- # content only query : themes x content
- q = self.make_term_query(searched, 'text')
- q_themes = self.make_term_query(searched, 'themes_pl')
-
- query = self.index.query(q).query(q_themes).field_limit(score=True, all_fields=True)
- res = query.execute()
-
- for found in res:
- books.append(SearchResult(found, how_found='search_everywhere_themesXcontent', query_terms=query_terms))
-
- # query themes/content x author/title/tags
- in_content = self.index.Q()
- in_meta = self.index.Q()
-
- for fld in ['themes_pl', 'text']:
- in_content |= self.make_term_query(searched, field=fld)
-
- for fld in ['tags', 'authors', 'title']:
- in_meta |= self.make_term_query(searched, field=fld)
-
- q = in_content & in_meta
- res = self.index.query(q).field_limit(score=True, all_fields=True).execute()
-
- for found in res:
- books.append(SearchResult(found, how_found='search_everywhere', query_terms=query_terms))
-
- return books
+ return [SearchResult(found, how_found='search_words', query_terms=words) for found in query.execute()]
def get_snippets(self, searchresult, query, field='text', num=1):
"""
text = snippets.get((int(position),
int(length)))
snip = self.index.highlight(text=text, field=field, q=query)
- snips[idx] = snip
- if snip:
- num -= 1
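+ # only count and keep a highlight we have not seen yet, so duplicates are not returned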
+ if snip not in snips:
+ snips[idx] = snip
+ if snip:
+ num -= 1
idx += 1
except IOError, e:
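+ # a missing snippet file is expected for parent books, so only report it for books without children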
- log.error("Cannot open snippet file for book id = %d [rev=%s], %s" % (book_id, revision, e))
+ book = catalogue.models.Book.objects.filter(id=book_id)
+ if not book:
+ log.error("Book does not exist for book id = %d" % book_id)
+ elif not book.get().children.exists():
+ log.error("Cannot open snippet file for book id = %d [rev=%s], %s" % (book_id, revision, e))
return []
finally:
snippets.close()
"""
Search for Tag objects using query.
"""
- if not filters: filters = []
+ if not filters:
+ filters = []
if not pdcounter:
filters.append(~self.index.Q(is_pdcounter=True))
res = self.apply_filters(query, filters).execute()
is_pdcounter = doc.get('is_pdcounter', False)
category = doc.get('tag_category')
try:
- if is_pdcounter == True:
+ if is_pdcounter:
if category == 'pd_author':
tag = PDCounterAuthor.objects.get(id=doc.get('tag_id'))
- elif category == 'pd_book':
+ else: # category == 'pd_book':
tag = PDCounterBook.objects.get(id=doc.get('tag_id'))
tag.category = 'pd_book'  # make it look more like a tag.
- else:
- print ("Warning. cannot get pdcounter tag_id=%d from db; cat=%s" % (int(doc.get('tag_id')), category)).encode('utf-8')
pd_tags.append(tag)
else:
tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
tags.append(tag)
- except catalogue.models.Tag.DoesNotExist: pass
- except PDCounterAuthor.DoesNotExist: pass
- except PDCounterBook.DoesNotExist: pass
+ except catalogue.models.Tag.DoesNotExist:
+ pass
+ except PDCounterAuthor.DoesNotExist:
+ pass
+ except PDCounterBook.DoesNotExist:
+ pass
tags_slugs = set(map(lambda t: t.slug, tags))
- tags = tags + filter(lambda t: not t.slug in tags_slugs, pd_tags)
+ tags = tags + filter(lambda t: t.slug not in tags_slugs, pd_tags)
log.debug('search_tags: %s' % tags)
query = query.strip()
if prefix:
q |= self.index.Q(title=query + "*")
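+ # match the original-language title as well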
+ q |= self.index.Q(title_orig=query + "*")
else:
q |= self.make_term_query(query, field='title')
+ q |= self.make_term_query(query, field='title_orig')
qu = self.index.query(q)
only_books = self.index.Q(is_book=True)
return self.search_books(qu, [only_books])
for r in res:
try:
bid = r['book_id']
- if not bid in bks_found:
+ if bid not in bks_found:
bks.append(catalogue.models.Book.objects.get(id=bid))
bks_found.add(bid)
- except catalogue.models.Book.DoesNotExist: pass
+ except catalogue.models.Book.DoesNotExist:
+ pass
return bks
-
@staticmethod
def apply_filters(query, filters):
"""
Apply filters to a query
"""
- if filters is None: filters = []
+ if filters is None:
+ filters = []
filters = filter(lambda x: x is not None, filters)
for f in filters:
query = query.query(f)