+ def get_book(self):
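+        """Return the Book for this result, fetching and caching it on first use."""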
+ if hasattr(self, '_book'):
+ return self._book
+ self._book = catalogue.models.Book.objects.get(id=self.book_id)
+ return self._book
+
+ book = property(get_book)
+
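+    # Offsets into a single raw hit tuple: (position, fragment, score, other).
+    # POSITION_INDEX and POSITION_SPAN point into the position entry itself:
+    # the section index and the span (in sections) of the hit.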
+ POSITION = 0
+ FRAGMENT = 1
+ POSITION_INDEX = 1
+ POSITION_SPAN = 2
+ SCORE = 2
+ OTHER = 3
+
+ @property
+ def hits(self):
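+        """Raw hits grouped into section and fragment entries; computed once and cached."""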
+ if self._processed_hits is not None:
+ return self._processed_hits
+
+        # split the raw hits into fragment hits and bare section hits
+ frags = filter(lambda r: r[self.FRAGMENT] is not None, self._hits)
+
+ sect = filter(lambda r: r[self.FRAGMENT] is None, self._hits)
+
+ # sections not covered by fragments
+        def covered_by_fragment(s):
+            si = s[self.POSITION][self.POSITION_INDEX]
+            return any(
+                f[self.POSITION][self.POSITION_INDEX] <= si <
+                f[self.POSITION][self.POSITION_INDEX] + f[self.POSITION][self.POSITION_SPAN]
+                for f in frags)
+        sect = filter(lambda s: not covered_by_fragment(s), sect)
+
+ hits = []
+
+ def remove_duplicates(lst, keyfn, compare):
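+            """Keep, for every key returned by keyfn, only the element ranked highest by compare."""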
+ els = {}
+ for e in lst:
+ eif = keyfn(e)
+ if eif in els:
+ if compare(els[eif], e) >= 1:
+ continue
+ els[eif] = e
+ return els.values()
+
+        # remove fragments with duplicated anchors, keeping the higher-scoring one
+ frags = remove_duplicates(frags, lambda f: f[self.FRAGMENT], lambda a, b: cmp(a[self.SCORE], b[self.SCORE]))
+ # frags = remove_duplicates(frags, lambda f: f[OTHER]['snippet_pos'] and f[OTHER]['snippet_pos'] or f[FRAGMENT],
+ # lambda a, b: cmp(a[SCORE], b[SCORE]))
+
+ # remove duplicate sections
+ sections = {}
+
+ for s in sect:
+ si = s[self.POSITION][self.POSITION_INDEX]
+            # keep only the highest-scoring hit for each section
+ if si in sections:
+ if sections[si]['score'] >= s[self.SCORE]:
+ continue
+
+ m = {'score': s[self.SCORE],
+ 'section_number': s[self.POSITION][self.POSITION_INDEX] + 1,
+ }
+ m.update(s[self.OTHER])
+ sections[si] = m
+
+ hits = sections.values()
+
+ for f in frags:
+ try:
+ frag = catalogue.models.Fragment.objects.get(anchor=f[self.FRAGMENT], book__id=self.book_id)
+ except catalogue.models.Fragment.DoesNotExist:
+ # stale index
+ continue
+            # Figure out whether any query term matches a word in one of the theme names.
+ themes = frag.tags.filter(category='theme')
+ themes_hit = set()
+ if self.query_terms is not None:
+ for i in range(0, len(f[self.OTHER]['themes'])):
+                    # compare individual words of the theme names (in both language variants)
+                    tms = f[self.OTHER]['themes'][i].split() + f[self.OTHER]['themes_pl'][i].split()
+ tms = map(unicode.lower, tms)
+ for qt in self.query_terms:
+ if qt in tms:
+ themes_hit.add(f[self.OTHER]['themes'][i])
+ break
+
+ def theme_by_name(n):
+ th = filter(lambda t: t.name == n, themes)
+ if th:
+ return th[0]
+ else:
+ return None
+ themes_hit = filter(lambda a: a is not None, map(theme_by_name, themes_hit))
+
+ m = {'score': f[self.SCORE],
+ 'fragment': frag,
+ 'section_number': f[self.POSITION][self.POSITION_INDEX] + 1,
+ 'themes': themes,
+ 'themes_hit': themes_hit
+ }
+ m.update(f[self.OTHER])
+ hits.append(m)
+
+        hits.sort(key=lambda h: h['score'], reverse=True)
+
+ self._processed_hits = hits
+
+ return hits
+
+ @staticmethod
+ def aggregate(*result_lists):
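+        """Merge several result lists so that each book appears at most once."""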
+ books = {}
+ for rl in result_lists:
+ for r in rl:
+ if r.book_id in books:
+ books[r.book_id].merge(r)
+ else:
+ books[r.book_id] = r
+ return books.values()
+
+ def __cmp__(self, other):
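+        """Compare by score, then by publication date (an earlier date ranks higher)."""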
+ c = cmp(self.score, other.score)
+ if c == 0:
+ # this is inverted, because earlier date is better
+ return cmp(other.published_date, self.published_date)
+ else:
+ return c
+
+ def __len__(self):
+ return len(self.hits)
+
+ def snippet_pos(self, idx=0):
+ return self.hits[idx]['snippets_pos']
+
+ def snippet_revision(self, idx=0):
+ try:
+ return self.hits[idx]['snippets_revision']
+        except (IndexError, KeyError):
+ return None
+
+
+class Search(SolrIndex):
+ """
+ Search facilities.
+ """
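+    # Rough usage sketch (the queried fields below are only an example):
+    #
+    #   search = Search()
+    #   results = search.search_some(u'lato', ['title', 'authors', 'text'])
+    #   by_book = SearchResult.aggregate(results)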
+ def __init__(self, default_field="text"):
+ super(Search, self).__init__(mode='r')
+
+ # def get_tokens(self, searched, field='text', cached=None):
+ # """returns tokens analyzed by a proper (for a field) analyzer
+ # argument can be: StringReader, string/unicode, or tokens. In the last case
+ # they will just be returned (so we can reuse tokens, if we don't change the analyzer)
+ # """
+ # if cached is not None and field in cached:
+ # return cached[field]
+
+ # if isinstance(searched, str) or isinstance(searched, unicode):
+ # searched = StringReader(searched)
+ # elif isinstance(searched, list):
+ # return searched
+
+ # searched.reset()
+ # tokens = self.analyzer.reusableTokenStream(field, searched)
+ # toks = []
+ # while tokens.incrementToken():
+ # cta = tokens.getAttribute(CharTermAttribute.class_)
+ # toks.append(cta.toString())
+
+ # if cached is not None:
+ # cached[field] = toks
+
+ # return toks
+
+ # @staticmethod
+ # def fuzziness(fuzzy):
+ # """Helper method to sanitize fuzziness"""
+ # if not fuzzy:
+ # return None
+ # if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
+ # return fuzzy
+ # else:
+ # return 0.5
+
+ # def make_phrase(self, tokens, field='text', slop=2, fuzzy=False):
+ # """
+ # Return a PhraseQuery with a series of tokens.
+ # """
+ # if fuzzy:
+ # phrase = MultiPhraseQuery()
+ # for t in tokens:
+ # term = Term(field, t)
+ # fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
+ # fuzzterms = []
+
+ # while True:
+ # ft = fuzzterm.term()
+ # if ft:
+ # fuzzterms.append(ft)
+ # if not fuzzterm.next(): break
+ # if fuzzterms:
+ # phrase.add(JArray('object')(fuzzterms, Term))
+ # else:
+ # phrase.add(term)
+ # else:
+ # phrase = PhraseQuery()
+ # phrase.setSlop(slop)
+ # for t in tokens:
+ # term = Term(field, t)
+ # phrase.add(term)
+ # return phrase
+
+ def make_term_query(self, query, field='text', modal=operator.or_):
+ """
+        Return a single query joining a term query for every word of `query`.
+        modal - boolean operator used to join the term queries (default: OR).
+ """
+ if query is None: query = ''
+ q = self.index.Q()
+        q = reduce(modal, map(lambda s: self.index.Q(**{field: s}),
+                              query.split()), q)
+
+ return q
+
+ def search_phrase(self, searched, field='text', book=False,
+ filters=None,
+ snippets=False):
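+        """Query a single field with the whole phrase, optionally restricted to book documents."""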
+ if filters is None: filters = []
+ if book: filters.append(self.index.Q(is_book=True))
+
+ q = self.index.query(**{field: searched})
+ q = self.apply_filters(q, filters).field_limit(score=True, all_fields=True)
+ res = q.execute()
+ return [SearchResult(found, how_found=u'search_phrase') for found in res]
+
+ def search_some(self, searched, fields, book=True,
+ filters=None, snippets=True, query_terms=None):
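+        """OR together per-field term queries over `fields`, optionally restricted to book documents."""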
+ assert isinstance(fields, list)
+ if filters is None: filters = []
+ if book: filters.append(self.index.Q(is_book=True))
+
+ query = self.index.Q()
+
+ for fld in fields:
+ query = self.index.Q(query | self.make_term_query(searched, fld))
+
+ query = self.index.query(query)
+ query = self.apply_filters(query, filters).field_limit(score=True, all_fields=True)
+ res = query.execute()
+ return [SearchResult(found, how_found='search_some', query_terms=query_terms) for found in res]
+
+ # def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
+ # """
+ # Search for perfect book matches. Just see if the query matches with some author or title,
+ # taking hints into account.
+ # """
+ # fields_to_search = ['authors', 'title']
+ # only_in = None
+ # if hint:
+ # if not hint.should_search_for_book():
+ # return []
+ # fields_to_search = hint.just_search_in(fields_to_search)
+ # only_in = hint.book_filter()
+
+ # qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in fields_to_search]
+
+ # books = []
+ # for q in qrys:
+ # top = self.searcher.search(q,
+ # self.chain_filters([only_in, self.term_filter(Term('is_book', 'true'))]),
+ # max_results)
+ # for found in top.scoreDocs:
+ # books.append(SearchResult(self, found, how_found="search_perfect_book"))
+ # return books
+
+ # def search_book(self, searched, max_results=20, fuzzy=False, hint=None):
+ # fields_to_search = ['tags', 'authors', 'title']
+
+ # only_in = None
+ # if hint:
+ # if not hint.should_search_for_book():
+ # return []
+ # fields_to_search = hint.just_search_in(fields_to_search)
+ # only_in = hint.book_filter()