+ def search_perfect(self, tokens, max_results=20):
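+     """
+     Match the tokens as a phrase against the author, title and content
+     fields and return a list of (score, Book, parts) tuples; the parts
+     list is left empty here.
+     """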
+     qrys = [self.make_phrase(tokens, field=fld) for fld in ['author', 'title', 'content']]
+
+     books = []
+     for q in qrys:
+         top = self.searcher.search(q, max_results)
+         for found in top.scoreDocs:
+             book_info = self.searcher.doc(found.doc)
+             book = catalogue.models.Book.objects.get(id=book_info.get("book_id"))
+             books.append((found.score, book, []))
+     return books
+
+ def search_everywhere(self, tokens, max_results=20):
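+     """
+     Match the tokens against both book metadata and book content.  A hit
+     needs at least one match among the metadata fields (author, title,
+     epochs, genres, kinds) and at least one among the content fields
+     (themes, content) of the documents joined to the book.  Returns a
+     list of (max_score, Book, parts) tuples, where parts lists the
+     matching headers and fragments with their scores.
+     """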
+     q = BooleanQuery()
+     in_meta = BooleanQuery()
+     in_content = BooleanQuery()
+
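+     # At least one of the content fields should match.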
+     for fld in ['themes', 'content']:
+         in_content.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
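+     # Likewise for the book's metadata fields.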
+     for fld in ['author', 'title', 'epochs', 'genres', 'kinds']:
+         in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
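+     # Require both: a metadata match on the book document and a content match
+     # in the documents joined to it via content_query.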
+     q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
+     in_content_join = self.content_query(in_content)
+     q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
+
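+     # Collect up to 100 parent (book) groups by relevance, tracking scores and max score.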
+     collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+
+     self.searcher.search(q, collector)
+
+     books = []
+
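+     # Pull the grouped results: each group is a book with its matching child documents.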
+     top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
+     if top_groups:
+         for grp in top_groups.groups:
+             doc_id = Integer.cast_(grp.groupValue).intValue()
+             book_data = self.searcher.doc(doc_id)
+             book = catalogue.models.Book.objects.get(id=book_data.get("book_id"))
+             parts = []
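+             # Each child hit contributes either a section header or a tagged fragment.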
+             for part in grp.scoreDocs:
+                 part_data = self.searcher.doc(part.doc)
+                 header_type = part_data.get("header_type")
+                 if header_type:
+                     parts.append((part.score, {"header": header_type, "position": int(part_data.get("header_index"))}))
+                 anchor = part_data.get("fragment_anchor")
+                 if anchor:
+                     fragment = book.fragments.get(anchor=anchor)
+                     parts.append((part.score, {"fragment": fragment}))
+             books.append((grp.maxScore, book, parts))
+
+     return books
+
+