X-Git-Url: https://git.mdrn.pl/wolnelektury.git/blobdiff_plain/71209be8f9c399340bddb819f71e99ecf116187b..f3fc3aba5b7a4fe8ba5f594a6e807eb96677674f:/apps/search/index.py

diff --git a/apps/search/index.py b/apps/search/index.py
index 33836ad37..6d97047a0 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -276,7 +276,7 @@ class Index(BaseIndex):
 
     footnote_tags = ['pa', 'pt', 'pr', 'pe']
 
-    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne', '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF']
 
     published_date_re = re.compile("([0-9]+)[\]. ]*$")
 
@@ -323,7 +323,6 @@ class Index(BaseIndex):
         # get published date
         source = book_info.source_name
         match = self.published_date_re.search(source)
-        print("published date is %s %s" % (match, match is not None and match.groups()))
         if match is not None:
             fields["published_date"] = Field("published_date", str(match.groups()[0]), Field.Store.YES, Field.Index.NOT_ANALYZED)
 
@@ -426,9 +425,8 @@ class Index(BaseIndex):
 
         fragments = {}
         snippets = Snippets(book.id).open('w')
-        position = 0
         try:
-            for header in list(master):
+            for header, position in zip(list(master), range(len(master))):
                 if header.tag in self.skip_header_tags:
                     continue
 
@@ -441,15 +439,15 @@ class Index(BaseIndex):
 
                 for start, end in walker(header, ignore_tags=self.ignore_content_tags):
                     # handle footnotes
-                    if start is not None and start.tag in self.footnote_tags:
-                        footnote = ' '.join(start.itertext())
-                    elif end is not None and footnote is not None and end.tag in self.footnote_tags:
-                        doc = add_part(snippets, header_index=position, header_type=header.tag,
-                                       content=footnote)
+                    # if start is not None and start.tag in self.footnote_tags:
+                    #     footnote = ' '.join(start.itertext())
+                    # elif end is not None and footnote is not None and end.tag in self.footnote_tags:
+                    #     doc = add_part(snippets, header_index=position, header_type=header.tag,
+                    #                    content=footnote)
 
-                        self.index.addDocument(doc)
+                    #     self.index.addDocument(doc)
 
-                        footnote = None
+                    #     footnote = None
 
                     # handle fragments and themes.
                     if start is not None and start.tag == 'begin':
@@ -496,7 +494,6 @@ class Index(BaseIndex):
                                content=fix_format(content))
 
                 self.index.addDocument(doc)
-                position += 1
 
         finally:
             snippets.close()
@@ -619,7 +616,6 @@ class SearchResult(object):
 
         pd = stored.get("published_date")
         if pd is None:
-            print "published_date is none for book %d" % self.book_id
             pd = 0
         self.published_date = int(pd)
 
@@ -642,7 +638,7 @@ class SearchResult(object):
             raise ValueError("this search result is or book %d; tried to merge with %d" % (self.book_id, other.book_id))
         self._hits += other._hits
         if other.score > self.score:
-            self.score = other.score
+            self._score = other._score
         return self
 
     def get_book(self):
@@ -714,7 +710,6 @@ class SearchResult(object):
         tokens = self.search.get_tokens(self.searched, 'POLISH', cached=self.tokens_cache)
         for theme in themes:
             name_tokens = self.search.get_tokens(theme.name, 'POLISH')
-            print "THEME HIT: %s in %s" % (tokens, name_tokens)
             for t in tokens:
                 if t in name_tokens:
                     if not theme in themes_hit:
@@ -967,13 +962,13 @@ class Search(IndexStore):
         return q
 
     def search_phrase(self, searched, field, book=True, max_results=20, fuzzy=False,
-                      filters=None, tokens_cache=None, boost=None, snippets=False):
+                      filters=None, tokens_cache=None, boost=None, snippets=False, slop=2):
        if filters is None: filters = []
        if tokens_cache is None: tokens_cache = {}
 
        tokens = self.get_tokens(searched, field, cached=tokens_cache)
 
-        query = self.make_phrase(tokens, field=field, fuzzy=fuzzy)
+        query = self.make_phrase(tokens, field=field, fuzzy=fuzzy, slop=slop)
 
        if book:
            filters.append(self.term_filter(Term('is_book', 'true')))
        top = self.searcher.search(query, self.chain_filters(filters), max_results)
@@ -981,7 +976,7 @@ class Search(IndexStore):
         return [SearchResult(self, found, snippets=(snippets and self.get_snippets(found, query) or None), searched=searched) for found in top.scoreDocs]
 
     def search_some(self, searched, fields, book=True, max_results=20, fuzzy=False,
-                    filters=None, tokens_cache=None, boost=None):
+                    filters=None, tokens_cache=None, boost=None, snippets=True):
        if filters is None: filters = []
        if tokens_cache is None: tokens_cache = {}
 
@@ -999,7 +994,7 @@ class Search(IndexStore):
         top = self.searcher.search(query, self.chain_filters(filters), max_results)
 
         return [SearchResult(self, found, searched=searched, tokens_cache=tokens_cache,
-                             snippets=self.get_snippets(found, query)) for found in top.scoreDocs]
+                             snippets=(snippets and self.get_snippets(found, query) or None)) for found in top.scoreDocs]
 
     def search_perfect_book(self, searched, max_results=20, fuzzy=False, hint=None):
         """
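Note (not part of the diff above): a minimal usage sketch of the two changed signatures, assuming an initialized Search instance from this app, an existing index, and PyLucene available; the import path, query string, and field names are illustrative assumptions, not taken from this commit.

    # hypothetical caller code; Search comes from apps/search/index.py
    from search.index import Search

    search = Search()

    # search_phrase() now accepts a slop value (default 2), which it forwards to
    # make_phrase(), so phrase matches can tolerate a few intervening terms.
    phrase_hits = search.search_phrase(u"lokomotywa", 'content', slop=2)

    # search_some() now takes a snippets flag (default True); passing False skips
    # snippet extraction for the returned SearchResult objects.
    other_hits = search.search_some(u"lokomotywa", ['content', 'themes'], snippets=False)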