From 978c513ebce5fec27c77a3f08fea9030aab7473c Mon Sep 17 00:00:00 2001
From: Marcin Koziej
Date: Thu, 20 Oct 2011 16:56:06 +0200
Subject: [PATCH] book search works - for now without aggregating fragments.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

---
 apps/search/index.py | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/apps/search/index.py b/apps/search/index.py
index d5d696d34..050fc5b49 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -12,7 +12,7 @@ import os
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
-from catalogue.models import Book
+import catalogue.models
 
 
 class WLAnalyzer(PerFieldAnalyzerWrapper):
@@ -156,9 +156,11 @@ class Index(IndexStore):
         # header_index - the 0-indexed position of header element.
         # content
         master = self.get_master(root)
+        if master is None:
+            return []
+
         header_docs = []
         for header, position in zip(list(master), range(len(master))):
-            print("header %s @%d" % (header, position))
             doc = self.create_book_doc(book)
             doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
             doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
@@ -181,7 +183,6 @@ class Index(IndexStore):
         # will contain (framgent id -> { content: [], themes: [] }
         fragments = {}
         for start, end in walker(master):
-            print("%s %s" % (start, end))
             if start is not None and start.tag == 'begin':
                 fid = start.attrib['id'][1:]
                 fragments[fid] = {'content': [], 'themes': []}
@@ -192,14 +193,29 @@ class Index(IndexStore):
                     fragments[fid]['content'].append(start.tail)
             elif start is not None and start.tag == 'end':
                 fid = start.attrib['id'][1:]
+                if fid not in fragments:
+                    continue  # a broken node, skip it
                 frag = fragments[fid]
                 del fragments[fid]
-                print("Fragment %s complete, themes: %s contents: %s" % (fid, frag['themes'], frag['content']))
+
+                def jstr(l):
+                    return u' '.join(map(
+                        lambda x: x == None and u'(none)' or unicode(x),
+                        l))
+                s = u"Fragment %s complete, themes: %s contents: %s" % \
+                    (fid, jstr(frag['themes']), jstr(frag['content']))
+                print(s.encode('utf-8'))
 
                 doc = self.create_book_doc(book)
-                doc.add(Field("fragment_anchor", fid, Field.Store.YES, Field.Index.NOT_ANALYZED))
-                doc.add(Field("content", u' '.join(filter(lambda s: s is not None, frag['content'])), Field.Store.NO, Field.Index.ANALYZED))
-                doc.add(Field("themes", u' '.join(frag['themes']), Field.Store.NO, Field.Index.ANALYZED))
+                doc.add(Field("fragment_anchor", fid,
+                              Field.Store.YES, Field.Index.NOT_ANALYZED))
+                doc.add(Field("content",
+                              u' '.join(filter(lambda s: s is not None, frag['content'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+                doc.add(Field("themes",
+                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+
                 fragment_docs.append(doc)
             elif start is not None:
                 for frag in fragments.values():
@@ -264,7 +280,7 @@ class Search(IndexStore):
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
     def search(self, query, max_results=50):
@@ -275,7 +291,7 @@ class Search(IndexStore):
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
 
     def bsearch(self, query, max_results=50):
@@ -288,5 +304,5 @@ class Search(IndexStore):
         bks = []
         for found in tops.scoreDocs:
             doc = self.searcher.doc(found.doc)
-            bks.append(Book.objects.get(id=doc.get("book_id")))
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
-- 
2.20.1
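
For context: the fragment hunks above walk the master element with walker(master) and accumulate text between 'begin' and 'end' marker nodes into per-fragment Lucene documents, and the new "if fid not in fragments: continue" guard skips an 'end' marker whose id was never opened. The standalone sketch below illustrates that aggregation pattern only; collect_fragments and the sample event tuples are hypothetical stand-ins for the walker-based traversal in apps/search/index.py, not code from the repository.

    # -*- coding: utf-8 -*-
    # Sketch of the begin/end fragment aggregation, assuming a simplified
    # event stream of (tag, fid, text) tuples in place of walker(master).

    def collect_fragments(events):
        fragments = {}  # open fragments: fid -> accumulated text pieces
        finished = {}   # completed fragments: fid -> joined content
        for tag, fid, text in events:
            if tag == 'begin':
                fragments[fid] = []
            elif tag == 'end':
                if fid not in fragments:
                    continue  # 'end' without a matching 'begin': skip it
                finished[fid] = u' '.join(fragments.pop(fid))
            elif text is not None:
                for pieces in fragments.values():
                    pieces.append(text)
        return finished

    if __name__ == '__main__':
        events = [
            ('begin', 'f1', None),
            ('text', None, u'Ala ma kota.'),
            ('end', 'f1', None),
            ('end', 'f2', None),  # unmatched id is skipped, as in the patch
        ]
        print(collect_fragments(events))  # {'f1': u'Ala ma kota.'}

The patched code additionally gathers per-fragment themes and stores each completed fragment as its own document with fragment_anchor, content and themes fields, but the skip-on-unmatched-id behaviour is the same.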