infopages fixtures
[wolnelektury.git] / apps / search / index.py
index d63d3f8..8ea3124 100644
@@ -331,10 +331,13 @@ class Index(BaseIndex):
                                                (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED)
 
         # get published date
-        source = book_info.source_name
-        match = self.published_date_re.search(source)
-        if match is not None:
-            fields["published_date"] = Field("published_date", str(match.groups()[0]), Field.Store.YES, Field.Index.NOT_ANALYZED)
+        pd = None
+        if hasattr(book_info, 'source_name') and book_info.source_name:
+            match = self.published_date_re.search(book_info.source_name)
+            if match is not None:
+                pd = str(match.groups()[0])
+        if not pd: pd = ""
+        fields["published_date"] = Field("published_date", pd, Field.Store.YES, Field.Index.NOT_ANALYZED)
 
         return fields
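
For orientation, the new extraction logic as a standalone sketch. The real published_date_re is defined elsewhere in index.py, so the pattern below is only an illustrative stand-in, and extract_published_date is a hypothetical helper name:

    import re

    # Illustrative pattern only -- the real published_date_re lives elsewhere
    # in index.py; here we just grab the first four-digit run.
    published_date_re = re.compile(r'(\d{4})')

    def extract_published_date(book_info):
        pd = None
        source = getattr(book_info, 'source_name', None)
        if source:
            match = published_date_re.search(source)
            if match is not None:
                pd = str(match.groups()[0])
        return pd or ""

    class BookInfo(object):            # minimal stand-in for the metadata object
        source_name = u"Dziela (1874)"

    extract_published_date(BookInfo())   # -> "1874"
    extract_published_date(object())     # no source_name -> ""

The point of the change is that books whose metadata has no source_name, or no recognizable date in it, now get an empty published_date field instead of raising AttributeError or omitting the field entirely.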
 
@@ -369,11 +372,18 @@ class Index(BaseIndex):
             return []
 
         def walker(node, ignore_tags=[]):
-            yield node, None
-            for child in filter(lambda n: n.tag not in ignore_tags, list(node)):
-                for b, e in walker(child):
-                    yield b, e
-            yield None, node
+
+            if node.tag not in ignore_tags:
+                yield node, None, None
+                if node.text is not None:
+                    yield None, node.text, None
+                for child in list(node):
+                    for b, t, e in walker(child, ignore_tags):
+                        yield b, t, e
+                yield None, None, node
+
+            if node.tail is not None:
+                yield None, node.tail, None
             return
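
A standalone sketch of the new walker protocol, using lxml as the rest of the indexer does: each element is announced once on open and once on close, and text/tail content now arrives as separate middle values instead of being read off neighbouring elements. The sample markup is made up:

    from lxml import etree

    def walker(node, ignore_tags=[]):
        # Yields (start, text, end): the element on open, each text chunk
        # (text and tail) on its own, and the element again on close.
        if node.tag not in ignore_tags:
            yield node, None, None
            if node.text is not None:
                yield None, node.text, None
            for child in list(node):
                for b, t, e in walker(child, ignore_tags):
                    yield b, t, e
            yield None, None, node
        if node.tail is not None:
            yield None, node.tail, None

    root = etree.fromstring("<akap>Ala <motyw id='m1'>Kot</motyw>ma kota.</akap>")
    [t for _, t, _ in walker(root) if t is not None]
    # -> ['Ala ', 'Kot', 'ma kota.']
    [t for _, t, _ in walker(root, ['motyw']) if t is not None]
    # -> ['Ala ', 'ma kota.']   (ignored element's text dropped, its tail kept)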
 
         def fix_format(text):
@@ -445,35 +455,50 @@ class Index(BaseIndex):
 
                 # section content
                 content = []
-                footnote = None
-
-                for start, end in walker(header, ignore_tags=self.ignore_content_tags):
-                    # handle footnotes
-                    # if start is not None and start.tag in self.footnote_tags:
-                    #     footnote = ' '.join(start.itertext())
-                    # elif end is not None and footnote is not None and end.tag in self.footnote_tags:
-                    #     doc = add_part(snippets, header_index=position, header_type=header.tag,
-                    #                    content=footnote)
+                footnote = []
 
-                    #     self.index.addDocument(doc)
+                def all_content(text):
+                    for frag in fragments.values():
+                        frag['content'].append(text)
+                    content.append(text)
+                handle_text = [all_content]
 
-                    #     footnote = None
 
+                for start, text, end in walker(header, ignore_tags=self.ignore_content_tags):
+                    # handle footnotes
+                    if start is not None and start.tag in self.footnote_tags:
+                        footnote = []
+                        def collect_footnote(t):
+                            footnote.append(t)
+                        handle_text.append(collect_footnote)
+                    elif end is not None and footnote != [] and end.tag in self.footnote_tags:
+                        handle_text.pop()
+                        doc = add_part(snippets, header_index=position, header_type=header.tag,
+                                       content=u''.join(footnote),
+                                       is_footnote=Field("is_footnote", 'true', Field.Store.NO, Field.Index.NOT_ANALYZED))
+
+                        self.index.addDocument(doc)
+                        #print "@ footnote text: %s" % footnote
+                        footnote = []
+
                     # handle fragments and themes.
                     if start is not None and start.tag == 'begin':
                         fid = start.attrib['id'][1:]
                         fragments[fid] = {'content': [], 'themes': [], 'start_section': position, 'start_header': header.tag}
 
+                    # themes for this fragment
                     elif start is not None and start.tag == 'motyw':
                         fid = start.attrib['id'][1:]
+                        handle_text.append(None)
                         if start.text is not None:
                             fragments[fid]['themes'] += map(str.strip, map(give_me_utf8, start.text.split(',')))
+                    elif end is not None and end.tag == 'motyw':
+                        handle_text.pop()
 
                     elif start is not None and start.tag == 'end':
                         fid = start.attrib['id'][1:]
                         if fid not in fragments:
                             continue  # a broken <end> node, skip it
-                                      #                        import pdb; pdb.set_trace()
                         frag = fragments[fid]
                         if frag['themes'] == []:
                             continue  # empty themes list.
@@ -486,22 +511,20 @@ class Index(BaseIndex):
                                        fragment_anchor=fid,
                                        content=fix_format(frag['content']),
                                        themes=frag['themes'])
-
+                        #print '@ FRAG %s' % frag['content']
                         self.index.addDocument(doc)
 
                         # Collect content.
-                    elif start is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(start.text)
-                        content.append(start.text)
-                    elif end is not None:
-                        for frag in fragments.values():
-                            frag['content'].append(end.tail)
-                        content.append(end.tail)
+
+                    if text is not None and handle_text != []:
+                        hdl = handle_text[-1]
+                        if hdl is not None:
+                            hdl(text)
 
                         # in the end, add a section text.
                 doc = add_part(snippets, header_index=position, header_type=header.tag,
                                content=fix_format(content))
+                #print '@ CONTENT: %s' % fix_format(content)
 
                 self.index.addDocument(doc)
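
The text dispatch is easier to follow in isolation. A minimal sketch of the handle_text stack idea, simplified and with Lucene and the XML walk stripped away: the top of the stack decides where each text chunk goes, footnote tags push a collector, theme markers (motyw) push None so theme labels stay out of the body text, and each closing tag pops:

    content = []
    footnote = []
    handle_text = [content.append]        # default handler: section text

    def dispatch(text):
        hdl = handle_text[-1]
        if hdl is not None:
            hdl(text)

    dispatch("Ala ma kota")
    handle_text.append(footnote.append)   # entering a footnote tag
    dispatch("przypis o kocie")
    handle_text.pop()                     # leaving the footnote
    handle_text.append(None)              # entering <motyw>: swallow its text
    dispatch("Zwierzeta")
    handle_text.pop()                     # leaving </motyw>
    dispatch(".")

    # content  == ['Ala ma kota', '.']
    # footnote == ['przypis o kocie']

In the real loop the default handler is all_content, which also appends to every currently open fragment, and the collected footnote text is indexed as its own document with the is_footnote field set.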
 
@@ -531,7 +554,7 @@ class ReusableIndex(Index):
     index = None
 
     def open(self, analyzer=None, threads=4):
-        if ReusableIndex.index is not None:
+        if ReusableIndex.index:
             self.index = ReusableIndex.index
         else:
             print("opening index")
@@ -545,13 +568,15 @@ class ReusableIndex(Index):
 
     @staticmethod
     def close_reusable():
-        if ReusableIndex.index is not None:
+        if ReusableIndex.index:
+            print("closing index")
             ReusableIndex.index.optimize()
             ReusableIndex.index.close()
             ReusableIndex.index = None
 
     def close(self):
-        pass
+        if ReusableIndex.index:
+            ReusableIndex.index.commit()
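
The effect of the ReusableIndex change is that per-book close() only commits, while the shared writer is optimized and really closed once at the end. A usage sketch of that lifecycle; the driving loop, the books iterable and the index_book call are assumptions about the surrounding management code, not part of this file:

    index = ReusableIndex()
    index.open()
    try:
        for book in books:           # hypothetical iterable of books to index
            index.index_book(book)   # assumed indexing entry point
            index.close()            # now just commit(); the writer stays open
    finally:
        ReusableIndex.close_reusable()   # optimize() and close() exactly once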
 
 
 class JoinSearch(object):
@@ -758,8 +783,6 @@ class SearchResult(object):
     def __cmp__(self, other):
         c = cmp(self.score, other.score)
         if c == 0:
-            if not hasattr(other,'published_date') or not hasattr(self, 'published_date'):
-                import pdb; pdb.set_trace()
             # this is inverted, because earlier date is better
             return cmp(other.published_date, self.published_date)
         else:
@@ -1189,17 +1212,25 @@ class Search(IndexStore):
         if position is None or length is None:
             return None
         # locate content.
-        snippets = Snippets(stored.get('book_id')).open()
+        book_id = int(stored.get('book_id'))
+        snippets = Snippets(book_id).open()
         try:
-            text = snippets.get((int(position),
-                                 int(length)))
-        finally:
-            snippets.close()
+            try:
+                text = snippets.get((int(position),
+                                     int(length)))
+            finally:
+                snippets.close()
 
-        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
-        #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
-        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
+            tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
+            #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
+            snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
 
+        except Exception, e:
+            e2 = e
+            if hasattr(e, 'getJavaException'):
+                e2 = unicode(e.getJavaException())
+            raise Exception("Problem fetching snippets for book %d, @%d len=%d" % (book_id, int(position), int(length)),
+                e2)
         return snip
 
     @staticmethod
@@ -1279,7 +1310,7 @@ class Search(IndexStore):
 
         return only_term
 
-    def hint_tags(self, string, max_results=50, pdcounter=True, prefix=True):
+    def hint_tags(self, string, max_results=50, pdcounter=True, prefix=True, fuzzy=False):
         """
         Return auto-complete hints for tags
         using prefix search.
@@ -1291,14 +1322,14 @@ class Search(IndexStore):
             if prefix:
                 q = self.make_prefix_phrase(toks, field)
             else:
-                q = self.make_term_query(toks, field)
+                q = self.make_term_query(toks, field, fuzzy=fuzzy)
             top.add(BooleanClause(q, BooleanClause.Occur.SHOULD))
 
         no_book_cat = self.term_filter(Term("tag_category", "book"), inverse=True)
 
         return self.search_tags(top, no_book_cat, max_results=max_results, pdcounter=pdcounter)
 
-    def hint_books(self, string, max_results=50, prefix=True):
+    def hint_books(self, string, max_results=50, prefix=True, fuzzy=False):
         """
         Returns auto-complete hints for book titles
         Because we do not index 'pseudo' title-tags.
@@ -1309,7 +1340,7 @@ class Search(IndexStore):
         if prefix:
             q = self.make_prefix_phrase(toks, 'title')
         else:
-            q = self.make_term_query(toks, 'title')
+            q = self.make_term_query(toks, 'title', fuzzy=fuzzy)
 
         return self.search_books(q, self.term_filter(Term("is_book", "true")), max_results=max_results)
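
With the fuzzy keyword threaded through hint_tags and hint_books into make_term_query, callers can opt into typo-tolerant matching whenever prefix search is switched off. A usage sketch, assuming a Search instance can be constructed without arguments:

    search = Search()
    # default behaviour (prefix phrase search) is unchanged:
    search.hint_tags(u"Mickiewicz")
    # fuzzy term matching, e.g. to tolerate a misspelling:
    search.hint_tags(u"Mickiewcz", prefix=False, fuzzy=True)
    search.hint_books(u"Pan Tadeusz", prefix=False, fuzzy=True)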