first search demo
author Marcin Koziej <marcin.koziej@nowoczesnapolska.org.pl>
Thu, 24 Nov 2011 16:08:36 +0000 (17:08 +0100)
committer Marcin Koziej <marcin.koziej@nowoczesnapolska.org.pl>
Thu, 24 Nov 2011 16:08:36 +0000 (17:08 +0100)
reusable index. first attempt at rich search query
async indexing, fixes
multisearch fails to see joined queries -> will try to use one top-level query
store contents in index for highlighting fragments
aggregate & display results
fixed multithreading bug with attaching to jvm
log parallel job exceptions
don't index tags from db.
parallel indexing
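
In short, the new demo view ties these pieces together roughly like this (a condensed sketch based on the new apps/search/views.py below; the query string is illustrative):

# Sketch only -- mirrors the flow of the new search view.
from lucene import StringReader
from search import MultiSearch, SearchResult, JVM

JVM.attachCurrentThread()        # every thread that touches Lucene must attach to the JVM
srch = MultiSearch()

query = u'example query'         # illustrative; the view reads request.GET['q']
toks = StringReader(query)

# run the three strategies and merge the hits per book
results = SearchResult.aggregate(srch.search_perfect_book(toks, fuzzy=False),
                                 srch.search_perfect_parts(toks, fuzzy=False),
                                 srch.search_everywhere(toks, fuzzy=False))
results.sort(reverse=True)       # best-scoring books first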

.gitignore
apps/catalogue/models.py
apps/search/__init__.py
apps/search/index.py
apps/search/urls.py [new file with mode: 0644]
apps/search/views.py [new file with mode: 0644]
requirements.txt
wolnelektury/settings.py
wolnelektury/templates/newsearch/search.html [new file with mode: 0644]
wolnelektury/urls.py

diff --git a/.gitignore b/.gitignore
index c375441..230a876 100644
@@ -33,3 +33,5 @@ thumbs.db
 
 # Tags file
 TAGS
+
+media
diff --git a/apps/catalogue/models.py b/apps/catalogue/models.py
index 5846f53..1a2e8f8 100644
@@ -626,8 +626,19 @@ class Book(models.Model):
         return result.wait()
 
     def search_index(self):
-        with search.Index() as idx:
+        if settings.SEARCH_INDEX_PARALLEL:
+            if isinstance(settings.SEARCH_INDEX_PARALLEL, int):
+                idx = search.ReusableIndex(threads=4)
+            else:
+                idx = search.ReusableIndex()
+        else:
+            idx = search.Index()
+            
+        idx.open()
+        try:
             idx.index_book(self)
+        finally:
+            idx.close()
 
     @classmethod
     def from_xml_file(cls, xml_file, **kwargs):
diff --git a/apps/search/__init__.py b/apps/search/__init__.py
index f45c150..a3f8e9e 100644
@@ -1,3 +1,3 @@
-from index import Index, Search
 import lucene
-lucene.initVM(lucene.CLASSPATH)
+
+from index import Index, Search, ReusableIndex, MultiSearch, SearchResult, JVM
diff --git a/apps/search/index.py b/apps/search/index.py
index 050fc5b..af3dd9a 100644
@@ -1,18 +1,29 @@
 # -*- coding: utf-8 -*-
+
 from django.conf import settings
 from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     NumericField, Version, Document, JavaError, IndexSearcher, \
-    QueryParser, Term, PerFieldAnalyzerWrapper, \
+    QueryParser, PerFieldAnalyzerWrapper, \
     SimpleAnalyzer, PolishAnalyzer, ArrayList, \
     KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
     BlockJoinQuery, BlockJoinCollector, TermsFilter, \
-    HashSet, BooleanClause, Term
+    HashSet, BooleanClause, Term, CharTermAttribute, \
+    PhraseQuery, MultiPhraseQuery, StringReader, TermQuery, BlockJoinQuery, \
+    FuzzyQuery, FuzzyTermEnum, Sort, Integer, \
+    SimpleHTMLFormatter, Highlighter, QueryScorer, TokenSources, TextFragment, \
+    initVM, CLASSPATH, JArray
     # KeywordAnalyzer
+JVM = initVM(CLASSPATH)
+import sys
 import os
 import errno
 from librarian import dcparser
 from librarian.parser import WLDocument
 import catalogue.models
+from multiprocessing.pool import ThreadPool
+from threading import current_thread
+import atexit
+import traceback
 
 
 class WLAnalyzer(PerFieldAnalyzerWrapper):
@@ -34,7 +45,9 @@ class WLAnalyzer(PerFieldAnalyzerWrapper):
         self.addAnalyzer("author", simple)
         self.addAnalyzer("is_book", keyword)
 
-        #self.addanalyzer("fragment_anchor", keyword)
+        self.addAnalyzer("KEYWORD", keyword)
+        self.addAnalyzer("SIMPLE", simple)
+        self.addAnalyzer("NATURAL", polish)
 
 
 class IndexStore(object):
@@ -72,7 +85,7 @@ class Index(IndexStore):
         self.index = None
 
     def remove_book(self, book):
-        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True,True)
+        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
         self.index.deleteDocuments(q)
 
     def index_book(self, book, overwrite=True):
@@ -97,6 +110,8 @@ class Index(IndexStore):
         'wywiad'
         ]
 
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
+
     def create_book_doc(self, book):
         """
         Create a lucene document connected to the book
@@ -110,6 +125,8 @@ class Index(IndexStore):
     def extract_metadata(self, book):
         book_info = dcparser.parse(book.xml_file)
 
+        print("extract metadata for book %s id=%d, thread%d" % (book.slug, book.id, current_thread().ident))
+
         doc = self.create_book_doc(book)
         doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
         doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
@@ -158,14 +175,16 @@ class Index(IndexStore):
         master = self.get_master(root)
         if master is None:
             return []
-        
+
         header_docs = []
         for header, position in zip(list(master), range(len(master))):
+            if header.tag in self.skip_header_tags:
+                continue
             doc = self.create_book_doc(book)
             doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
             doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
             content = u' '.join([t for t in header.itertext()])
-            doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED))
             header_docs.append(doc)
 
         def walker(node):
@@ -202,16 +221,13 @@ class Index(IndexStore):
                     return u' '.join(map(
                         lambda x: x == None and u'(none)' or unicode(x),
                         l))
-                s = u"Fragment %s complete, themes: %s contents: %s" % \
-                      (fid, jstr(frag['themes']), jstr(frag['content']))
-                print(s.encode('utf-8'))
 
                 doc = self.create_book_doc(book)
                 doc.add(Field("fragment_anchor", fid,
                               Field.Store.YES, Field.Index.NOT_ANALYZED))
                 doc.add(Field("content",
                               u' '.join(filter(lambda s: s is not None, frag['content'])),
-                              Field.Store.NO, Field.Index.ANALYZED))
+                              Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS))
                 doc.add(Field("themes",
                               u' '.join(filter(lambda s: s is not None, frag['themes'])),
                               Field.Store.NO, Field.Index.ANALYZED))
@@ -234,10 +250,67 @@ class Index(IndexStore):
         self.close()
 
 
+def log_exception_wrapper(f):
+    def _wrap(*a):
+        try:
+            f(*a)
+        except Exception, e:
+            print("Error in indexing thread: %s" % e)
+            traceback.print_exc()
+            raise e
+    return _wrap
+
+
+class ReusableIndex(Index):
+    """
+    Works like Index, but does not close/optimize the Lucene index
+    until program exit (uses an atexit hook).
+    This is useful for the importbooks command.
+
+    If you cannot rely on atexit, call ReusableIndex.close_reusable() yourself.
+    """
+    index = None
+    pool = None
+    pool_jobs = None
+
+    def open(self, analyzer=None, threads=4):
+        if ReusableIndex.index is not None:
+            self.index = ReusableIndex.index
+        else:
+            print("opening index")
+            ReusableIndex.pool = ThreadPool(threads, initializer=lambda: JVM.attachCurrentThread() )
+            ReusableIndex.pool_jobs = []
+            Index.open(self, analyzer)
+            ReusableIndex.index = self.index
+            atexit.register(ReusableIndex.close_reusable)
+
+    def index_book(self, *args, **kw):
+        job = ReusableIndex.pool.apply_async(log_exception_wrapper(Index.index_book), (self,) + args, kw)
+        ReusableIndex.pool_jobs.append(job)
+
+    @staticmethod
+    def close_reusable():
+        if ReusableIndex.index is not None:
+            print("wait for indexing to finish")
+            for job in ReusableIndex.pool_jobs:
+                job.get()
+                sys.stdout.write('.')
+                sys.stdout.flush()
+            print("done.")
+            ReusableIndex.pool.close()
+
+            ReusableIndex.index.optimize()
+            ReusableIndex.index.close()
+            ReusableIndex.index = None
+
+    def close(self):
+        pass
+
+
 class Search(IndexStore):
     def __init__(self, default_field="content"):
         IndexStore.__init__(self)
-        self.analyzer = PolishAnalyzer(Version.LUCENE_34)
+        self.analyzer = WLAnalyzer() #PolishAnalyzer(Version.LUCENE_34)
         ## self.analyzer = WLAnalyzer()
         self.searcher = IndexSearcher(self.store, True)
         self.parser = QueryParser(Version.LUCENE_34, default_field,
@@ -296,9 +369,7 @@ class Search(IndexStore):
 
     def bsearch(self, query, max_results=50):
         q = self.query(query)
-        f = TermsFilter()
-        f.addTerm(Term("is_book", "true"))
-        bjq = BlockJoinQuery(q, f, BlockJoinQuery.ScoreMode.Avg)
+        bjq = BlockJoinQuery(q, self.parent_filter, BlockJoinQuery.ScoreMode.Avg)
 
         tops = self.searcher.search(bjq, max_results)
         bks = []
@@ -306,3 +377,291 @@ class Search(IndexStore):
             doc = self.searcher.doc(found.doc)
             bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
         return (bks, tops.totalHits)
+
+# TokenStream tokenStream = analyzer.tokenStream(fieldName, reader);
+# OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
+# CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
+
+# while (tokenStream.incrementToken()) {
+#     int startOffset = offsetAttribute.startOffset();
+#     int endOffset = offsetAttribute.endOffset();
+#     String term = charTermAttribute.toString();
+# }
+
+
+class SearchResult(object):
+    def __init__(self, searcher, scoreDocs, score=None, highlight_query=None):
+        if score:
+            self.score = score
+        else:
+            self.score = scoreDocs.score
+
+        self.fragments = []
+        self.scores = {}
+        self.sections = []
+
+        stored = searcher.doc(scoreDocs.doc)
+        self.book_id = int(stored.get("book_id"))
+
+        fragment = stored.get("fragment_anchor")
+        if fragment:
+            self.fragments.append(fragment)
+            self.scores[fragment] = scoreDocs.score
+
+        header_type = stored.get("header_type")
+        if header_type:
+            sec = (header_type, int(stored.get("header_index")))
+            self.sections.append(sec)
+            self.scores[sec] = scoreDocs.score
+
+        self.snippets = []
+
+    def add_snippets(self, snippets):
+        self.snippets += snippets
+        return self
+
+    def get_book(self):
+        return catalogue.models.Book.objects.get(id=self.book_id)
+
+    book = property(get_book)
+
+    def get_parts(self):
+        book = self.book
+        parts = [{"header": s[0], "position": s[1], '_score_key': s} for s in self.sections] \
+            + [{"fragment": book.fragments.get(anchor=f), '_score_key':f} for f in self.fragments]
+
+        parts.sort(lambda a, b: cmp(self.scores[a['_score_key']], self.scores[b['_score_key']]))
+        print("bookid: %d parts: %s" % (self.book_id, parts))
+        return parts
+
+    parts = property(get_parts)
+
+    def merge(self, other):
+        if self.book_id != other.book_id:
+            raise ValueError("this search result is for book %d; tried to merge with %d" % (self.book_id, other.book_id))
+        self.fragments += other.fragments
+        self.sections += other.sections
+        self.snippets += other.snippets
+        self.scores.update(other.scores)
+        if other.score > self.score:
+            self.score = other.score
+        return self
+
+    def __unicode__(self):
+        return u'SearchResult(book_id=%d, score=%d)' % (self.book_id, self.score)
+
+    @staticmethod
+    def aggregate(*result_lists):
+        books = {}
+        for rl in result_lists:
+            for r in rl:
+                if r.book_id in books:
+                    books[r.book_id].merge(r)
+                    #print(u"already have one with score %f, and this one has score %f" % (books[book.id][0], found.score))
+                else:
+                    books[r.book_id] = r
+        return books.values()
+
+    def __cmp__(self, other):
+        return cmp(self.score, other.score)
+
+
+class MultiSearch(Search):
+    """Class capable of IMDb-like searching"""
+    def get_tokens(self, searched, field='content'):
+        """returns tokens analyzed by a proper (for a field) analyzer
+        argument can be: StringReader, string/unicode, or tokens. In the last case
+        they will just be returned (so we can reuse tokens, if we don't change the analyzer)
+        """
+        if isinstance(searched, str) or isinstance(searched, unicode):
+            searched = StringReader(searched)
+        elif isinstance(searched, list):
+            return searched
+
+        searched.reset()
+        tokens = self.analyzer.reusableTokenStream(field, searched)
+        toks = []
+        while tokens.incrementToken():
+            cta = tokens.getAttribute(CharTermAttribute.class_)
+            toks.append(cta.toString())
+        return toks
+
+    def fuzziness(self, fuzzy):
+        if not fuzzy:
+            return None
+        if isinstance(fuzzy, float) and fuzzy > 0.0 and fuzzy <= 1.0:
+            return fuzzy
+        else:
+            return 0.5
+
+    def make_phrase(self, tokens, field='content', slop=2, fuzzy=False):
+        if fuzzy:
+            phrase = MultiPhraseQuery()
+            for t in tokens:
+                term = Term(field, t)
+                fuzzterm = FuzzyTermEnum(self.searcher.getIndexReader(), term, self.fuzziness(fuzzy))
+                fuzzterms = []
+
+                while True:
+                    #                    print("fuzz %s" % unicode(fuzzterm.term()).encode('utf-8'))
+                    ft = fuzzterm.term()
+                    if ft:
+                        fuzzterms.append(ft)
+                    if not fuzzterm.next(): break
+                if fuzzterms:
+                    phrase.add(JArray('object')(fuzzterms, Term))
+                else:
+                    phrase.add(term)
+        else:
+            phrase = PhraseQuery()
+            phrase.setSlop(slop)
+            for t in tokens:
+                term = Term(field, t)
+                phrase.add(term)
+        return phrase
+
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, fuzzy=False):
+        q = BooleanQuery()
+        for t in tokens:
+            term = Term(field, t)
+            if fuzzy:
+                term = FuzzyQuery(term, self.fuzziness(fuzzy))
+            else:
+                term = TermQuery(term)
+            q.add(BooleanClause(term, modal))
+        return q
+
+    def content_query(self, query):
+        return BlockJoinQuery(query, self.parent_filter,
+                              BlockJoinQuery.ScoreMode.Total)
+
+    def search_perfect_book(self, searched, max_results=20, fuzzy=False):
+        qrys = [self.make_phrase(self.get_tokens(searched, field=fld), field=fld, fuzzy=fuzzy) for fld in ['author', 'title']]
+
+        books = []
+        for q in qrys:
+            top = self.searcher.search(q, max_results)
+            for found in top.scoreDocs:
+                books.append(SearchResult(self.searcher, found))
+        return books
+
+    def search_perfect_parts(self, searched, max_results=20, fuzzy=False):
+        qrys = [self.make_phrase(self.get_tokens(searched), field=fld, fuzzy=fuzzy) for fld in ['content']]
+
+        books = []
+        for q in qrys:
+            top = self.searcher.search(q, max_results)
+            for found in top.scoreDocs:
+                books.append(SearchResult(self.searcher, found).add_snippets(self.get_snippets(found, q)))
+
+        return books
+
+    def search_everywhere(self, searched, max_results=20, fuzzy=False):
+        books = []
+
+        # content only query : themes x content
+        q = BooleanQuery()
+
+        tokens = self.get_tokens(searched)
+        q.add(BooleanClause(self.make_term_query(tokens, field='themes', fuzzy=fuzzy), BooleanClause.Occur.MUST))
+        q.add(BooleanClause(self.make_term_query(tokens, field='content', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        topDocs = self.searcher.search(q, max_results)
+        for found in topDocs.scoreDocs:
+            books.append(SearchResult(self.searcher, found))
+
+        # joined query themes/content x author/title/epochs/genres/kinds
+        q = BooleanQuery()
+        in_meta = BooleanQuery()
+        in_content = BooleanQuery()
+
+        for fld in ['themes', 'content']:
+            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=False), BooleanClause.Occur.SHOULD))
+
+        in_meta.add(BooleanClause(self.make_term_query(self.get_tokens(searched, field='author'), field='author', fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        for fld in ['title', 'epochs', 'genres', 'kinds']:
+            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld, fuzzy=fuzzy), BooleanClause.Occur.SHOULD))
+
+        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
+        in_content_join = self.content_query(in_content)
+        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
+        #        import pdb; pdb.set_trace()
+        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+
+        self.searcher.search(q, collector)
+
+        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
+        if top_groups:
+            for grp in top_groups.groups:
+                for part in grp.scoreDocs:
+                    books.append(SearchResult(self.searcher, part, score=grp.maxScore))
+        return books
+
+    def multisearch(self, query, max_results=50):
+        """
+        Search strategy:
+        - (phrase) OR -> content
+                      -> title
+                      -> author
+        - (keywords)  -> author
+                      -> motyw
+                      -> tags
+                      -> content
+        """
+        # queryreader = StringReader(query)
+        # tokens = self.get_tokens(queryreader)
+
+        # top_level = BooleanQuery()
+        # Should = BooleanClause.Occur.SHOULD
+
+        # phrase_level = BooleanQuery()
+        # phrase_level.setBoost(1.3)
+
+        # p_content = self.make_phrase(tokens, joined=True)
+        # p_title = self.make_phrase(tokens, 'title')
+        # p_author = self.make_phrase(tokens, 'author')
+
+        # phrase_level.add(BooleanClause(p_content, Should))
+        # phrase_level.add(BooleanClause(p_title, Should))
+        # phrase_level.add(BooleanClause(p_author, Should))
+
+        # kw_level = BooleanQuery()
+
+        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
+        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
+        # kw_level.add(j_themes, Should)
+        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
+        # j_con = self.make_term_query(tokens, joined=True)
+        # kw_level.add(j_con, Should)
+
+        # top_level.add(BooleanClause(phrase_level, Should))
+        # top_level.add(BooleanClause(kw_level, Should))
+
+        return None
+
+
+    def do_search(self, query, max_results=50, collector=None):
+        tops = self.searcher.search(query, max_results)
+        #tops = self.searcher.search(p_content, max_results)
+
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            b = catalogue.models.Book.objects.get(id=doc.get("book_id"))
+            bks.append(b)
+            print "%s (%d) -> %f" % (b, b.id, found.score)
+        return (bks, tops.totalHits)
+
+    def get_snippets(self, scoreDoc, query, field='content'):
+        htmlFormatter = SimpleHTMLFormatter()
+        highlighter = Highlighter(htmlFormatter, QueryScorer(query))
+
+        stored = self.searcher.doc(scoreDoc.doc)
+        text = stored.get(field)
+        tokenStream = TokenSources.getAnyTokenStream(self.searcher.getIndexReader(), scoreDoc.doc, field, self.analyzer)
+        #  highlighter.getBestTextFragments(tokenStream, text, False, 10)
+        snip = highlighter.getBestFragments(tokenStream, text, 3, "...")
+        print('snips: %s' % snip)
+
+        return [snip]
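
A minimal sketch (not part of the diff) of how the MultiSearch helpers above compose into a standalone query; the title phrase is illustrative and an already-built index is assumed:

# Sketch only: tokenize, build a phrase query over one field, wrap hits in SearchResult.
from search import MultiSearch, SearchResult, JVM

JVM.attachCurrentThread()
s = MultiSearch()

tokens = s.get_tokens(u'pan tadeusz', field='title')   # analyzed with WLAnalyzer
phrase = s.make_phrase(tokens, field='title', slop=2)   # PhraseQuery with slop 2
top = s.searcher.search(phrase, 10)
for found in top.scoreDocs:
    r = SearchResult(s.searcher, found)
    print("%d -> %f" % (r.book_id, r.score))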
diff --git a/apps/search/urls.py b/apps/search/urls.py
new file mode 100644
index 0000000..f93d65e
--- /dev/null
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+#
+from django.conf.urls.defaults import *
+
+urlpatterns = patterns('search.views',
+    url(r'^$', 'main', name='newsearch'),
+)
+
diff --git a/apps/search/views.py b/apps/search/views.py
new file mode 100644
index 0000000..d9b2f26
--- /dev/null
@@ -0,0 +1,66 @@
+
+from django.shortcuts import render_to_response, get_object_or_404
+from django.template import RequestContext
+from django.contrib.auth.decorators import login_required
+from django.views.decorators import cache
+
+from catalogue.utils import get_random_hash
+from catalogue.models import Book, Tag
+from catalogue import forms
+from search import MultiSearch, JVM, SearchResult
+from lucene import StringReader
+
+import enchant
+
+dictionary = enchant.Dict('pl_PL')
+
+
+def did_you_mean(query, tokens):
+    change = {}
+    
+    for t in tokens:
+        print("%s ok? %s, sug: %s" %(t, dictionary.check(t), dictionary.suggest(t)))
+        if not dictionary.check(t):
+            try:
+                change[t] = dictionary.suggest(t)[0]
+            except IndexError:
+                pass
+
+    if change == {}:
+        return None
+
+    for frm, to in change.items():
+        query = query.replace(frm, to)
+        
+    return query
+
+
+def main(request):
+    results = {}
+    JVM.attachCurrentThread()  # where to put this?
+    srch = MultiSearch()
+
+    results = None
+    query = None
+    fuzzy = False
+    if 'q' in request.GET:
+        query = request.GET['q']
+        toks = StringReader(query)
+        fuzzy = 'fuzzy' in request.GET
+        if fuzzy:
+            fuzzy = 0.7
+
+
+        results = SearchResult.aggregate(srch.search_perfect_book(toks, fuzzy=fuzzy),
+                                         srch.search_perfect_parts(toks, fuzzy=fuzzy),
+                                         srch.search_everywhere(toks, fuzzy=fuzzy))
+        results.sort(reverse=True)
+
+        for r in results:
+            print r.parts
+
+    return render_to_response('newsearch/search.html', {'results': results,
+                                                        'did_you_mean': (query is not None) and 
+                                                        did_you_mean(query, srch.get_tokens(query, field='SIMPLE')),
+                                                        'fuzzy': fuzzy},
+                              context_instance=RequestContext(request))
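
For reference, the did_you_mean() helper above relies on two pyenchant calls; a minimal sketch with an illustrative word:

# Sketch only: the spell-checking primitives used by did_you_mean().
import enchant

d = enchant.Dict('pl_PL')          # same Polish dictionary as in the view
word = u'ksiazka'                  # illustrative, misspelled (missing diacritics)
if not d.check(word):              # is the word in the dictionary?
    print(d.suggest(word)[:1])     # the view substitutes the first suggestion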
diff --git a/requirements.txt b/requirements.txt
index 4fd263e..dfb01f1 100644
@@ -28,3 +28,6 @@ lxml>=2.2.2
 # celery tasks
 django-celery
 django-kombu
+
+# spell checking
+pyenchant
diff --git a/wolnelektury/settings.py b/wolnelektury/settings.py
index 757447e..ca06784 100644
@@ -229,11 +229,12 @@ API_WAIT = 10
 # limit number of filtering tags
 MAX_TAG_LIST = 6
 
-NO_BUILD_EPUB = False
+NO_BUILD_EPUB = True
 NO_BUILD_TXT = False
-NO_BUILD_PDF = False
-NO_BUILD_MOBI = False
+NO_BUILD_PDF = True
+NO_BUILD_MOBI = True
 NO_SEARCH_INDEX = False
+SEARCH_INDEX_PARALLEL = False
 
 ALL_EPUB_ZIP = 'wolnelektury_pl_epub'
 ALL_PDF_ZIP = 'wolnelektury_pl_pdf'
diff --git a/wolnelektury/templates/newsearch/search.html b/wolnelektury/templates/newsearch/search.html
new file mode 100644
index 0000000..af36b59
--- /dev/null
@@ -0,0 +1,56 @@
+{% extends "base.html" %}
+{% load i18n %}
+{% load catalogue_tags %}
+
+{% block title %}Search{% endblock %}
+
+{% block metadescription %}{% endblock %}
+
+{% block bodyid %}newsearch{% endblock %}
+
+{% block body %}
+    <h1>Search</h1>
+    <form action="{% url newsearch %}" method="get" accept-charset="utf-8" id="search-form-x">
+        <p>
+         <input type="text" name="q" value="{{request.GET.q}}" style="width:250px; font-size: 1.2em;">
+         <input type="submit" value="{% trans "Search" %}" /> 
+         <br />
+         <input type="checkbox" value="true" name="fuzzy" {% if fuzzy %}checked{% endif %}/> fuzzy.
+       </p>
+    </form>
+    {% if did_you_mean %}
+    Czy miałeś na myśli <a href="?q={{did_you_mean|urlencode}}">{{did_you_mean}}</a>?
+    {% endif %}
+
+
+    <div id="results">
+      <ol>
+      {% for result in results %}
+      <li>
+       <p><a href="{{result.book.get_absolute_url}}">{{result.book.pretty_title}}</a> (id: {{result.book.id}}, score: {{result.score}})</p>
+       <ul>
+         {% for snippet in result.snippets %}
+         <li>{{snippet|safe}}</li>
+         {% endfor %}
+
+         {% for part in result.parts %}
+         {% if part.header %}
+         <li>W {{part.header}} nr {{part.position}}</li>
+         {% else %} 
+         {% if part.fragment %}
+         <li>
+           <div style="">Tagi/Motywy: {% for tag in part.fragment.tags %}{{tag.name}} {% endfor %}</div>
+           {{part.fragment.short_html|safe}}
+         </li>
+         {% endif %}
+         {% endif %}
+         {% endfor %}
+       </ul>
+      </li>
+      {% empty %}
+      <p>No results.</p>
+      {% endfor %}
+      </ol>
+    </div>
+
+{% endblock %}
diff --git a/wolnelektury/urls.py b/wolnelektury/urls.py
index f379c9c..2a199c7 100644
@@ -40,6 +40,8 @@ urlpatterns = patterns('',
     # API
     (r'^api/', include('api.urls')),
 
+    url(r'^newsearch/', include('search.urls')),
+
     # Static files
     url(r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:], 'django.views.static.serve',
         {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),