From 180fe27d469608a6f3cdd5249311f93a5bc437b5 Mon Sep 17 00:00:00 2001
From: Marcin Koziej
Date: Thu, 24 Nov 2011 17:07:38 +0100
Subject: [PATCH] searching works, without fragments

Basic queries using Dublin Core fields work and the index has proper
tokens. The search index is built on import.
---
 .../management/commands/importbooks.py        |   5 +-
 apps/catalogue/models.py                      |  12 +-
 apps/search/__init__.py                       |   3 +
 apps/search/index.py                          | 308 ++++++++++++++++++
 apps/search/tests/__init__.py                 |   1 +
 apps/search/tests/files/fraszka-do-anusie.xml |  49 +++
 apps/search/tests/files/fraszki.xml           |  27 ++
 apps/search/tests/index.py                    |  32 ++
 wolnelektury/settings.py                      |   7 +
 9 files changed, 442 insertions(+), 2 deletions(-)
 create mode 100644 apps/search/__init__.py
 create mode 100644 apps/search/index.py
 create mode 100644 apps/search/tests/__init__.py
 create mode 100755 apps/search/tests/files/fraszka-do-anusie.xml
 create mode 100755 apps/search/tests/files/fraszki.xml
 create mode 100644 apps/search/tests/index.py

diff --git a/apps/catalogue/management/commands/importbooks.py b/apps/catalogue/management/commands/importbooks.py
index ecd3fcc97..4ea0fd359 100644
--- a/apps/catalogue/management/commands/importbooks.py
+++ b/apps/catalogue/management/commands/importbooks.py
@@ -28,6 +28,8 @@ class Command(BaseCommand):
             help='Don\'t build TXT file'),
         make_option('-P', '--no-build-pdf', action='store_false', dest='build_pdf', default=True,
             help='Don\'t build PDF file'),
+        make_option('-S', '--no-search-index', action='store_false', dest='search_index', default=True,
+            help='Don\'t build search index'),
         make_option('-w', '--wait-until', dest='wait_until', metavar='TIME',
             help='Wait until specified time (Y-M-D h:m:s)'),
     )
@@ -87,7 +89,8 @@ class Command(BaseCommand):
                             build_epub=options.get('build_epub'),
                             build_txt=options.get('build_txt'),
                             build_pdf=options.get('build_pdf'),
-                            build_mobi=options.get('build_mobi'))
+                            build_mobi=options.get('build_mobi'),
+                            search_index=options.get('search_index'))
                         files_imported += 1

                         if os.path.isfile(file_base + '.pdf'):
diff --git a/apps/catalogue/models.py b/apps/catalogue/models.py
index d90299611..5846f5343 100644
--- a/apps/catalogue/models.py
+++ b/apps/catalogue/models.py
@@ -31,6 +31,8 @@ from slughifi import slughifi
 from sortify import sortify
 from os import unlink

+import search
+
 TAG_CATEGORIES = (
     ('author', _('author')),
     ('epoch', _('epoch')),
@@ -623,6 +625,10 @@ class Book(models.Model):
         result = create_zip.delay(paths, self.slug)
         return result.wait()

+    def search_index(self):
+        with search.Index() as idx:
+            idx.index_book(self)
+
     @classmethod
     def from_xml_file(cls, xml_file, **kwargs):
         # use librarian to parse meta-data
@@ -638,7 +644,8 @@ class Book(models.Model):

     @classmethod
     def from_text_and_meta(cls, raw_file, book_info, overwrite=False,
-            build_epub=True, build_txt=True, build_pdf=True, build_mobi=True):
+            build_epub=True, build_txt=True, build_pdf=True, build_mobi=True,
+            search_index=True):
         import re

         # check for parts before we do anything
@@ -717,6 +724,9 @@ class Book(models.Model):
         if not settings.NO_BUILD_MOBI and build_mobi:
             book.build_mobi()

+        if not settings.NO_SEARCH_INDEX and search_index:
+            book.search_index()
+
         book_descendants = list(book.children.all())
         # add l-tag to descendants and their fragments
         # delete unnecessary EPUB files
diff --git a/apps/search/__init__.py b/apps/search/__init__.py
new file mode 100644
index 000000000..f45c150f1
--- /dev/null
+++ b/apps/search/__init__.py
@@ -0,0 +1,3 @@
+from index import Index, Search
+import lucene
+lucene.initVM(lucene.CLASSPATH)
diff --git a/apps/search/index.py b/apps/search/index.py
new file mode 100644
index 000000000..050fc5b49
--- /dev/null
+++ b/apps/search/index.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+from django.conf import settings
+from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
+    NumericField, Version, Document, JavaError, IndexSearcher, \
+    QueryParser, Term, PerFieldAnalyzerWrapper, \
+    SimpleAnalyzer, PolishAnalyzer, ArrayList, \
+    KeywordAnalyzer, NumericRangeQuery, BooleanQuery, \
+    BlockJoinQuery, BlockJoinCollector, TermsFilter, \
+    HashSet, BooleanClause
+    # KeywordAnalyzer
+import os
+import errno
+from librarian import dcparser
+from librarian.parser import WLDocument
+import catalogue.models
+
+
+class WLAnalyzer(PerFieldAnalyzerWrapper):
+    def __init__(self):
+        polish = PolishAnalyzer(Version.LUCENE_34)
+        simple = SimpleAnalyzer(Version.LUCENE_34)
+        keyword = KeywordAnalyzer(Version.LUCENE_34)
+        # not sure if needed: Field.Index.NOT_ANALYZED means basically the same
+
+        PerFieldAnalyzerWrapper.__init__(self, polish)
+
+        self.addAnalyzer("tags", simple)
+        self.addAnalyzer("technical_editors", simple)
+        self.addAnalyzer("editors", simple)
+        self.addAnalyzer("url", keyword)
+        self.addAnalyzer("source_url", keyword)
+        self.addAnalyzer("source_name", simple)
+        self.addAnalyzer("publisher", simple)
+        self.addAnalyzer("author", simple)
+        self.addAnalyzer("is_book", keyword)
+
+        # self.addAnalyzer("fragment_anchor", keyword)
+
+
+class IndexStore(object):
+    def __init__(self):
+        self.make_index_dir()
+        self.store = SimpleFSDirectory(File(settings.SEARCH_INDEX))
+
+    def make_index_dir(self):
+        try:
+            os.makedirs(settings.SEARCH_INDEX)
+        except OSError as exc:
+            if exc.errno == errno.EEXIST:
+                pass
+            else: raise
+
+
+class Index(IndexStore):
+    def __init__(self, analyzer=None):
+        IndexStore.__init__(self)
+        self.index = None
+        if not analyzer:
+            analyzer = WLAnalyzer()
+        self.analyzer = analyzer
+
+    def open(self, analyzer=None):
+        if self.index:
+            raise Exception("Index is already open")
+        self.index = IndexWriter(self.store, self.analyzer,
+            IndexWriter.MaxFieldLength.LIMITED)
+        return self.index
+
+    def close(self):
+        self.index.optimize()
+        self.index.close()
+        self.index = None
+
+    def remove_book(self, book):
+        q = NumericRangeQuery.newIntRange("book_id", book.id, book.id, True, True)
+        self.index.deleteDocuments(q)
+
+    def index_book(self, book, overwrite=True):
+        if overwrite:
+            self.remove_book(book)
+
+        doc = self.extract_metadata(book)
+        parts = self.extract_content(book)
+        block = ArrayList().of_(Document)
+
+        for p in parts:
+            block.add(p)
+        block.add(doc)
+        self.index.addDocuments(block)
+
+    master_tags = [
+        'opowiadanie',
+        'powiesc',
+        'dramat_wierszowany_l',
+        'dramat_wierszowany_lp',
+        'dramat_wspolczesny', 'liryka_l', 'liryka_lp',
+        'wywiad'
+        ]
+
+    def create_book_doc(self, book):
+        """
+        Create a Lucene document connected to the book.
+        """
+        doc = Document()
+        doc.add(NumericField("book_id", Field.Store.YES, True).setIntValue(book.id))
+        if book.parent is not None:
+            doc.add(NumericField("parent_id", Field.Store.YES, True).setIntValue(book.parent.id))
+        return doc
+
+    def extract_metadata(self, book):
+        book_info = dcparser.parse(book.xml_file)
+
+        doc = self.create_book_doc(book)
+        doc.add(Field("slug", book.slug, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS))
+        doc.add(Field("tags", ','.join([t.name for t in book.tags]), Field.Store.NO, Field.Index.ANALYZED))
+        doc.add(Field("is_book", 'true', Field.Store.NO,
+                      Field.Index.NOT_ANALYZED))
+
+        # validator, name
+        for field in dcparser.BookInfo.FIELDS:
+            if hasattr(book_info, field.name):
+                if not getattr(book_info, field.name):
+                    continue
+                # since no type information is available, we use the validator
+                type_indicator = field.validator
+                if type_indicator == dcparser.as_unicode:
+                    s = getattr(book_info, field.name)
+                    if field.multiple:
+                        s = ', '.join(s)
+                    try:
+                        doc.add(Field(field.name, s, Field.Store.NO, Field.Index.ANALYZED))
+                    except JavaError as je:
+                        raise Exception("failed to add field: %s = '%s', %s(%s)" % (field.name, s, je.message, je.args))
+                elif type_indicator == dcparser.as_person:
+                    p = getattr(book_info, field.name)
+                    if isinstance(p, dcparser.Person):
+                        persons = unicode(p)
+                    else:
+                        persons = ', '.join(map(unicode, p))
+                    doc.add(Field(field.name, persons, Field.Store.NO, Field.Index.ANALYZED))
+                elif type_indicator == dcparser.as_date:
+                    dt = getattr(book_info, field.name)
+                    doc.add(Field(field.name, "%04d%02d%02d" % (dt.year, dt.month, dt.day), Field.Store.NO, Field.Index.NOT_ANALYZED))
+        return doc
+
+    def get_master(self, root):
+        for master in root.iter():
+            if master.tag in self.master_tags:
+                return master
+
+    def extract_content(self, book):
+        wld = WLDocument.from_file(book.xml_file.path)
+        root = wld.edoc.getroot()
+
+        # First we build a sequence of top-level items. Each carries:
+        #   book_id
+        #   header_index - the 0-indexed position of the header element
+        #   content
+        master = self.get_master(root)
+        if master is None:
+            return []
+
+        header_docs = []
+        for position, header in enumerate(master):
+            doc = self.create_book_doc(book)
+            doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
+            doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
+            content = u' '.join([t for t in header.itertext()])
+            doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
+            header_docs.append(doc)
+
+        def walker(node):
+            yield node, None
+            for child in list(node):
+                for b, e in walker(child):
+                    yield b, e
+            yield None, node
+            return
+
+        # Then we create a document for each fragment. Each carries:
+        #   fragment_anchor - the anchor
+        #   themes - list of themes [not indexed]
+        fragment_docs = []
+        # will contain fragment id -> {'content': [], 'themes': []}
+        fragments = {}
+        for start, end in walker(master):
+            if start is not None and start.tag == 'begin':
+                fid = start.attrib['id'][1:]
+                fragments[fid] = {'content': [], 'themes': []}
+                fragments[fid]['content'].append(start.tail)
+            elif start is not None and start.tag == 'motyw':
+                fid = start.attrib['id'][1:]
+                fragments[fid]['themes'].append(start.text)
+                fragments[fid]['content'].append(start.tail)
+            elif start is not None and start.tag == 'end':
+                fid = start.attrib['id'][1:]
+                if fid not in fragments:
+                    continue  # a broken node, skip it
+                frag = fragments[fid]
+                del fragments[fid]
+
+                def jstr(l):
+                    return u' '.join(map(
+                        lambda x: u'(none)' if x is None else unicode(x),
+                        l))
+                s = u"Fragment %s complete, themes: %s contents: %s" % \
+                    (fid, jstr(frag['themes']), jstr(frag['content']))
+                print(s.encode('utf-8'))
+
+                doc = self.create_book_doc(book)
+                doc.add(Field("fragment_anchor", fid,
+                              Field.Store.YES, Field.Index.NOT_ANALYZED))
+                doc.add(Field("content",
+                              u' '.join(filter(lambda s: s is not None, frag['content'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+                doc.add(Field("themes",
+                              u' '.join(filter(lambda s: s is not None, frag['themes'])),
+                              Field.Store.NO, Field.Index.ANALYZED))
+
+                fragment_docs.append(doc)
+            elif start is not None:
+                for frag in fragments.values():
+                    frag['content'].append(start.text)
+            elif end is not None:
+                for frag in fragments.values():
+                    frag['content'].append(end.tail)
+
+        return header_docs + fragment_docs
+
+    def __enter__(self):
+        self.open()
+        return self
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+
+class Search(IndexStore):
+    def __init__(self, default_field="content"):
+        IndexStore.__init__(self)
+        self.analyzer = PolishAnalyzer(Version.LUCENE_34)
+        ## self.analyzer = WLAnalyzer()
+        self.searcher = IndexSearcher(self.store, True)
+        self.parser = QueryParser(Version.LUCENE_34, default_field,
+                                  self.analyzer)
+
+        self.parent_filter = TermsFilter()
+        self.parent_filter.addTerm(Term("is_book", "true"))
+
+    def query(self, query):
+        return self.parser.parse(query)
+
+    def wrapjoins(self, query, fields=[]):
+        """
+        This function modifies the query recursively: Term and Phrase
+        queries contained in it that match the provided fields are
+        wrapped in a BlockJoinQuery and so delegated to child documents.
+        """
+        if BooleanQuery.instance_(query):
+            qs = BooleanQuery.cast_(query)
+            for clause in qs:
+                clause = BooleanClause.cast_(clause)
+                clause.setQuery(self.wrapjoins(clause.getQuery(), fields))
+            return qs
+        else:
+            termset = HashSet()
+            query.extractTerms(termset)
+            for t in termset:
+                t = Term.cast_(t)
+                if t.field() not in fields:
+                    return query
+            return BlockJoinQuery(query, self.parent_filter,
+                                  BlockJoinQuery.ScoreMode.Total)
+
+    def simple_search(self, query, max_results=50):
+        """Returns (books, total_hits)."""
+
+        tops = self.searcher.search(self.query(query), max_results)
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return (bks, tops.totalHits)
+
+    def search(self, query, max_results=50):
+        query = self.query(query)
+        query = self.wrapjoins(query, ["content", "themes"])
+
+        tops = self.searcher.search(query, max_results)
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return (bks, tops.totalHits)
+
+    def bsearch(self, query, max_results=50):
+        q = self.query(query)
+        f = TermsFilter()
+        f.addTerm(Term("is_book", "true"))
+        bjq = BlockJoinQuery(q, f, BlockJoinQuery.ScoreMode.Avg)
+
+        tops = self.searcher.search(bjq, max_results)
+        bks = []
+        for found in tops.scoreDocs:
+            doc = self.searcher.doc(found.doc)
+            bks.append(catalogue.models.Book.objects.get(id=doc.get("book_id")))
+        return (bks, tops.totalHits)
diff --git a/apps/search/tests/__init__.py b/apps/search/tests/__init__.py
new file mode 100644
index 000000000..403c290f0
--- /dev/null
+++ b/apps/search/tests/__init__.py
@@ -0,0 +1 @@
+from search.tests.index import *
diff --git a/apps/search/tests/files/fraszka-do-anusie.xml b/apps/search/tests/files/fraszka-do-anusie.xml
new file mode 100755
index 000000000..3bbda155e
--- /dev/null
+++ b/apps/search/tests/files/fraszka-do-anusie.xml
@@ -0,0 +1,49 @@
+
+
+
+
+Sęp Szarzyński, Mikołaj
+Fraszka do Anusie
+Sekuła, Aleksandra
+Sutkowska, Olga
+Fundacja Nowoczesna Polska
+Barok
+Liryka
+Fraszka
+Publikacja zrealizowana w ramach projektu Wolne Lektury (http://wolnelektury.pl). Reprodukcja cyfrowa wykonana przez Bibliotekę Narodową z egzemplarza pochodzącego ze zbiorów BN.
+http://wolnelektury.pl/katalog/lektura/fraszka-do-anusie
+http://www.polona.pl/Content/8759
+Szarzyński Sęp, Mikołaj (ca 1550-1581), Rytmy abo Wiersze polskie w wyborze, E. Wende, Warszawa, 1914
+Domena publiczna - Mikołaj Sęp Szarzyński zm. 1581
+1581
+xml
+text
+text
+2008-12-29
+L
+L
+pol
+
+
+
+
+Mikołaj Sęp Szarzyński
+
+Fraszka do Anusie
+
+
+
+Kochanek, Łzy, Miłość, Oko, Serce, WzrokJeśli oczu hamować swoich nie umiały/
+Leśnych krynic boginie, aby nie płakały,/
+Gdy baczyłybaczyły --- tu: zobaczyły, patrzyły na. przy studni Narcyza pięknego,/
+A on umarł prze miłość oblicza swojego;/
+Jeśli nieśmiertelnym stanom żałość rozkazuje,/
+Gdy niebaczna fortuna co niesłusznie psuje:
+
+Jakoż ja mam hamować, by na lice moje/
+Z oczu smutnych żałośne nie płynęły zdroje?/
+Jako serce powściągać, aby nie wzdychało/
+I od ciężkiej żałości omdlewać nie miało?
+
+
+
diff --git a/apps/search/tests/files/fraszki.xml b/apps/search/tests/files/fraszki.xml
new file mode 100755
index 000000000..edb29abbc
--- /dev/null
+++ b/apps/search/tests/files/fraszki.xml
@@ -0,0 +1,27 @@
+
+
+
+
+Kochanowski, Jan
+Fraszki
+http://wolnelektury.pl/katalog/lektura/fraszka-do-anusie
+
+Fundacja Nowoczesna Polska
+Renesans
+Liryka
+Fraszka
+
+
+http://wolnelektury.pl/lektura/fraszki
+
+Domena publiczna - Jan Kochanowski zm. 1584
+1584
+xml
+text
+
+text
+2008-11-12
+pol
+
+
+
diff --git a/apps/search/tests/index.py b/apps/search/tests/index.py
new file mode 100644
index 000000000..c2b9110cf
--- /dev/null
+++ b/apps/search/tests/index.py
@@ -0,0 +1,32 @@
+from __future__ import with_statement
+
+from search import Index, Search
+from catalogue import models
+from catalogue.test_utils import WLTestCase
+from lucene import PolishAnalyzer, Version
+#from nose.tools import raises
+from os import path
+
+
+class BookSearchTests(WLTestCase):
+    def setUp(self):
+        WLTestCase.setUp(self)
+
+        txt = path.join(path.dirname(__file__), 'files/fraszka-do-anusie.xml')
+        self.book = models.Book.from_xml_file(txt)
+
+        search = Index()  # PolishAnalyzer(Version.LUCENE_34)
+        with search:
+            search.index_book(self.book)
+        print "index: %s" % search
+
+    def test_search(self):
+        search = Search()
+        bks, _ = search.search("wolne")
+        self.assertEqual(len(bks), 1)
+        self.assertEqual(bks[0].id, 1)
+
+        bks, _ = search.search("technical_editors: sutkowska")
+        self.assertEqual(len(bks), 1)
+        self.assertEqual(bks[0].id, 1)
diff --git a/wolnelektury/settings.py b/wolnelektury/settings.py
index 527b70277..757447e71 100644
--- a/wolnelektury/settings.py
+++ b/wolnelektury/settings.py
@@ -60,6 +60,7 @@ USE_I18N = True
 # Example: "/home/media/media.lawrence.com/"
 MEDIA_ROOT = path.join(PROJECT_DIR, '../media')
 STATIC_ROOT = path.join(PROJECT_DIR, 'static')
+SEARCH_INDEX = path.join(MEDIA_ROOT, 'search')

 # URL that handles the media served from MEDIA_ROOT. Make sure to use a
 # trailing slash if there is a path component (optional in other cases).
@@ -150,6 +151,10 @@ INSTALLED_APPS = [
     'sponsors',
     'stats',
     'suggest',
+    'search',
+
+    # for running tests
+    'django_nose',
 ]

 #CACHE_BACKEND = 'locmem:///?max_entries=3000'
@@ -228,6 +233,7 @@ NO_BUILD_EPUB = False
 NO_BUILD_TXT = False
 NO_BUILD_PDF = False
 NO_BUILD_MOBI = False
+NO_SEARCH_INDEX = False

 ALL_EPUB_ZIP = 'wolnelektury_pl_epub'
 ALL_PDF_ZIP = 'wolnelektury_pl_pdf'
@@ -246,6 +252,7 @@ BROKER_PASSWORD = "guest"
 BROKER_VHOST = "/"


+
 # Load localsettings, if they exist
 try:
     from localsettings import *
-- 
2.20.1
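
For reference, a minimal sketch of the workflow this patch enables. Indexing
happens automatically on import (from_text_and_meta calls book.search_index(),
and the new importbooks -S/--no-search-index switch turns it off); the snippet
below does the same by hand. It assumes the patch is applied, a configured
Django environment with a writable SEARCH_INDEX directory, and an already
imported book; the slug is a hypothetical example taken from the test fixture.
Note that importing the search app runs lucene.initVM(), so no extra JVM setup
is needed:

    from catalogue import models
    from search import Index, Search  # importing search initializes the JVM

    # Index is a context manager: __exit__ optimizes and closes the
    # IndexWriter, which is what commits the added documents.
    book = models.Book.objects.get(slug='fraszka-do-anusie')  # example slug
    with Index() as idx:
        idx.index_book(book)

    # simple_search() parses against the default "content" field; fielded
    # queries such as "technical_editors: sutkowska" also work, as the tests
    # show. search() additionally wraps content/themes clauses in block joins
    # so fragment-level hits resolve to their parent books.
    books, total = Search().simple_search("wolne")
    for b in books:
        print b.slug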
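
The walker() generator in extract_content() drives fragment collection: it
flattens the XML tree into a stream of (start, end) events, so <begin>,
<motyw> and <end> markers are seen in document order and intervening text can
be appended to every still-open fragment. A standalone illustration of the
event stream, runnable outside the project with plain lxml:

    from lxml import etree

    def walker(node):
        yield node, None              # node is being entered
        for child in list(node):
            for b, e in walker(child):
                yield b, e
        yield None, node              # node is being left

    root = etree.XML("<a><b/><c><d/></c></a>")
    events = [(s.tag if s is not None else None,
               e.tag if e is not None else None)
              for s, e in walker(root)]
    print events
    # [('a', None), ('b', None), (None, 'b'), ('c', None), ('d', None),
    #  (None, 'd'), (None, 'c'), (None, 'a')]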
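
On the ordering in index_book(): IndexWriter.addDocuments() stores the batch
as one contiguous block, and Lucene's block-join queries expect the parent
document (here, the one matching the "is_book" filter) to close its block;
that is how a hit on a header or fragment document is resolved to its book.
A sketch of the query side, mirroring bsearch() and assuming the same
PyLucene 3.4 block-join contrib imports used by index.py (initVM() must
already have run, e.g. via importing the search app):

    from lucene import (BlockJoinQuery, PolishAnalyzer, QueryParser,
                        Term, TermsFilter, Version)

    def book_level_query(text):
        """Parse a content query and lift it from fragment to book level."""
        parser = QueryParser(Version.LUCENE_34, "content",
                             PolishAnalyzer(Version.LUCENE_34))
        child_query = parser.parse(text)   # matches header/fragment docs
        # The filter marks which documents in a block are parents.
        parent_filter = TermsFilter()
        parent_filter.addTerm(Term("is_book", "true"))
        return BlockJoinQuery(child_query, parent_filter,
                              BlockJoinQuery.ScoreMode.Avg)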