From: Marcin Koziej
Date: Tue, 8 Nov 2011 09:49:08 +0000 (+0100)
Subject: store contents in index for highlighting fragments
X-Git-Url: https://git.mdrn.pl/wolnelektury.git/commitdiff_plain/f109b7f6e35d54281f1edf4b684c49db621d36c5?ds=sidebyside;hp=--cc

store contents in index for highlighting fragments
---

f109b7f6e35d54281f1edf4b684c49db621d36c5
diff --git a/apps/search/index.py b/apps/search/index.py
index 53812cbb0..36f42478d 100644
--- a/apps/search/index.py
+++ b/apps/search/index.py
@@ -8,7 +8,7 @@ from lucene import SimpleFSDirectory, IndexWriter, File, Field, \
     BlockJoinQuery, BlockJoinCollector, TermsFilter, \
     HashSet, BooleanClause, Term, CharTermAttribute, \
     PhraseQuery, StringReader, TermQuery, BlockJoinQuery, \
-    Sort
+    Sort, Integer
     # KeywordAnalyzer
 import sys
 import os
@@ -104,7 +104,7 @@
         'wywiad'
         ]
 
-    skip_header_tags = ['autor_utworu', 'nazwa_utworu']
+    skip_header_tags = ['autor_utworu', 'nazwa_utworu', 'dzielo_nadrzedne']
 
     def create_book_doc(self, book):
         """
@@ -179,7 +179,7 @@
             doc.add(NumericField("header_index", Field.Store.YES, True).setIntValue(position))
             doc.add(Field("header_type", header.tag, Field.Store.YES, Field.Index.NOT_ANALYZED))
             content = u' '.join([t for t in header.itertext()])
-            doc.add(Field("content", content, Field.Store.NO, Field.Index.ANALYZED))
+            doc.add(Field("content", content, Field.Store.YES, Field.Index.ANALYZED))
             header_docs.append(doc)
 
         def walker(node):
@@ -222,7 +222,7 @@
                            Field.Store.YES, Field.Index.NOT_ANALYZED))
             doc.add(Field("content", u' '.join(filter(lambda s: s is not None, frag['content'])),
-                          Field.Store.NO, Field.Index.ANALYZED))
+                          Field.Store.YES, Field.Index.ANALYZED))
             doc.add(Field("themes", u' '.join(filter(lambda s: s is not None, frag['themes'])),
                           Field.Store.NO, Field.Index.ANALYZED))
@@ -373,7 +373,7 @@ class Search(IndexStore):
 class MultiSearch(Search):
     """Class capable of IMDb-like searching"""
     def get_tokens(self, queryreader):
-        if isinstance(queryreader, str):
+        if isinstance(queryreader, str) or isinstance(queryreader, unicode):
             queryreader = StringReader(queryreader)
         queryreader.reset()
         tokens = self.analyzer.reusableTokenStream('content', queryreader)
@@ -383,29 +383,78 @@
             toks.append(cta.toString())
         return toks
 
-    def make_phrase(self, tokens, field='content', joined=False, slop=2):
+    def make_phrase(self, tokens, field='content', slop=2):
         phrase = PhraseQuery()
         phrase.setSlop(slop)
         for t in tokens:
             term = Term(field, t)
             phrase.add(term)
-        if joined:
-            phrase = self.content_query(phrase)
         return phrase
 
-    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD, joined=False):
+    def make_term_query(self, tokens, field='content', modal=BooleanClause.Occur.SHOULD):
         q = BooleanQuery()
         for t in tokens:
             term = Term(field, t)
             q.add(BooleanClause(TermQuery(term), modal))
-        if joined:
-            q = self.content_query(q)
         return q
 
     def content_query(self, query):
         return BlockJoinQuery(query, self.parent_filter,
             BlockJoinQuery.ScoreMode.Total)
 
+    def search_perfect(self, tokens, max_results=20):
+        qrys = [self.make_phrase(tokens, field=fld) for fld in ['author', 'title', 'content']]
+
+        books = []
+        for q in qrys:
+            top = self.searcher.search(q, max_results)
+            for found in top.scoreDocs:
+                book_info = self.searcher.doc(found.doc)
+                books.append((found.score, catalogue.models.Book.objects.get(id=book_info.get("book_id")), []))
+        return books
+
+    def search_everywhere(self, tokens, max_results=20):
+        q = BooleanQuery()
+        in_meta = BooleanQuery()
+        in_content = BooleanQuery()
+
+        for fld in ['themes', 'content']:
+            in_content.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
+        for fld in ['author', 'title', 'epochs', 'genres', 'kinds']:
+            in_meta.add(BooleanClause(self.make_term_query(tokens, field=fld), BooleanClause.Occur.SHOULD))
+
+        q.add(BooleanClause(in_meta, BooleanClause.Occur.MUST))
+        in_content_join = self.content_query(in_content)
+        q.add(BooleanClause(in_content_join, BooleanClause.Occur.MUST))
+
+        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
+
+        self.searcher.search(q, collector)
+
+        books = []
+
+        top_groups = collector.getTopGroups(in_content_join, Sort.RELEVANCE, 0, max_results, 0, True)
+        if top_groups:
+            for grp in top_groups.groups:
+                doc_id = Integer.cast_(grp.groupValue).intValue()
+                book_data = self.searcher.doc(doc_id)
+                book = catalogue.models.Book.objects.get(id=book_data.get("book_id"))
+                parts = []
+                for part in grp.scoreDocs:
+                    part_data = self.searcher.doc(part.doc)
+                    header_type = part_data.get("header_type")
+                    if header_type:
+                        parts.append((part.score, {"header": header_type, "position": int(part_data.get("header_index"))}))
+                    fragment = part_data.get("fragment_anchor")
+                    if fragment:
+                        fragment = book.fragments.get(anchor=fragment)
+                        parts.append((part.score, {"fragment": fragment}))
+                books.append((grp.maxScore, book, parts))
+
+        return books
+
+
     def multisearch(self, query, max_results=50):
         """
         Search strategy:
@@ -417,48 +466,35 @@
             -> tags
             -> content
         """
-        queryreader = StringReader(query)
-        tokens = self.get_tokens(queryreader)
-
-        top_level = BooleanQuery()
-        Should = BooleanClause.Occur.SHOULD
+        # queryreader = StringReader(query)
+        # tokens = self.get_tokens(queryreader)
 
-        phrase_level = BooleanQuery()
-        phrase_level.setBoost(1.3)
+        # top_level = BooleanQuery()
+        # Should = BooleanClause.Occur.SHOULD
 
-        p_content = self.make_phrase(tokens, joined=True)
-        p_title = self.makxe_phrase(tokens, 'title')
-        p_author = self.make_phrase(tokens, 'author')
+        # phrase_level = BooleanQuery()
+        # phrase_level.setBoost(1.3)
 
-        phrase_level.add(BooleanClause(p_content, Should))
-        phrase_level.add(BooleanClause(p_title, Should))
-        phrase_level.add(BooleanClause(p_author, Should))
+        # p_content = self.make_phrase(tokens, joined=True)
+        # p_title = self.make_phrase(tokens, 'title')
+        # p_author = self.make_phrase(tokens, 'author')
 
-        kw_level = BooleanQuery()
+        # phrase_level.add(BooleanClause(p_content, Should))
+        # phrase_level.add(BooleanClause(p_title, Should))
+        # phrase_level.add(BooleanClause(p_author, Should))
 
-        kw_level.add(self.make_term_query(tokens, 'author'), Should)
-        j_themes = self.make_term_query(tokens, 'themes', joined=True)
-        kw_level.add(j_themes, Should)
-        kw_level.add(self.make_term_query(tokens, 'tags'), Should)
-        j_con = self.make_term_query(tokens, joined=True)
-        kw_level.add(j_con, Should)
+        # kw_level = BooleanQuery()
 
-        top_level.add(BooleanClause(phrase_level, Should))
-        top_level.add(BooleanClause(kw_level, Should))
+        # kw_level.add(self.make_term_query(tokens, 'author'), Should)
+        # j_themes = self.make_term_query(tokens, 'themes', joined=True)
+        # kw_level.add(j_themes, Should)
+        # kw_level.add(self.make_term_query(tokens, 'tags'), Should)
+        # j_con = self.make_term_query(tokens, joined=True)
+        # kw_level.add(j_con, Should)
 
-        collector = BlockJoinCollector(Sort.RELEVANCE, 100, True, True)
-
-        self.searcher.search(kw_level, collector)
+        # top_level.add(BooleanClause(phrase_level, Should))
+        # top_level.add(BooleanClause(kw_level, Should))
-
-        # frazy w treści:
-        # ph1 = collector.getTopGroups(j_themes, Sort.RELEVANCE,
-        #                              0, 10, 0, True)
-        # reload(search.index); realod(search); s = search.MultiSearch(); s.multisearch(u'dusiołek')
-        # ph2 = collector.getTopGroups(j_con, Sort.RELEVANCE,
-        #                              0, 10, 0, True)
-
-        import pdb; pdb.set_trace();
-
         return None
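
[Note, not part of the patch] The MultiSearch methods added above are easiest to read from the caller's side. A minimal usage sketch, assuming an index already built by apps/search/index.py and PyLucene initialized the way apps/search/views.py does it:

    from search import MultiSearch, JVM

    JVM.attachCurrentThread()      # PyLucene: the calling thread must be attached to the JVM
    srch = MultiSearch()

    tokens = srch.get_tokens(u'dusiołek')     # analyzer tokens for the raw query string
    perfect = srch.search_perfect(tokens)     # phrase matches in author/title/content
    broad = srch.search_everywhere(tokens)    # block-join query: book (parent) grouped with
                                              # matching header/fragment (child) documents

    for score, book, parts in broad:
        print("%.2f %s (%d matching parts)" % (score, book.title, len(parts)))
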
diff --git a/apps/search/urls.py b/apps/search/urls.py
new file mode 100644
index 000000000..f93d65e79
--- /dev/null
+++ b/apps/search/urls.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+#
+from django.conf.urls.defaults import *
+
+urlpatterns = patterns('search.views',
+    url(r'^$', 'main', name='newsearch'),
+)
+
diff --git a/apps/search/views.py b/apps/search/views.py
new file mode 100644
index 000000000..5ed610705
--- /dev/null
+++ b/apps/search/views.py
@@ -0,0 +1,27 @@
+
+
+from django.shortcuts import render_to_response, get_object_or_404
+from django.template import RequestContext
+from django.contrib.auth.decorators import login_required
+from django.views.decorators import cache
+
+from catalogue.utils import get_random_hash
+from catalogue.models import Book, Tag
+from catalogue import forms
+from search import MultiSearch, JVM
+
+
+def main(request):
+    results = {}
+    JVM.attachCurrentThread()  # where to put this?
+    srch = MultiSearch()
+
+    results = None
+    if 'q' in request.GET:
+        toks = srch.get_tokens(request.GET['q'])
+        results = srch.search_perfect(toks) + srch.search_everywhere(toks)
+        results.sort(lambda a, b: cmp(a[0], b[0]) < 0)
+        print("searched, results are: %s\n" % results)
+
+    return render_to_response('newsearch/search.html', {"results": results},
+        context_instance=RequestContext(request))
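
[Note, not part of the patch] One detail in the view above: list.sort() with a comparison function expects a negative/zero/positive integer, but cmp(a[0], b[0]) < 0 returns a boolean, so the combined results end up in an essentially arbitrary order. A minimal sketch of the presumably intended ordering, assuming the first tuple element is the relevance score and higher scores should come first:

    results.sort(key=lambda r: r[0], reverse=True)
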

diff --git a/wolnelektury/templates/newsearch/search.html b/wolnelektury/templates/newsearch/search.html
new file mode 100644
index 000000000..e018b2111
--- /dev/null
+++ b/wolnelektury/templates/newsearch/search.html
@@ -0,0 +1,45 @@
+{% extends "base.html" %}
+{% load i18n %}
+{% load catalogue_tags %}
+
+{% block title %}Leśmianator w WolneLektury.pl{% endblock %}
+
+{% block metadescription %}Stwórz własny wierszmiks z utworów znajdujących się na Wolnych Lekturach.{% endblock %}
+
+{% block bodyid %}lesmianator{% endblock %}
+
+{% block body %}
+
+  <h1>Search</h1>
+
+  <form action="." method="get">
+    <input type="text" name="q" value="{{request.GET.q}}" />
+    <input type="submit" value="Search" />
+  </form>
+
+  <ol>
+  {% for score,book,parts in results %}
+    <li>
+      {{book.author}} - {{book.title}} (id: {{book.id}}, score: {{score}})
+      <ul>
+      {% for score,part in parts %}
+        {% if part.header %}
+        <li>W {{part.header}} nr {{part.position}}</li>
+        {% else %}
+        {% if part.fragment %}
+        <li>
+          {% for tag in part.fragment.tags %}{{tag.name}} {% endfor %}
+          {{part.fragment.text|safe}}
+        </li>
+        {% endif %}
+        {% endif %}
+      {% endfor %}
+      </ul>
+    </li>
+  {% empty %}
+    <li>No results.</li>
+  {% endfor %}
+  </ol>
+
+{% endblock %}
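
[Note, not part of the patch] The point of flipping the "content" field to Field.Store.YES is that the matched text can now be read back from the index and highlighted in search results. A rough sketch of how the stored field could feed Lucene's highlighter; the wiring below is an assumption (it presumes PyLucene exposes Highlighter, QueryScorer and SimpleHTMLFormatter, and that searcher, analyzer and query are set up as in MultiSearch), not code from this commit:

    from lucene import Highlighter, QueryScorer, SimpleHTMLFormatter

    def highlight_hit(searcher, analyzer, query, doc_id, max_fragments=3):
        stored = searcher.doc(doc_id).get("content")   # readable only because of Field.Store.YES
        if not stored:
            return []
        highlighter = Highlighter(SimpleHTMLFormatter("<b>", "</b>"), QueryScorer(query))
        # getBestFragments(analyzer, fieldName, text, maxNumFragments) returns snippet strings
        return highlighter.getBestFragments(analyzer, "content", stored, max_fragments)
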