Show translator in results
author    Marcin Koziej <marcin.koziej@nowoczesnapolska.org.pl>
          Fri, 2 Nov 2012 12:23:30 +0000 (13:23 +0100)
committer Marcin Koziej <marcin.koziej@nowoczesnapolska.org.pl>
          Fri, 2 Nov 2012 12:23:30 +0000 (13:23 +0100)
Filter out tags duplicated by PD counter entries

apps/catalogue/templates/catalogue/search_multiple_hits.html
apps/search/index.py
apps/search/views.py

diff --git a/apps/catalogue/templates/catalogue/search_multiple_hits.html b/apps/catalogue/templates/catalogue/search_multiple_hits.html
index 70988f3..3724302 100644
     </div>
     {% endif %}
 
+    {% if results.translator %}
+    <div class="book-list-header">
+      <div class="book-box-inner">
+       <p>{% trans "Results by translators" %}</p>
+      </div>
+    </div>
+    <div>
+      <ol class="work-list">
+       {% for translator in results.translator %}<li class="Book-item">{% book_short translator.book %}</li>{% endfor %}
+      </ol>
+    </div>
+    {% endif %}
+
     {% if results.title %}
     <div class="book-list-header">
       <div class="book-box-inner">
diff --git a/apps/search/index.py b/apps/search/index.py
index 484340a..66a7b34 100644
@@ -832,7 +832,8 @@ class Search(SolrIndex):
             log.error("Cannot open snippet file for book id = %d [rev=%d], %s" % (book_id, revision, e))
             return []
         finally:
-            snippets.close()
+            if snippets:
+                snippets.close()
 
             # remove verse end markers..
         snips = map(lambda s: s and s.replace("/\n", "\n"), snips)
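
The guard added in this hunk matters because `snippets` is only assigned if opening the snippet store succeeds; closing it unconditionally in the `finally` block would raise a second error on the failure path. A minimal, self-contained sketch of the same pattern on a plain file follows; `read_snippet_file` and its `path` argument are illustrative stand-ins, not the project's real snippet API.

import logging

log = logging.getLogger(__name__)

def read_snippet_file(path):
    # Mirror of the guarded cleanup above: close the handle only if
    # open() actually succeeded, so a failed open does not trigger a
    # second exception inside the finally block.
    snippets = None
    try:
        snippets = open(path, 'rb')
        return snippets.read()
    except IOError as e:
        log.error("Cannot open snippet file %s: %s", path, e)
        return b''
    finally:
        if snippets:
            snippets.close()
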
@@ -867,6 +868,8 @@ class Search(SolrIndex):
         res = self.apply_filters(query, filters).execute()
 
         tags = []
+        pd_tags = []
+
         for doc in res:
             is_pdcounter = doc.get('is_pdcounter', False)
             category = doc.get('tag_category')
@@ -879,16 +882,18 @@ class Search(SolrIndex):
                         tag.category = 'pd_book'  # make it look more lik a tag.
                     else:
                         print "Warning. cannot get pdcounter tag_id=%d from db; cat=%s" % (int(doc.get('tag_id')), category)
+                    pd_tags.append(tag)
                 else:
                     tag = catalogue.models.Tag.objects.get(id=doc.get("tag_id"))
-                    # don't add the pdcounter tag if same tag already exists
-
-                tags.append(tag)
+                    tags.append(tag)
 
             except catalogue.models.Tag.DoesNotExist: pass
             except PDCounterAuthor.DoesNotExist: pass
             except PDCounterBook.DoesNotExist: pass
 
+        tags_slugs = set(map(lambda t: t.slug, tags))
+        tags = tags + filter(lambda t: not t.slug in tags_slugs, pd_tags)
+
         log.debug('search_tags: %s' % tags)
 
         return tags
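
The net effect of the second hunk: PD-counter entries are collected in a separate pd_tags list and only merged in when no regular catalogue tag already uses the same slug. A standalone sketch of that merge step, with a namedtuple standing in for catalogue.models.Tag / PDCounterAuthor and example slugs and categories:

from collections import namedtuple

Tag = namedtuple('Tag', 'slug category')

def merge_pd_tags(tags, pd_tags):
    # Keep every regular tag; add a PD-counter pseudo-tag only if its
    # slug is not already taken by a regular tag.
    seen = set(t.slug for t in tags)
    return tags + [t for t in pd_tags if t.slug not in seen]

tags = [Tag('adam-mickiewicz', 'author')]
pd_tags = [Tag('adam-mickiewicz', 'pd_author'), Tag('cyprian-norwid', 'pd_author')]
merged = merge_pd_tags(tags, pd_tags)
# -> the duplicate 'adam-mickiewicz' PD entry is dropped, 'cyprian-norwid' is kept
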
diff --git a/apps/search/views.py b/apps/search/views.py
index 36dd52c..2109a73 100644
@@ -124,15 +124,17 @@ def main(request):
     tags = split_tags(tags)
 
     author_results = search.search_phrase(query, 'authors', book=True)
+    translator_results = search.search_phrase(query, 'translators', book=True)
+
     title_results = search.search_phrase(query, 'title', book=True)
 
     # Boost main author/title results with mixed search, and save some of its results for end of list.
     # boost author, title results
-    author_title_mixed = search.search_some(query, ['authors', 'title', 'tags'], query_terms=theme_terms)
+    author_title_mixed = search.search_some(query, ['authors', 'translators', 'title', 'tags'], query_terms=theme_terms)
     author_title_rest = []
 
     for b in author_title_mixed:
-        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + title_results)
+        also_in_mixed = filter(lambda ba: ba.book_id == b.book_id, author_results + translator_results + title_results)
         for b2 in also_in_mixed:
             b2.boost *= 1.1
         if also_in_mixed is []:
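
In this hunk, translator hits join the author and title hits in the boosting pass: for each hit from the mixed query, matching primary hits get a 1.1 boost, and mixed hits with no primary match are set aside for the end of the list. A simplified sketch of that pass; SimpleHit and boost_primary_hits are stand-ins, modelling only the book_id and boost attributes that the diff relies on.

class SimpleHit(object):
    def __init__(self, book_id, boost=1.0):
        self.book_id = book_id
        self.boost = boost

def boost_primary_hits(mixed, primary):
    # primary = author + translator + title hits; mixed = combined query.
    rest = []
    for m in mixed:
        matches = [p for p in primary if p.book_id == m.book_id]
        for p in matches:
            p.boost *= 1.1
        if not matches:
            rest.append(m)
    return rest

primary = [SimpleHit(1), SimpleHit(2)]
mixed = [SimpleHit(1), SimpleHit(3)]
leftover = boost_primary_hits(mixed, primary)
# primary[0].boost is now 1.1; leftover holds only the book_id 3 hit
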
@@ -155,15 +157,17 @@ def main(request):
                     return True
             return False
         return f
-    f = already_found(author_results + title_results + text_phrase)
+    f = already_found(author_results + translator_results + title_results + text_phrase)
     everywhere = filter(lambda x: not f(x), everywhere)
 
     author_results = SearchResult.aggregate(author_results)
+    translator_results = SearchResult.aggregate(translator_results)
     title_results = SearchResult.aggregate(title_results)
 
     everywhere = SearchResult.aggregate(everywhere, author_title_rest)
 
     for field, res in [('authors', author_results),
+                       ('translators', translator_results),
                        ('title', title_results),
                        ('text', text_phrase),
                        ('text', everywhere)]:
@@ -180,11 +184,12 @@ def main(request):
             return False
 
     author_results = filter(ensure_exists, author_results)
+    translator_results = filter(ensure_exists, translator_results)
     title_results = filter(ensure_exists, title_results)
     text_phrase = filter(ensure_exists, text_phrase)
     everywhere = filter(ensure_exists, everywhere)
 
-    results = author_results + title_results + text_phrase + everywhere
+    results = author_results + translator_results + title_results + text_phrase + everywhere
     # ensure books do exists & sort them
     results.sort(reverse=True)
 
@@ -209,6 +214,7 @@ def main(request):
                               {'tags': tags,
                                'prefix': query,
                                'results': {'author': author_results,
+                                           'translator': translator_results,
                                            'title': title_results,
                                            'content': text_phrase,
                                            'other': everywhere},
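
Finally, the context handed to search_multiple_hits.html gains a 'translator' bucket, which is what the new {% if results.translator %} block reads. A sketch of the resulting context shape; the empty lists are placeholders for the aggregated SearchResult lists and the query string is an arbitrary example.

context = {
    'tags': {},
    'prefix': 'mickiewicz',    # example query string
    'results': {
        'author': [],
        'translator': [],      # new bucket; rendered by the template block added above
        'title': [],
        'content': [],
        'other': [],
    },
}
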