import warnings
from sunburnt import search
import copy
-
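+# socket comes in through httplib2 (the HTTP library sunburnt uses for its connections);
+# only socket.error is needed, to handle failed connections to the Solr server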
+from httplib2 import socket
class TermVectorOptions(search.Options):
    def __init__(self, schema, original=None):
            self.writeable = False
        elif 'r' not in mode:
            self.readable = False
-        self.init_schema()
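+        # init_schema() fetches the schema from the Solr server (over httplib2),
+        # so a refused or dropped connection surfaces here as socket.error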
+        try:
+            self.init_schema()
+        except socket.error, e:
+            raise socket.error, "Cannot connect to Solr server, and search indexing is enabled (%s)" % str(e)
+
    def _analyze(self, **kwargs):
        if not self.readable:
        if matches:
            return self.substring(kwargs['text'], matches,
-                margins=kwargs.get('margins', 30),
-                mark=kwargs.get('mark', ("<b>", "</b>")))
+                                  margins=kwargs.get('margins', 30),
+                                  mark=kwargs.get('mark', ("<b>", "</b>")))
        else:
            return None
        start = None
        end = None
        totlen = len(text)
-        matches_margins = map(lambda (s, e): (max(0, s - margins), min(totlen, e + margins)), matches)
-        (start, end) = matches_margins[0]
-
-        for (s, e) in matches_margins[1:]:
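+        # pair each original match with its margin-widened range so the exact
+        # match offsets survive the merging below and can still be marked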
+        matches_margins = map(lambda (s, e):
+                              ((s, e),
+                               (max(0, s - margins), min(totlen, e + margins))),
+                              matches)
+        (start, end) = matches_margins[0][1]
+        matches = [matches_margins[0][0]]  # keep the first match so it is marked as well
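+        # grow the snippet window over any overlapping match windows, collecting
+        # the original offsets of the matches that fall inside it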
+        for (m, (s, e)) in matches_margins[1:]:
            if end < s or start > e:
                continue
            start = min(start, s)
            end = max(end, e)
-
+            matches.append(m)
+
        snip = text[start:end]
-        matches = list(matches)
        matches.sort(lambda a, b: cmp(b[0], a[0]))
+
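+        # matches is sorted by start offset, descending, so marks can be inserted
+        # back-to-front without shifting the offsets of matches not yet marked;
+        # e.g. substring(text="The quick brown fox", matches=[(4, 9)], margins=3)
+        # returns "he <b>quick</b> br"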
        for (s, e) in matches:
            off = - start
            snip = snip[:e + off] + mark[1] + snip[e + off:]
            snip = snip[:s + off] + mark[0] + snip[s + off:]
        # maybe break on word boundaries
+
        return snip