-# -*- coding: utf-8 -*-
-
+# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
+#
+import copy
import re
from lxml import etree
from librarian import dcparser, RDFNS
-from librarian.html import raw_printable_text
from librarian.util import get_translation
+
+
+def last_words(text, n):
+    words = []
+    for w in reversed(text.split()):
+        words.append(w)
+        if len(w) > 2:
+            n -= 1
+            if not n:
+                break
+    if n:
+        return n, text
+    else:
+        return n, ' '.join(reversed(words))
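+
+# last_words() walks `text` backwards, collecting words until it has seen `n`
+# words longer than two characters; shorter words are kept but do not count
+# towards `n`. It returns (remaining, fragment), e.g.
+# last_words('to jest bardzo długi tekst', 2) gives (0, 'długi tekst'); if the
+# text runs out before `n` long words are found, the leftover count and the
+# whole original text are returned instead.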
+
class WLElement(etree.ElementBase):
    SECTION_PRECEDENCE = None
+    ASIDE = False
    TXT_TOP_MARGIN = 0
    TXT_BOTTOM_MARGIN = 0
    STRIP = False
    text_substitutions = [
-        (u'---', u'—'),
-        (u'--', u'–'),
-        #(u'...', u'…'), # Temporary turnoff for epub
-        (u',,', u'„'),
-        (u'"', u'”'),
+        ('---', '—'),
+        ('--', '–'),
+        #('...', '…'), # Temporary turnoff for epub
+        (',,', '„'),
+        ('"', '”'),
        ('\ufeff', ''),
        ("'", "\u2019"), # This was enabled for epub.
        if parent is not None:
            parent.signal(signal)
-    def raw_printable_text(self):
+    def raw_printable_text(self, builder):
+        from librarian.html import raw_printable_text
+
        # TODO: subtags, emphases, etc.
        t = ''
-        t += self.normalize_text(self.text)
+        t += self.normalize_text(self.text, builder)
        for c in self:
            if not isinstance(c, WLElement):
                continue
            if c.tag not in ('pe', 'pa', 'pt', 'pr', 'motyw'):
-                t += c.raw_printable_text()
-            t += self.normalize_text(c.tail)
+                t += c.raw_printable_text(builder)
+            t += self.normalize_text(c.tail, builder)
        return t
-    def normalize_text(self, text):
+    def normalize_text(self, text, builder):
        text = text or ''
        for e, s in self.text_substitutions:
            text = text.replace(e, s)
        # FIXME: Temporary turnoff
        # text = re.sub(r'\s+', ' ', text)
        ### TODO: Added now for epub
-        text = re.sub(r'(?<=\s\w)\s+', u'\u00A0', text)
+
+        if getattr(builder, 'hyphenator', None) is not None:
+            newt = ''
+            wlist = re.compile(r'\w+|[^\w]', re.UNICODE).findall(text)
+            for w in wlist:
+                newt += builder.hyphenator.inserted(w, '\u00AD')
+            text = newt
+
+        if builder.orphans:
+            text = re.sub(r'(?<=\s\w)\s+', '\u00A0', text)
        return text
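
+    # The builder-dependent passes in normalize_text() above: if the builder
+    # carries a hyphenator, soft hyphens (U+00AD) are inserted into every word
+    # at its possible break points; if builder.orphans is set, the space after
+    # a one-character word is turned into a no-break space (U+00A0), so that
+    # single-letter words are not left hanging at the end of a line.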
    def _build_inner(self, builder, build_method):
        child_count = len(self)
        if self.CAN_HAVE_TEXT and self.text:
-            text = self.normalize_text(self.text)
+            text = self.normalize_text(self.text, builder)
            if self.STRIP:
                text = text.lstrip()
                if not child_count:
        for i, child in enumerate(self):
            if isinstance(child, WLElement):
                getattr(child, build_method)(builder)
+            elif getattr(builder, 'debug', False) and child.tag is etree.Comment:
+                builder.process_comment(child)
            if self.CAN_HAVE_TEXT and child.tail:
-                text = self.normalize_text(child.tail)
+                text = self.normalize_text(child.tail, builder)
                if self.STRIP and i == child_count - 1:
                    text = text.rstrip()
                builder.push_text(text)
        # always copy the id attribute (?)
        if self.attrib.get('id'):
            attr['id'] = self.attrib['id']
-        elif '_compat_section_id' in self.attrib:
+        elif getattr(self, 'SHOULD_HAVE_ID', False) and '_compat_section_id' in self.attrib:
            attr['id'] = self.attrib['_compat_section_id']
        return attr
        # TEMPORARY
        self.CAN_HAVE_TEXT = True
        self.STRIP = False
-
+
        start_chunk = self.EPUB_START_CHUNK and isinstance(self.getparent(), Master)
        if start_chunk:
            builder.add_toc_entry(
                fragment,
-                self.raw_printable_text(),
+                self.raw_printable_text(builder),
                self.SECTION_PRECEDENCE
            )
        attr = self.get_epub_attr(builder)
        if fragment:
            attr['id'] = fragment
+        if builder.debug:
+            chunkno, sourceline = 0, self.sourceline
+            if builder.splits:
+                chunkno, sourceline = len(builder.splits), sourceline - builder.splits[-1]
+            attr['data-debug'] = f'{chunkno}:{sourceline}'
        builder.start_element(
            self.EPUB_TAG,
            attr
        if self.SECTION_PRECEDENCE:
            assert isinstance(self.getparent(), (Master, DlugiCytat, PoezjaCyt, Footnote)), \
                'Header {} inside a <{}> instead of a master.'.format(
-                    etree.tostring(self), self.getparent().tag)
+                    etree.tostring(self, encoding='unicode'), self.getparent().tag)
        for c in self:
            if isinstance(c, WLElement):
        for e in self:
            if isinstance(e, WLElement):
                e.sanitize()
+
+    def snip(self, words, before=None, sub=False):
+        if sub and self.ASIDE:
+            return words, []
+
+        snippet = []
+        if before is not None:
+            i = self.index(before)
+        else:
+            i = len(self)
+
+        while i > 0:
+            i -= 1
+            if self[i].tail:
+                if words:
+                    words, text = last_words(self[i].tail, words)
+                    snippet = [('text', text)] + snippet
+
+            if words:
+                words, subsnip = self[i].snip(words, sub=True)
+                snippet = subsnip + snippet
+
+        if words and self.text:
+            words, text = last_words(self.text, words)
+            snippet = [('text', text)] + snippet
+
+        snippet = [('start', self.tag, self.attrib)] + snippet + [('end',)]
+
+        if not sub and words and not self.ASIDE:
+            # do we dare go up?
+            parent = self.getparent()
+            if parent is not None and parent.CAN_HAVE_TEXT:
+                words, parsnip = parent.snip(words, before=self)
+                return words, parsnip[:-1] + snippet + parsnip[-1:]
+
+        return words, snippet
+
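+    # get_snippet() below replays the flat token stream produced by snip():
+    # ('start', tag, attrib) opens an element, ('text', s) appends text, and
+    # ('end',) closes the current element. For example, a stream like
+    # [('start', 'strofa', {}), ('text', 'ostatnie słowa'), ('end',)] would
+    # become <strofa>ostatnie słowa</strofa> inside the synthetic <snippet> root.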
+    def get_snippet(self, words=15):
+        from librarian.parser import parser
+
+        words, snippet = self.getparent().snip(words=words, before=self)
+
+        cursor = snipelem = parser.makeelement('snippet')
+        snipelem._meta_object = self.meta
+        for s in snippet:
+            if s[0] == 'start':
+                elem = parser.makeelement(s[1], **s[2])
+                cursor.append(elem)
+                cursor = elem
+            elif s[0] == 'end':
+                cursor = cursor.getparent()
+            else:
+                if len(cursor):
+                    cursor[-1].tail = (cursor[-1].tail or '') + s[1]
+                else:
+                    cursor.text = (cursor.text or '') + s[1]
+
+        return snipelem
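+
+    # Illustrative use: for an element `elem` inside a larger text,
+    # elem.get_snippet(words=15) asks its parent for roughly the last fifteen
+    # longer words preceding `elem` and returns them wrapped in a standalone
+    # <snippet> element that carries the document's metadata (_meta_object).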
+
+    def get_link(self):
+        sec = getattr(self, 'SHOULD_HAVE_ID', False) and self.attrib.get('_compat_section_id')
+        if sec:
+            return sec
+        parent_index = self.getparent().index(self)
+        if parent_index:
+            return self.getparent()[parent_index - 1].get_link()
+        else:
+            return self.getparent().get_link()
+
+
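+# Element class backing the synthetic <snippet> tag built in get_snippet()
+# above; presumably registered with the element lookup in librarian.parser so
+# that parser.makeelement('snippet') yields instances of this class.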
+class Snippet(WLElement):
+    pass