# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
+from __future__ import print_function, unicode_literals
+
import os
import re
-import cStringIO
import copy
from lxml import etree
from librarian import functions
from lxml.etree import XMLSyntaxError, XSLTApplyError
+import six
+
functions.reg_substitute_entities()
functions.reg_person_name()
    'partial': 'xslt/wl2html_partial.xslt'
}
+
def get_stylesheet(name):
    return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])
+
def html_has_content(text):
    return etree.ETXPath('//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)})(text)
+
+def transform_abstrakt(abstrakt_element):
+    style_filename = get_stylesheet('legacy')
+    style = etree.parse(style_filename)
+    xml = etree.tostring(abstrakt_element, encoding='unicode')
+    # HACK: render the abstract with the dlugi_cytat (long quote) templates;
+    # the blockquote wrapper this produces is stripped below.
+    document = etree.parse(six.StringIO(xml.replace('abstrakt', 'dlugi_cytat')))
+    result = document.xslt(style)
+    html = re.sub('<a name="sec[0-9]*"/>', '', etree.tostring(result, encoding='unicode'))
+    return re.sub('</?blockquote[^>]*>', '', html)
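+
+# Usage sketch (hypothetical, not part of the library API; `wldoc` and the
+# element lookup are illustrative assumptions): the abstract of a parsed WL
+# document could be rendered on its own, e.g.:
+#
+#     abstrakt = wldoc.edoc.getroot().find('.//abstrakt')
+#     if abstrakt is not None:
+#         abstract_html = transform_abstrakt(abstrakt)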
+
+
def transform(wldoc, stylesheet='legacy', options=None, flags=None):
"""Transforms the WL document to XHTML.
document.edoc.getroot().set(flag, 'yes')
document.clean_ed_note()
+ document.clean_ed_note('abstrakt')
if not options:
options = {}
+ options.setdefault('gallery', "''")
result = document.transform(style, **options)
- del document # no longer needed large object :)
+ del document # no longer needed large object :)
if html_has_content(result):
add_anchors(result.getroot())
add_table_of_themes(result.getroot())
add_table_of_contents(result.getroot())
- return OutputFile.from_string(etree.tostring(result, method='html',
- xml_declaration=False, pretty_print=True, encoding='utf-8'))
+ return OutputFile.from_bytes(etree.tostring(
+ result, method='html', xml_declaration=False, pretty_print=True, encoding='utf-8'))
else:
return None
except KeyError:
raise ValueError("'%s' is not a valid stylesheet.")
- except (XMLSyntaxError, XSLTApplyError), e:
+ except (XMLSyntaxError, XSLTApplyError) as e:
raise ParseError(e)
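+
+# Usage sketch (illustrative values only, not part of this change): transform()
+# returns an OutputFile, or None when the rendered document has no content:
+#
+#     html = transform(wldoc, stylesheet='legacy',
+#                      options={'gallery': "'/gallery/my-book/'"})
+#     if html is None:
+#         pass  # the document produced no renderable content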
+
+@six.python_2_unicode_compatible
class Fragment(object):
    def __init__(self, id, themes):
        super(Fragment, self).__init__()
                try:
                    stack.pop()
                except IndexError:
-                    print 'CLOSED NON-OPEN TAG:', element
+                    print('CLOSED NON-OPEN TAG:', element)
        stack.reverse()
        return self.events + stack
        result = []
        for event, element in self.closed_events():
            if event == 'start':
-                result.append(u'<%s %s>' % (element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
+                result.append(u'<%s %s>' % (
+                    element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
                if element.text:
                    result.append(element.text)
            elif event == 'end':
        return ''.join(result)
-    def __unicode__(self):
+    def __str__(self):
        return self.to_string()
    # iterparse would die on a HTML document
    parser = etree.HTMLParser(encoding='utf-8')
-    buf = cStringIO.StringIO()
+    buf = six.BytesIO()
    buf.write(etree.tostring(etree.parse(input_filename, parser).getroot()[0][0], encoding='utf-8'))
    buf.seek(0)
    for event, element in etree.iterparse(buf, events=('start', 'end')):
        # Process begin and end elements
        if element.get('class', '') in ('theme-begin', 'theme-end'):
-            if not event == 'end': continue # Process elements only once, on end event
+            if not event == 'end':
+                continue  # Process elements only once, on end event
            # Open new fragment
            if element.get('class', '') == 'theme-begin':
                try:
                    fragment = open_fragments[element.get('fid')]
                except KeyError:
-                    print '%s:closed not open fragment #%s' % (input_filename, element.get('fid'))
+                    print('%s:closed not open fragment #%s' % (input_filename, element.get('fid')))
                else:
                    closed_fragments[fragment.id] = fragment
                    del open_fragments[fragment.id]
                for fragment_id in open_fragments:
                    open_fragments[fragment_id].append('text', element.tail)
-
        # Process all elements except begin and end
        else:
            # Omit annotation tags
            link_text = prefix
        anchor = etree.Element('a', href='#%s' % prefix)
        anchor.set('class', 'anchor')
-        anchor.text = unicode(link_text)
+        anchor.text = six.text_type(link_text)
        parent.insert(index, anchor)
    if with_target:
def add_anchors(root):
    counter = 1
    for element in root.iterdescendants():
-        if any_ancestor(element, lambda e: e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication')
-                or e.get('id') == 'nota_red'
-                or e.tag == 'blockquote'):
+        def f(e):
+            return e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication', 'frame') or \
+                e.get('id') == 'nota_red' or e.tag == 'blockquote'
+        if any_ancestor(element, f):
            continue
        if element.tag == 'p' and 'verse' in element.get('class', ''):
    for e in working.findall('a'):
        if e.get('class') in ('annotation', 'theme-begin'):
            e.text = ''
-    return etree.tostring(working, method='text', encoding=unicode).strip()
+    return etree.tostring(working, method='text', encoding='unicode').strip()
def add_table_of_contents(root):
    counter = 1
    for element in root.iterdescendants():
        if element.tag in ('h2', 'h3'):
-            if any_ancestor(element, lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
+            if any_ancestor(element,
+                            lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
                continue
            element_text = raw_printable_text(element)
        if len(subsections):
            subsection_list = etree.SubElement(section_element, 'ol')
-            for n, subsection, text, _ in subsections:
+            for n1, subsection, subtext, _ in subsections:
                subsection_element = etree.SubElement(subsection_list, 'li')
-                add_anchor(subsection_element, "s%d" % n, with_target=False, link_text=text)
+                add_anchor(subsection_element, "s%d" % n1, with_target=False, link_text=subtext)
    root.insert(0, toc)
    try:
        from sortify import sortify
    except ImportError:
-        sortify = lambda x: x
+        def sortify(x):
+            return x
    book_themes = {}
    for fragment in root.findall('.//a[@class="theme-begin"]'):
        theme_names = [s.strip() for s in fragment.text.split(',')]
        for theme_name in theme_names:
            book_themes.setdefault(theme_name, []).append(fragment.get('name'))
-    book_themes = book_themes.items()
+    book_themes = list(book_themes.items())
    book_themes.sort(key=lambda s: sortify(s[0]))
    themes_div = etree.Element('div', id="themes")
    themes_ol = etree.SubElement(themes_div, 'ol')
def extract_annotations(html_path):
- """For each annotation, yields a tuple: anchor, text, html."""
+ """Extracts annotations from HTML for annotations dictionary.
+
+ For each annotation, yields a tuple of:
+ anchor, footnote type, valid qualifiers, text, html.
+
+ """
+ from .fn_qualifiers import FN_QUALIFIERS
+
parser = etree.HTMLParser(encoding='utf-8')
tree = etree.parse(html_path, parser)
footnotes = tree.find('//*[@id="footnotes"]')
- re_qualifier = re.compile(ur'[^\u2014]+\s+\((.+)\)\s+\u2014')
+ re_qualifier = re.compile(r'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
if footnotes is not None:
for footnote in footnotes.findall('div'):
fn_type = footnote.get('class').split('-')[1]
footnote.text = None
if len(footnote) and footnote[-1].tail == '\n':
footnote[-1].tail = None
- text_str = etree.tostring(footnote, method='text', encoding=unicode).strip()
- html_str = etree.tostring(footnote, method='html', encoding=unicode).strip()
- qualifier = None
+ text_str = etree.tostring(footnote, method='text', encoding='unicode').strip()
+ html_str = etree.tostring(footnote, method='html', encoding='unicode').strip()
+
            match = re_qualifier.match(text_str)
            if match:
-                qualifier = match.group(1)
-
-            yield anchor, fn_type, qualifier, text_str, html_str
+                qualifier_str = match.group(1)
+                qualifiers = []
+                # Keep only qualifiers recognised in the FN_QUALIFIERS dictionary;
+                # candidates prefixed with 'z ' are matched by the word that follows.
+                for candidate in re.split('[;,]', qualifier_str):
+                    candidate = candidate.strip()
+                    if candidate in FN_QUALIFIERS:
+                        qualifiers.append(candidate)
+                    elif candidate.startswith('z '):
+                        subcandidate = candidate.split()[1]
+                        if subcandidate in FN_QUALIFIERS:
+                            qualifiers.append(subcandidate)
+            else:
+                qualifiers = []
+            yield anchor, fn_type, qualifiers, text_str, html_str
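+
+# Consumption sketch (hypothetical call site; the file name is illustrative):
+# the tuples yielded above can be collected into an index keyed by anchor:
+#
+#     annotations = {}
+#     for anchor, fn_type, qualifiers, text_str, html_str in \
+#             extract_annotations('book.html'):
+#         annotations[anchor] = (fn_type, qualifiers, text_str, html_str)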