# -*- coding: utf-8 -*-
#
# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from __future__ import print_function, unicode_literals

import copy
import os
import re

from lxml import etree
from librarian import XHTMLNS, ParseError, OutputFile
from librarian import functions

from PIL import Image
from lxml.etree import XMLSyntaxError, XSLTApplyError
import six


functions.reg_substitute_entities()
functions.reg_person_name()


STYLESHEETS = {
    'legacy': 'xslt/book2html.xslt',
    'full': 'xslt/wl2html_full.xslt',
    'partial': 'xslt/wl2html_partial.xslt'
}


def get_stylesheet(name):
    return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])


def html_has_content(text):
    return etree.ETXPath(
        '//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)}
    )(text)


def transform_abstrakt(abstrakt_element):
    style_filename = get_stylesheet('legacy')
    style = etree.parse(style_filename)
    xml = etree.tostring(abstrakt_element, encoding='unicode')
    # Render the abstract as a long quote (dlugi_cytat), which the legacy
    # stylesheet knows how to handle, then strip the quote markup again.
    document = etree.parse(six.StringIO(
        xml.replace('abstrakt', 'dlugi_cytat')
    ))
    result = document.xslt(style)
    html = re.sub('<a name="sec[0-9]*"/>', '',
                  etree.tostring(result, encoding='unicode'))
    return re.sub('</?blockquote[^>]*>', '', html)


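# The returned value is a plain HTML snippet: an <abstrakt> with a single
# paragraph comes back roughly as '<p>...</p>' (a sketch), since the section
# anchors and the <blockquote> wrapper added by the stylesheet are stripped
# above.

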
def add_image_sizes(tree, gallery_path, gallery_url, base_url):
    widths = [360, 600, 1200, 1800, 2400]

    for i, ilustr in enumerate(tree.findall('//ilustr')):
        rel_path = ilustr.attrib['src']
        img_url = six.moves.urllib.parse.urljoin(base_url, rel_path)

        f = six.moves.urllib.request.urlopen(img_url)
        img = Image.open(f)
        ext = {'GIF': 'gif', 'PNG': 'png'}.get(img.format, 'jpg')

        srcset = []
        # Needed widths: the predefined ones plus the original width,
        # capped at whichever of the two is smaller.
        img_widths = [
            w for w in sorted(
                set(widths + [img.size[0]])
            )
            if w <= min(widths[-1], img.size[0])
        ]
        largest_url = None
        for w in img_widths:
            fname = '%d.W%d.%s' % (i, w, ext)
            fpath = gallery_path + fname
            if not os.path.exists(fpath):
                height = round(img.size[1] * w / img.size[0])
                th = img.resize((w, height))
                th.save(fpath)
            th_url = gallery_url + fname
            srcset.append(" ".join((
                th_url,
                '%dw' % w
            )))
            largest_url = th_url
        ilustr.attrib['srcset'] = ", ".join(srcset)
        ilustr.attrib['src'] = largest_url

        f.close()


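# After add_image_sizes() runs, each <ilustr> element carries a srcset built
# from the generated files, e.g. (a sketch):
#     srcset="img/0.W360.jpg 360w, img/0.W600.jpg 600w, img/0.W1200.jpg 1200w"
# with src pointing at the largest generated size.

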
def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None,
              gallery_path='img/', gallery_url='img/', base_url='file://./'):
    """Transforms the WL document to XHTML.

    Returns an OutputFile with the rendered HTML, or None if the
    document has no content.
    """
    # Parse the XSLT stylesheet.
    try:
        style_filename = get_stylesheet(stylesheet)
        style = etree.parse(style_filename)

        document = copy.deepcopy(wldoc)
        del wldoc
        document.swap_endlines()

        if flags:
            for flag in flags:
                document.edoc.getroot().set(flag, 'yes')

        document.clean_ed_note()
        document.clean_ed_note('abstrakt')

        if not options:
            options = {}

        try:
            os.makedirs(gallery_path)
        except OSError:
            pass
        add_image_sizes(document.edoc, gallery_path, gallery_url, base_url)

        css = (
            css
            or 'https://static.wolnelektury.pl/css/compressed/book_text.css'
        )
        css = "'%s'" % css  # quote for use as an XSLT string parameter
        result = document.transform(style, css=css, **options)
        del document  # the copied document is no longer needed

        if html_has_content(result):
            add_anchors(result.getroot())
            add_table_of_themes(result.getroot())
            add_table_of_contents(result.getroot())
            return OutputFile.from_bytes(etree.tostring(
                result, method='html', xml_declaration=False,
                pretty_print=True, encoding='utf-8'
            ))
        else:
            return None
    except KeyError:
        raise ValueError("'%s' is not a valid stylesheet." % stylesheet)
    except (XMLSyntaxError, XSLTApplyError) as e:
        raise ParseError(e)


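# Example usage (a sketch; assumes `doc` is a parsed WLDocument and that the
# returned OutputFile exposes get_bytes()):
#
#     html = transform(doc, stylesheet='legacy',
#                      gallery_path='out/img/', gallery_url='img/')
#     if html is not None:
#         with open('out/book.html', 'wb') as f:
#             f.write(html.get_bytes())

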
@six.python_2_unicode_compatible
class Fragment(object):
    def __init__(self, id, themes):
        super(Fragment, self).__init__()
        self.id = id
        self.themes = themes
        self.events = []

    def append(self, event, element):
        self.events.append((event, element))

    def closed_events(self):
        # Recorded events plus synthetic 'end' events for any tags
        # still open, so serialization is always well-formed.
        stack = []
        for event, element in self.events:
            if event == 'start':
                stack.append(('end', element))
            elif event == 'end':
                try:
                    stack.pop()
                except IndexError:
                    print('CLOSED NON-OPEN TAG:', element)
        stack.reverse()
        return self.events + stack

    def to_string(self):
        result = []
        for event, element in self.closed_events():
            if event == 'start':
                result.append(u'<%s %s>' % (
                    element.tag,
                    ' '.join(
                        '%s="%s"' % (k, v)
                        for k, v in element.attrib.items()
                    )
                ))
                if element.text:
                    result.append(element.text)
            elif event == 'end':
                result.append(u'</%s>' % element.tag)
                if element.tail:
                    result.append(element.tail)
            else:
                result.append(element)
        return ''.join(result)

    def __str__(self):
        return self.to_string()


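# A Fragment accumulates ('start', element), ('end', element) and
# ('text', str) events in document order; str(fragment) replays them and
# closes any still-open tags, so the captured theme span serializes as
# well-formed markup.

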
def extract_fragments(input_filename):
    """Extracts theme fragments from input_filename."""
    open_fragments = {}
    closed_fragments = {}

    # iterparse would die on an HTML document
    parser = etree.HTMLParser(encoding='utf-8')
    buf = six.BytesIO()
    buf.write(etree.tostring(
        etree.parse(input_filename, parser).getroot()[0][0],
        encoding='utf-8'
    ))
    buf.seek(0)

    for event, element in etree.iterparse(buf, events=('start', 'end')):
        # Process theme begin and end markers.
        if element.get('class', '') in ('theme-begin', 'theme-end'):
            if not event == 'end':
                continue  # Process elements only once, on the end event

            # Open a new fragment.
            if element.get('class', '') == 'theme-begin':
                fragment = Fragment(id=element.get('fid'), themes=element.text)

                # Record the open ancestors, so the fragment can be
                # serialized with its original surrounding markup.
                parent = element.getparent()
                parents = []
                while parent.get('id', None) != 'book-text':
                    cparent = copy.deepcopy(parent)
                    if 'id' in cparent.attrib:
                        del cparent.attrib['id']
                    parents.append(cparent)
                    parent = parent.getparent()

                parents.reverse()
                for parent in parents:
                    fragment.append('start', parent)

                open_fragments[fragment.id] = fragment

            # Close an existing fragment.
            else:
                try:
                    fragment = open_fragments[element.get('fid')]
                except KeyError:
                    print('%s: closed not-open fragment #%s' % (
                        input_filename, element.get('fid')
                    ))
                else:
                    closed_fragments[fragment.id] = fragment
                    del open_fragments[fragment.id]

            # Append the element's tail to all open fragments
            # (we don't want to lose any text).
            if element.tail:
                for fragment_id in open_fragments:
                    open_fragments[fragment_id].append('text', element.tail)

        # Process all other elements.
        else:
            # Omit annotation tags.
            if (len(element.get('name', '')) or
                    element.get('class', '') in ('annotation', 'anchor')):
                if event == 'end' and element.tail:
                    for fragment_id in open_fragments:
                        open_fragments[fragment_id].append(
                            'text', element.tail)
            else:
                for fragment_id in open_fragments:
                    celem = copy.copy(element)
                    if 'id' in celem.attrib:
                        del celem.attrib['id']
                    open_fragments[fragment_id].append(
                        event, celem)

    return closed_fragments, open_fragments


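# Example usage (a sketch):
#
#     closed, still_open = extract_fragments('book.html')
#     for fid, fragment in closed.items():
#         print(fid, fragment.themes)
#         print(fragment.to_string())
#
# Any fragment left in `still_open` had a theme-begin marker with no matching
# theme-end in the document.

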
def add_anchor(element, prefix, with_link=True, with_target=True,
               link_text=None):
    parent = element.getparent()
    index = parent.index(element)

    if with_link:
        if link_text is None:
            link_text = prefix
        anchor = etree.Element('a', href='#%s' % prefix)
        anchor.set('class', 'anchor')
        anchor.text = six.text_type(link_text)
        parent.insert(index, anchor)

    if with_target:
        anchor_target = etree.Element('a', name='%s' % prefix)
        anchor_target.set('class', 'target')
        anchor_target.text = u' '
        parent.insert(index, anchor_target)


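# add_anchor(el, 'f5', link_text=5) inserts two siblings just before `el`:
# a named target <a class="target" name="f5"> </a> followed by a visible
# link <a class="anchor" href="#f5">5</a>.

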
def any_ancestor(element, test):
    for ancestor in element.iterancestors():
        if test(ancestor):
            return True
    return False


def add_anchors(root):
    counter = 1
    for element in root.iterdescendants():
        def f(e):
            # Don't number anything inside notes, mottos, quotes etc.
            return (
                e.get('class') in (
                    'note', 'motto', 'motto_podpis', 'dedication', 'frame'
                )
                or e.get('id') == 'nota_red'
                or e.tag == 'blockquote'
            )
        if any_ancestor(element, f):
            continue

        if element.tag == 'div' and 'verse' in element.get('class', ''):
            if counter == 1 or counter % 5 == 0:
                add_anchor(element, "f%d" % counter, link_text=counter)
            counter += 1
        elif 'paragraph' in element.get('class', ''):
            add_anchor(element, "f%d" % counter, link_text=counter)
            counter += 1


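# Paragraphs and verse lines share one running counter: every paragraph gets
# an "f<n>" anchor, while verse lines are only anchored at line 1 and then
# every fifth line.

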
def raw_printable_text(element):
    working = copy.deepcopy(element)
    for e in working.findall('a'):
        # Drop annotation and theme link text from the printable heading.
        if e.get('class') in ('annotation', 'theme-begin'):
            e.text = ''
    return etree.tostring(working, method='text', encoding='unicode').strip()


def add_table_of_contents(root):
    sections = []
    counter = 1
    for element in root.iterdescendants():
        if element.tag in ('h2', 'h3'):
            if any_ancestor(
                    element,
                    lambda e: e.get('id') in (
                        'footnotes', 'nota_red'
                    ) or e.get('class') in ('person-list',)):
                continue

            element_text = raw_printable_text(element)
            if (element.tag == 'h3' and len(sections)
                    and sections[-1][1] == 'h2'):
                # An h3 following an h2 becomes its subsection.
                sections[-1][3].append(
                    (counter, element.tag, element_text, [])
                )
            else:
                sections.append((counter, element.tag, element_text, []))
            add_anchor(element, "s%d" % counter, with_link=False)
            counter += 1

    toc = etree.Element('div')
    toc_header = etree.SubElement(toc, 'h2')
    toc_header.text = u'Spis treści'  # "Table of Contents"
    toc_list = etree.SubElement(toc, 'ol')

    for n, section, text, subsections in sections:
        section_element = etree.SubElement(toc_list, 'li')
        add_anchor(section_element, "s%d" % n, with_target=False,
                   link_text=text)

        if len(subsections):
            subsection_list = etree.SubElement(section_element, 'ol')
            for n1, subsection, subtext, _ in subsections:
                subsection_element = etree.SubElement(subsection_list, 'li')
                add_anchor(subsection_element, "s%d" % n1, with_target=False,
                           link_text=subtext)

    root.insert(0, toc)


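# The generated table of contents is a <div> prepended to the transformed
# document: an <h2> header followed by an <ol> of section links, each
# pointing at the "s<n>" target anchor added to the corresponding heading,
# with nested <ol> lists for h3 subsections.

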
def add_table_of_themes(root):
    try:
        from sortify import sortify
    except ImportError:
        def sortify(x):
            return x

    book_themes = {}
    for fragment in root.findall('.//a[@class="theme-begin"]'):
        if not fragment.text:
            continue
        theme_names = [s.strip() for s in fragment.text.split(',')]
        for theme_name in theme_names:
            book_themes.setdefault(theme_name, []).append(fragment.get('name'))
    book_themes = list(book_themes.items())
    book_themes.sort(key=lambda s: sortify(s[0]))
    themes_div = etree.Element('div', id="themes")
    themes_ol = etree.SubElement(themes_div, 'ol')
    for theme_name, fragments in book_themes:
        themes_li = etree.SubElement(themes_ol, 'li')
        themes_li.text = "%s: " % theme_name
        for i, fragment in enumerate(fragments):
            item = etree.SubElement(themes_li, 'a', href="#%s" % fragment)
            item.text = str(i + 1)
            item.tail = ' '
    root.insert(0, themes_div)


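# The themes index is a <div id="themes"> prepended to the document: an <ol>
# with one <li> per theme ("Theme name: 1 2 3 ..."), where each number links
# to the corresponding theme-begin anchor in the text.

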
def extract_annotations(html_path):
    """Extracts annotations from HTML for the annotations dictionary.

    For each annotation, yields a tuple of:
    anchor, footnote type, valid qualifiers, text, html.

    """
    from .fn_qualifiers import FN_QUALIFIERS

    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(html_path, parser)
    footnotes = tree.find('//*[@id="footnotes"]')
    # A qualifier abbreviation appears in parentheses before the first
    # em dash (\u2014) of the footnote text.
    re_qualifier = re.compile(r'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
    if footnotes is not None:
        for footnote in footnotes.findall('div'):
            fn_type = footnote.get('class').split('-')[1]
            anchor = footnote.find('a[@class="annotation"]').get('href')[1:]
            # Drop the leading backlink anchors, leaving only the note text.
            del footnote[:2]
            footnote.text = None
            if len(footnote) and footnote[-1].tail == '\n':
                footnote[-1].tail = None
            text_str = etree.tostring(footnote, method='text',
                                      encoding='unicode').strip()
            html_str = etree.tostring(footnote, method='html',
                                      encoding='unicode').strip()

            qualifiers = []
            match = re_qualifier.match(text_str)
            if match:
                qualifier_str = match.group(1)
                for candidate in re.split('[;,]', qualifier_str):
                    candidate = candidate.strip()
                    if candidate in FN_QUALIFIERS:
                        qualifiers.append(candidate)
                    elif candidate.startswith('z '):
                        # Qualifiers may be prefixed with 'z' ("from");
                        # check the bare form.
                        subcandidate = candidate.split()[1]
                        if subcandidate in FN_QUALIFIERS:
                            qualifiers.append(subcandidate)

            yield anchor, fn_type, qualifiers, text_str, html_str
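

# Example usage (a sketch):
#
#     for anchor, fn_type, qualifiers, text, html in extract_annotations(
#             'book.html'):
#         print(anchor, fn_type, qualifiers, text[:60])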