librarian.git: src/librarian/html.py
# -*- coding: utf-8 -*-
#
# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from __future__ import print_function, unicode_literals

import os
import re
import copy

from lxml import etree
from librarian import XHTMLNS, ParseError, OutputFile
from librarian import functions
from PIL import Image

from lxml.etree import XMLSyntaxError, XSLTApplyError
import six


functions.reg_substitute_entities()
functions.reg_person_name()

STYLESHEETS = {
    'legacy': 'xslt/book2html.xslt',
    'full': 'xslt/wl2html_full.xslt',
    'partial': 'xslt/wl2html_partial.xslt'
}


def get_stylesheet(name):
    return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])


def html_has_content(text):
    return etree.ETXPath(
        '//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)}
    )(text)


def transform_abstrakt(abstrakt_element):
    style_filename = get_stylesheet('legacy')
    style = etree.parse(style_filename)
    xml = etree.tostring(abstrakt_element, encoding='unicode')
    document = etree.parse(six.StringIO(
        xml.replace('abstrakt', 'dlugi_cytat')
    ))  # HACK: reuse the long-quote (dlugi_cytat) template to render the abstract
    result = document.xslt(style)
    html = re.sub('<a name="sec[0-9]*"/>', '',
                  etree.tostring(result, encoding='unicode'))
    return re.sub('</?blockquote[^>]*>', '', html)


def add_image_sizes(tree, gallery_path, gallery_url, base_url):
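    """Prepares responsive image variants for every //ilustr element.

    Fetches each referenced image (relative to base_url), writes resized
    copies under gallery_path and rewrites the element's src/srcset
    attributes to point at gallery_url, e.g. (illustrative values only)
    src="img/0.W1200.jpg" and
    srcset="img/0.W360.jpg 360w, img/0.W600.jpg 600w, img/0.W1200.jpg 1200w".
    """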
    widths = [360, 600, 1200, 1800, 2400]

    for i, ilustr in enumerate(tree.findall('//ilustr')):
        rel_path = ilustr.attrib['src']
        img_url = six.moves.urllib.parse.urljoin(base_url, rel_path)

        f = six.moves.urllib.request.urlopen(img_url)
        img = Image.open(f)
        ext = {'GIF': 'gif', 'PNG': 'png'}.get(img.format, 'jpg')

        srcset = []
        # Needed widths: predefined and original, limited by
        # whichever is smaller.
        img_widths = [
            w for w in
            sorted(
                set(widths + [img.size[0]])
            )
            if w <= min(widths[-1], img.size[0])
        ]
        largest_url = None
        for w in img_widths:
            fname = '%d.W%d.%s' % (i, w, ext)
            fpath = gallery_path + fname
            if not os.path.exists(fpath):
                height = round(img.size[1] * w / img.size[0])
                th = img.resize((w, height))
                th.save(fpath)
            th_url = gallery_url + fname
            srcset.append(" ".join((
                th_url,
                '%dw' % w
            )))
            largest_url = th_url
        ilustr.attrib['srcset'] = ", ".join(srcset)
        ilustr.attrib['src'] = largest_url

        f.close()


def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None,
              gallery_path='img/', gallery_url='img/', base_url='file://./'):
    """Transforms the WL document to XHTML.

    Returns an OutputFile with the generated markup, or None if the
    document has no content.
    """
    # Parse XSLT
    try:
        style_filename = get_stylesheet(stylesheet)
        style = etree.parse(style_filename)

        document = copy.deepcopy(wldoc)
        del wldoc
        document.swap_endlines()

        if flags:
            for flag in flags:
                document.edoc.getroot().set(flag, 'yes')

        document.clean_ed_note()
        document.clean_ed_note('abstrakt')
        document.fix_pa_akap()

        if not options:
            options = {}

        try:
            os.makedirs(gallery_path)
        except OSError:
            pass

        add_image_sizes(document.edoc, gallery_path, gallery_url, base_url)

        css = (
            css
            or 'https://static.wolnelektury.pl/css/compressed/book_text.css'
        )
        css = "'%s'" % css
        result = document.transform(style, css=css, **options)
        del document  # the large document copy is no longer needed

        if html_has_content(result):
            add_anchors(result.getroot())
            add_table_of_themes(result.getroot())
            add_table_of_contents(result.getroot())

            return OutputFile.from_bytes(etree.tostring(
                result, method='html', xml_declaration=False,
                pretty_print=True, encoding='utf-8'
            ))
        else:
            return None
    except KeyError:
        raise ValueError("'%s' is not a valid stylesheet." % stylesheet)
    except (XMLSyntaxError, XSLTApplyError) as e:
        raise ParseError(e)

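# A minimal usage sketch for transform() (hypothetical; it assumes the
# WLDocument parser API and OutputFile.get_bytes() defined elsewhere in
# librarian):
#
#     from librarian.parser import WLDocument
#     from librarian import html
#
#     wldoc = WLDocument.from_file('book.xml')
#     output = html.transform(wldoc, stylesheet='legacy', gallery_path='img/')
#     if output is not None:
#         with open('book.html', 'wb') as f:
#             f.write(output.get_bytes())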

@six.python_2_unicode_compatible
class Fragment(object):
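    """A single theme fragment assembled from iterparse events.

    Collects ('start'/'end'/'text', element-or-text) events and can
    serialize them back into an HTML string, closing any tags that are
    still open.
    """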
    def __init__(self, id, themes):
        super(Fragment, self).__init__()
        self.id = id
        self.themes = themes
        self.events = []

    def append(self, event, element):
        self.events.append((event, element))

    def closed_events(self):
        stack = []
        for event, element in self.events:
            if event == 'start':
                stack.append(('end', element))
            elif event == 'end':
                try:
                    stack.pop()
                except IndexError:
                    print('CLOSED NON-OPEN TAG:', element)

        stack.reverse()
        return self.events + stack

    def to_string(self):
        result = []
        for event, element in self.closed_events():
            if event == 'start':
                result.append(u'<%s %s>' % (
                    element.tag,
                    ' '.join(
                        '%s="%s"' % (k, v)
                        for k, v in element.attrib.items()
                    )
                ))
                if element.text:
                    result.append(element.text)
            elif event == 'end':
                result.append(u'</%s>' % element.tag)
                if element.tail:
                    result.append(element.tail)
            else:
                result.append(element)

        return ''.join(result)

    def __str__(self):
        return self.to_string()


def extract_fragments(input_filename):
    """Extracts theme fragments from input_filename."""
    open_fragments = {}
    closed_fragments = {}

    # iterparse would die on a HTML document
    parser = etree.HTMLParser(encoding='utf-8')
    buf = six.BytesIO()
    buf.write(etree.tostring(
        etree.parse(input_filename, parser).getroot()[0][0],
        encoding='utf-8'
    ))
    buf.seek(0)

    for event, element in etree.iterparse(buf, events=('start', 'end')):
        # Process begin and end elements
        if element.get('class', '') in ('theme-begin', 'theme-end'):
            if event != 'end':
                continue  # Process elements only once, on end event

            # Open new fragment
            if element.get('class', '') == 'theme-begin':
                fragment = Fragment(id=element.get('fid'), themes=element.text)

                # Append parents
                parent = element.getparent()
                parents = []
                while parent.get('id', None) != 'book-text':
                    cparent = copy.deepcopy(parent)
                    cparent.text = None
                    if 'id' in cparent.attrib:
                        del cparent.attrib['id']
                    parents.append(cparent)
                    parent = parent.getparent()

                parents.reverse()
                for parent in parents:
                    fragment.append('start', parent)

                open_fragments[fragment.id] = fragment

            # Close existing fragment
            else:
                try:
                    fragment = open_fragments[element.get('fid')]
                except KeyError:
                    print('%s: closed a fragment that was not open: #%s' % (
                        input_filename, element.get('fid')
                    ))
                else:
                    closed_fragments[fragment.id] = fragment
                    del open_fragments[fragment.id]

            # Append the element's tail text to all open fragments
            # (we don't want to lose any text)
            if element.tail:
                for fragment_id in open_fragments:
                    open_fragments[fragment_id].append('text', element.tail)

        # Process all elements except begin and end
        else:
            # Omit annotation tags
            if (len(element.get('name', '')) or
                    element.get('class', '') in ('annotation', 'anchor')):
                if event == 'end' and element.tail:
                    for fragment_id in open_fragments:
                        open_fragments[fragment_id].append(
                            'text', element.tail
                        )
            else:
                for fragment_id in open_fragments:
                    celem = copy.copy(element)
                    if 'id' in celem.attrib:
                        del celem.attrib['id']
                    open_fragments[fragment_id].append(
                        event, celem
                    )

    return closed_fragments, open_fragments

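# A minimal usage sketch for extract_fragments() (hypothetical; it assumes
# an HTML file produced by transform() above, with theme markers inside
# the #book-text element):
#
#     closed, still_open = extract_fragments('book.html')
#     for fid, fragment in closed.items():
#         print(fid, fragment.themes)
#         print(fragment.to_string())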

def add_anchor(element, prefix, with_link=True, with_target=True,
               link_text=None):
    parent = element.getparent()
    index = parent.index(element)

    if with_link:
        if link_text is None:
            link_text = prefix
        anchor = etree.Element('a', href='#%s' % prefix)
        anchor.set('class', 'anchor')
        anchor.text = six.text_type(link_text)
        parent.insert(index, anchor)

    if with_target:
        anchor_target = etree.Element('a', name='%s' % prefix)
        anchor_target.set('class', 'target')
        anchor_target.text = u' '
        parent.insert(index, anchor_target)


def any_ancestor(element, test):
    for ancestor in element.iterancestors():
        if test(ancestor):
            return True
    return False


def add_anchors(root):
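    """Adds numbered "f<n>" anchors to verse lines and paragraphs.

    Both share one running counter; every paragraph gets an anchor, while
    verse lines get one only when the counter is 1 or a multiple of 5.
    Content inside notes, mottos, dedications, frames, blockquotes and
    footnotes is skipped.
    """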
    counter = 1
    for element in root.iterdescendants():
        def f(e):
            return (
                e.get('class') in (
                    'note', 'motto', 'motto_podpis', 'dedication', 'frame'
                )
                or e.get('id') == 'nota_red'
                or e.tag == 'blockquote'
                or e.get('id') == 'footnotes'
            )
        if any_ancestor(element, f):
            continue

        if element.tag == 'div' and 'verse' in element.get('class', ''):
            if counter == 1 or counter % 5 == 0:
                add_anchor(element, "f%d" % counter, link_text=counter)
            counter += 1
        elif 'paragraph' in element.get('class', ''):
            add_anchor(element, "f%d" % counter, link_text=counter)
            counter += 1


def raw_printable_text(element):
    working = copy.deepcopy(element)
    for e in working.findall('a'):
        if e.get('class') in ('annotation', 'theme-begin'):
            e.text = ''
    return etree.tostring(working, method='text', encoding='unicode').strip()


def add_table_of_contents(root):
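    """Builds a table of contents from h2/h3 headings.

    Creates a <div id="toc"> with a nested list (h3 entries nest under the
    preceding h2), adds "s<n>" target anchors to the headings themselves,
    and inserts the TOC at the top of the document. Headings inside
    footnotes, the editorial note and person lists are skipped.
    """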
    sections = []
    counter = 1
    for element in root.iterdescendants():
        if element.tag in ('h2', 'h3'):
            if any_ancestor(
                    element,
                    lambda e: e.get('id') in (
                        'footnotes', 'nota_red'
                    ) or e.get('class') in ('person-list',)):
                continue

            element_text = raw_printable_text(element)
            if (element.tag == 'h3' and len(sections)
                    and sections[-1][1] == 'h2'):
                sections[-1][3].append(
                    (counter, element.tag, element_text, [])
                )
            else:
                sections.append((counter, element.tag, element_text, []))
            add_anchor(element, "s%d" % counter, with_link=False)
            counter += 1

    toc = etree.Element('div')
    toc.set('id', 'toc')
    toc_header = etree.SubElement(toc, 'h2')
    toc_header.text = u'Spis treści'
    toc_list = etree.SubElement(toc, 'ol')

    for n, section, text, subsections in sections:
        section_element = etree.SubElement(toc_list, 'li')
        add_anchor(section_element, "s%d" % n, with_target=False,
                   link_text=text)

        if len(subsections):
            subsection_list = etree.SubElement(section_element, 'ol')
            for n1, subsection, subtext, _ in subsections:
                subsection_element = etree.SubElement(subsection_list, 'li')
                add_anchor(subsection_element, "s%d" % n1, with_target=False,
                           link_text=subtext)

    root.insert(0, toc)


def add_table_of_themes(root):
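    """Builds an index of themes.

    Creates a <div id="themes"> listing every theme name found in
    theme-begin anchors, each followed by numbered links to its fragments,
    sorted with sortify when that package is available, and inserts the
    index at the top of the document.
    """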
    try:
        from sortify import sortify
    except ImportError:
        def sortify(x):
            return x

    book_themes = {}
    for fragment in root.findall('.//a[@class="theme-begin"]'):
        if not fragment.text:
            continue
        theme_names = [s.strip() for s in fragment.text.split(',')]
        for theme_name in theme_names:
            book_themes.setdefault(theme_name, []).append(fragment.get('name'))
    book_themes = list(book_themes.items())
    book_themes.sort(key=lambda s: sortify(s[0]))
    themes_div = etree.Element('div', id="themes")
    themes_ol = etree.SubElement(themes_div, 'ol')
    for theme_name, fragments in book_themes:
        themes_li = etree.SubElement(themes_ol, 'li')
        themes_li.text = "%s: " % theme_name
        for i, fragment in enumerate(fragments):
            item = etree.SubElement(themes_li, 'a', href="#%s" % fragment)
            item.text = str(i + 1)
            item.tail = ' '
    root.insert(0, themes_div)


def extract_annotations(html_path):
    """Extracts annotations from the HTML for the annotations dictionary.

    For each annotation, yields a tuple of:
    anchor, footnote type, valid qualifiers, text, html.
    """
    from .fn_qualifiers import FN_QUALIFIERS

    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(html_path, parser)
    footnotes = tree.find('//*[@id="footnotes"]')
    re_qualifier = re.compile(r'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
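    # Illustrative example (assuming 'łac.' appears in FN_QUALIFIERS): for a
    # footnote text like "verba (łac.)" followed by an em dash and the
    # definition, re_qualifier captures 'łac.' as the qualifier candidate.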
    if footnotes is not None:
        for footnote in footnotes.findall('div'):
            fn_type = footnote.get('class').split('-')[1]
            anchor = footnote.find('a[@class="annotation"]').get('href')[1:]
            del footnote[:2]
            footnote.text = None
            if len(footnote) and footnote[-1].tail == '\n':
                footnote[-1].tail = None
            text_str = etree.tostring(footnote, method='text',
                                      encoding='unicode').strip()
            html_str = etree.tostring(footnote, method='html',
                                      encoding='unicode').strip()

            match = re_qualifier.match(text_str)
            if match:
                qualifier_str = match.group(1)
                qualifiers = []
                for candidate in re.split('[;,]', qualifier_str):
                    candidate = candidate.strip()
                    if candidate in FN_QUALIFIERS:
                        qualifiers.append(candidate)
                    elif candidate.startswith('z '):
                        subcandidate = candidate.split()[1]
                        if subcandidate in FN_QUALIFIERS:
                            qualifiers.append(subcandidate)
            else:
                qualifiers = []

            yield anchor, fn_type, qualifiers, text_str, html_str
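

# A minimal usage sketch for extract_annotations() (hypothetical; it assumes
# an HTML file with a #footnotes section, as produced by transform() above):
#
#     for anchor, fn_type, qualifiers, text, html_str in \
#             extract_annotations('book.html'):
#         print(anchor, fn_type, qualifiers, text)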