Drop lots of legacy code. Support Python 3.7-3.11.
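
A usage sketch of the new gallery-aware transform() signature. The input
path, output directory and URLs below are placeholders, and the
WLDocument.from_file() loader from librarian.parser is assumed here rather
than being part of this change:

    from librarian import html
    from librarian.parser import WLDocument  # assumed loader, unchanged by this commit

    # Parse a WL XML document (the path is a placeholder).
    doc = WLDocument.from_file('book.xml')

    # Resized image variants are written under gallery_path and referenced
    # through gallery_url in the generated srcset attributes; relative
    # <ilustr> sources are resolved against base_url before being fetched.
    output = html.transform(
        doc,
        gallery_path='out/img/',
        gallery_url='img/',
        base_url='https://example.com/media/book/',
    )
    # transform() returns an OutputFile wrapping the generated XHTML.
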
[librarian.git] / src/librarian/html.py
index f8e4a83..c2f41c0 100644
@@ -1,20 +1,19 @@
-# -*- coding: utf-8 -*-
-#
 # This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
 #
-from __future__ import print_function, unicode_literals
-
+import io
 import os
 import re
 import copy
+import urllib.parse
+import urllib.request
 
 from lxml import etree
 from librarian import XHTMLNS, ParseError, OutputFile
 from librarian import functions
+from PIL import Image
 
 from lxml.etree import XMLSyntaxError, XSLTApplyError
-import six
 
 
 functions.reg_substitute_entities()
@@ -22,8 +21,6 @@ functions.reg_person_name()
 
 STYLESHEETS = {
     'legacy': 'xslt/book2html.xslt',
-    'full': 'xslt/wl2html_full.xslt',
-    'partial': 'xslt/wl2html_partial.xslt'
 }
 
 
@@ -32,20 +29,66 @@ def get_stylesheet(name):
 
 
 def html_has_content(text):
-    return etree.ETXPath('//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)})(text)
+    return etree.ETXPath(
+        '//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)}
+    )(text)
 
 
 def transform_abstrakt(abstrakt_element):
     style_filename = get_stylesheet('legacy')
     style = etree.parse(style_filename)
     xml = etree.tostring(abstrakt_element, encoding='unicode')
-    document = etree.parse(six.StringIO(xml.replace('abstrakt', 'dlugi_cytat')))  # HACK
+    document = etree.parse(io.StringIO(
+        xml.replace('abstrakt', 'dlugi_cytat')
+    ))  # HACK
     result = document.xslt(style)
-    html = re.sub('<a name="sec[0-9]*"/>', '', etree.tostring(result, encoding='unicode'))
+    html = re.sub('<a name="sec[0-9]*"/>', '',
+                  etree.tostring(result, encoding='unicode'))
     return re.sub('</?blockquote[^>]*>', '', html)
 
 
-def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None):
+def add_image_sizes(tree, gallery_path, gallery_url, base_url):
+    widths = [360, 600, 1200, 1800, 2400]
+
+    for i, ilustr in enumerate(tree.findall('//ilustr')):
+        rel_path = ilustr.attrib['src']
+        img_url = urllib.parse.urljoin(base_url, rel_path)
+
+        f = urllib.request.urlopen(img_url)
+        img = Image.open(f)
+        ext = {'GIF': 'gif', 'PNG': 'png'}.get(img.format, 'jpg')
+
+        srcset = []
+        # Needed widths: the predefined breakpoints plus the original width,
+        # capped at the smaller of the largest breakpoint and the original.
+        img_widths = [
+            w for w in
+            sorted(
+                set(widths + [img.size[0]])
+            )
+            if w <= min(widths[-1], img.size[0])
+        ]
+        largest_url = None
+        for w in img_widths:
+            fname = '%d.W%d.%s' % (i, w, ext)
+            fpath = gallery_path + fname
+            if not os.path.exists(fpath):
+                height = round(img.size[1] * w / img.size[0])
+                th = img.resize((w, height))
+                th.save(fpath)
+            th_url = gallery_url + fname
+            srcset.append(" ".join((
+                th_url,
+                '%dw' % w
+            )))
+            largest_url = th_url
+        ilustr.attrib['srcset'] = ", ".join(srcset)
+        ilustr.attrib['src'] = largest_url
+
+        f.close()
+
+
+def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None, gallery_path='img/', gallery_url='img/', base_url='file://./'):
     """Transforms the WL document to XHTML.
 
     If output_filename is None, returns an XML,
@@ -67,12 +110,22 @@ def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None):
 
         document.clean_ed_note()
         document.clean_ed_note('abstrakt')
-
+        document.fix_pa_akap()
+
         if not options:
             options = {}
-        options.setdefault('gallery', "''")
 
-        css = css or 'https://static.wolnelektury.pl/css/compressed/book_text.css'
+        try:
+            os.makedirs(gallery_path)
+        except OSError:
+            pass
+
+        add_image_sizes(document.edoc, gallery_path, gallery_url, base_url)
+
+        css = (
+            css
+            or 'https://static.wolnelektury.pl/css/compressed/book_text.css'
+        )
         css = "'%s'" % css
         result = document.transform(style, css=css, **options)
         del document  # no longer needed large object :)
@@ -83,7 +136,9 @@ def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None):
             add_table_of_contents(result.getroot())
 
             return OutputFile.from_bytes(etree.tostring(
-                result, method='html', xml_declaration=False, pretty_print=True, encoding='utf-8'))
+                result, method='html', xml_declaration=False,
+                pretty_print=True, encoding='utf-8'
+            ))
         else:
             return None
     except KeyError:
@@ -92,8 +147,7 @@ def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None):
         raise ParseError(e)
 
 
-@six.python_2_unicode_compatible
-class Fragment(object):
+class Fragment:
     def __init__(self, id, themes):
         super(Fragment, self).__init__()
         self.id = id
@@ -121,12 +175,17 @@ class Fragment(object):
         result = []
         for event, element in self.closed_events():
             if event == 'start':
-                result.append(u'<%s %s>' % (
-                    element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
+                result.append('<%s %s>' % (
+                    element.tag,
+                    ' '.join(
+                        '%s="%s"' % (k, v)
+                        for k, v in element.attrib.items()
+                    )
+                ))
                 if element.text:
                     result.append(element.text)
             elif event == 'end':
-                result.append(u'</%s>' % element.tag)
+                result.append('</%s>' % element.tag)
                 if element.tail:
                     result.append(element.tail)
             else:
@@ -145,8 +204,11 @@ def extract_fragments(input_filename):
 
     # iterparse would die on a HTML document
     parser = etree.HTMLParser(encoding='utf-8')
-    buf = six.BytesIO()
-    buf.write(etree.tostring(etree.parse(input_filename, parser).getroot()[0][0], encoding='utf-8'))
+    buf = io.BytesIO()
+    buf.write(etree.tostring(
+        etree.parse(input_filename, parser).getroot()[0][0],
+        encoding='utf-8'
+    ))
     buf.seek(0)
 
     for event, element in etree.iterparse(buf, events=('start', 'end')):
@@ -165,6 +227,8 @@ def extract_fragments(input_filename):
                 while parent.get('id', None) != 'book-text':
                     cparent = copy.deepcopy(parent)
                     cparent.text = None
+                    if 'id' in cparent.attrib:
+                        del cparent.attrib['id']
                     parents.append(cparent)
                     parent = parent.getparent()
 
@@ -172,19 +236,23 @@ def extract_fragments(input_filename):
                 for parent in parents:
                     fragment.append('start', parent)
 
-                open_fragments[fragment.id] = fragment
+                if fragment.id not in open_fragments:
+                    open_fragments[fragment.id] = fragment
 
             # Close existing fragment
             else:
                 try:
                     fragment = open_fragments[element.get('fid')]
                 except KeyError:
-                    print('%s:closed not open fragment #%s' % (input_filename, element.get('fid')))
+                    print('%s:closed not open fragment #%s' % (
+                        input_filename, element.get('fid')
+                    ))
                 else:
                     closed_fragments[fragment.id] = fragment
                     del open_fragments[fragment.id]
 
-            # Append element tail to lost_text (we don't want to lose any text)
+            # Append element tail to open fragments
+            # (we don't want to lose any text)
             if element.tail:
                 for fragment_id in open_fragments:
                     open_fragments[fragment_id].append('text', element.tail)
@@ -192,19 +260,27 @@ def extract_fragments(input_filename):
         # Process all elements except begin and end
         else:
             # Omit annotation tags
-            if (len(element.get('name', '')) or 
+            if (len(element.get('name', '')) or
                     element.get('class', '') in ('annotation', 'anchor')):
                 if event == 'end' and element.tail:
                     for fragment_id in open_fragments:
-                        open_fragments[fragment_id].append('text', element.tail)
+                        open_fragments[fragment_id].append(
+                            'text', element.tail
+                        )
             else:
                 for fragment_id in open_fragments:
-                    open_fragments[fragment_id].append(event, copy.copy(element))
+                    celem = copy.copy(element)
+                    if 'id' in celem.attrib:
+                        del celem.attrib['id']
+                    open_fragments[fragment_id].append(
+                        event, celem
+                    )
 
     return closed_fragments, open_fragments
 
 
-def add_anchor(element, prefix, with_link=True, with_target=True, link_text=None):
+def add_anchor(element, prefix, with_link=True, with_target=True,
+               link_text=None):
     parent = element.getparent()
     index = parent.index(element)
 
@@ -213,13 +289,13 @@ def add_anchor(element, prefix, with_link=True, with_target=True, link_text=None
             link_text = prefix
         anchor = etree.Element('a', href='#%s' % prefix)
         anchor.set('class', 'anchor')
-        anchor.text = six.text_type(link_text)
+        anchor.text = str(link_text)
         parent.insert(index, anchor)
 
     if with_target:
         anchor_target = etree.Element('a', name='%s' % prefix)
         anchor_target.set('class', 'target')
-        anchor_target.text = u' '
+        anchor_target.text = ' '
         parent.insert(index, anchor_target)
 
 
@@ -232,20 +308,36 @@ def any_ancestor(element, test):
 
 def add_anchors(root):
     counter = 1
+    visible_counter = 1
     for element in root.iterdescendants():
         def f(e):
-            return e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication', 'frame') or \
-                e.get('id') == 'nota_red' or e.tag == 'blockquote'
+            return (
+                e.get('class') in (
+                    'note', 'motto', 'motto_podpis', 'dedication', 'frame'
+                )
+                or e.get('id') == 'nota_red'
+                or e.tag == 'blockquote'
+                or e.get('id') == 'footnotes'
+            )
+
+        if element.get('class') == 'numeracja':
+            try:
+                visible_counter = int(element.get('data-start'))
+            except (TypeError, ValueError):
+                visible_counter = 1
+
         if any_ancestor(element, f):
             continue
 
         if element.tag == 'div' and 'verse' in element.get('class', ''):
-            if counter == 1 or counter % 5 == 0:
-                add_anchor(element, "f%d" % counter, link_text=counter)
+            if visible_counter == 1 or visible_counter % 5 == 0:
+                add_anchor(element, "f%d" % counter, link_text=visible_counter)
             counter += 1
+            visible_counter += 1
         elif 'paragraph' in element.get('class', ''):
-            add_anchor(element, "f%d" % counter, link_text=counter)
+            add_anchor(element, "f%d" % counter, link_text=visible_counter)
             counter += 1
+            visible_counter += 1
 
 
 def raw_printable_text(element):
@@ -261,13 +353,19 @@ def add_table_of_contents(root):
     counter = 1
     for element in root.iterdescendants():
         if element.tag in ('h2', 'h3'):
-            if any_ancestor(element,
-                            lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
+            if any_ancestor(
+                    element,
+                    lambda e: e.get('id') in (
+                        'footnotes', 'nota_red'
+                    ) or e.get('class') in ('person-list',)):
                 continue
 
             element_text = raw_printable_text(element)
-            if element.tag == 'h3' and len(sections) and sections[-1][1] == 'h2':
-                sections[-1][3].append((counter, element.tag, element_text, []))
+            if (element.tag == 'h3' and len(sections)
+                    and sections[-1][1] == 'h2'):
+                sections[-1][3].append(
+                    (counter, element.tag, element_text, [])
+                )
             else:
                 sections.append((counter, element.tag, element_text, []))
             add_anchor(element, "s%d" % counter, with_link=False)
@@ -276,22 +374,24 @@ def add_table_of_contents(root):
     toc = etree.Element('div')
     toc.set('id', 'toc')
     toc_header = etree.SubElement(toc, 'h2')
-    toc_header.text = u'Spis treści'
+    toc_header.text = 'Spis treści'
     toc_list = etree.SubElement(toc, 'ol')
 
     for n, section, text, subsections in sections:
         section_element = etree.SubElement(toc_list, 'li')
-        add_anchor(section_element, "s%d" % n, with_target=False, link_text=text)
+        add_anchor(section_element, "s%d" % n, with_target=False,
+                   link_text=text)
 
         if len(subsections):
             subsection_list = etree.SubElement(section_element, 'ol')
             for n1, subsection, subtext, _ in subsections:
                 subsection_element = etree.SubElement(subsection_list, 'li')
-                add_anchor(subsection_element, "s%d" % n1, with_target=False, link_text=subtext)
+                add_anchor(subsection_element, "s%d" % n1, with_target=False,
+                           link_text=subtext)
 
     root.insert(0, toc)
 
-    
+
 def add_table_of_themes(root):
     try:
         from sortify import sortify
@@ -341,8 +441,10 @@ def extract_annotations(html_path):
             footnote.text = None
             if len(footnote) and footnote[-1].tail == '\n':
                 footnote[-1].tail = None
-            text_str = etree.tostring(footnote, method='text', encoding='unicode').strip()
-            html_str = etree.tostring(footnote, method='html', encoding='unicode').strip()
+            text_str = etree.tostring(footnote, method='text',
+                                      encoding='unicode').strip()
+            html_str = etree.tostring(footnote, method='html',
+                                      encoding='unicode').strip()
 
             match = re_qualifier.match(text_str)
             if match:
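
For reference, the width selection used by add_image_sizes above, shown on a
made-up 1500 px wide original (the value is illustrative only):

    widths = [360, 600, 1200, 1800, 2400]
    original_width = 1500  # example value, not taken from any real image

    # Predefined breakpoints plus the original width, capped at the
    # smaller of the largest breakpoint and the original width.
    img_widths = [
        w for w in sorted(set(widths + [original_width]))
        if w <= min(widths[-1], original_width)
    ]
    print(img_widths)  # [360, 600, 1200, 1500]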