X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/28d85f12957a4b8609b4f419e70dd42a22b57765..9c3ec340c866d7382a53bcbb6afae839c31152fb:/librarian/html.py

diff --git a/librarian/html.py b/librarian/html.py
index 24d506f..096e399 100644
--- a/librarian/html.py
+++ b/librarian/html.py
@@ -3,9 +3,10 @@
 # This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
 # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
 #
+from __future__ import print_function, unicode_literals
+
 import os
 import re
-import cStringIO
 import copy
 
 from lxml import etree
@@ -13,6 +14,8 @@
 from librarian import XHTMLNS, ParseError, OutputFile
 from librarian import functions
 from lxml.etree import XMLSyntaxError, XSLTApplyError
 
+import six
+
 functions.reg_substitute_entities()
 functions.reg_person_name()
@@ -23,13 +26,26 @@ STYLESHEETS = {
     'partial': 'xslt/wl2html_partial.xslt'
 }
 
+
 def get_stylesheet(name):
     return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])
 
+
 def html_has_content(text):
     return etree.ETXPath('//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)})(text)
 
-def transform(wldoc, stylesheet='legacy', options=None, flags=None):
+
+def transform_abstrakt(abstrakt_element):
+    style_filename = get_stylesheet('legacy')
+    style = etree.parse(style_filename)
+    xml = etree.tostring(abstrakt_element, encoding='unicode')
+    document = etree.parse(six.StringIO(xml.replace('abstrakt', 'dlugi_cytat')))  # HACK
+    result = document.xslt(style)
+    html = re.sub('', '', etree.tostring(result, encoding='unicode'))
+    return re.sub(']*>', '', html)
+
+
+def transform(wldoc, stylesheet='legacy', options=None, flags=None, css=None):
     """Transforms the WL document to XHTML.
 
     If output_filename is None, returns an XML,
@@ -54,23 +70,29 @@ def transform(wldoc, stylesheet='legacy', options=None, flags=None):
         if not options:
             options = {}
 
-        result = document.transform(style, **options)
-        del document # no longer needed large object :)
+        options.setdefault('gallery', "''")
+
+        css = css or 'https://static.wolnelektury.pl/css/compressed/book_text.css'
+        css = "'%s'" % css
+        result = document.transform(style, css=css, **options)
+        del document  # no longer needed large object :)
 
         if html_has_content(result):
             add_anchors(result.getroot())
             add_table_of_themes(result.getroot())
             add_table_of_contents(result.getroot())
 
-            return OutputFile.from_string(etree.tostring(result, method='html',
-                xml_declaration=False, pretty_print=True, encoding='utf-8'))
+            return OutputFile.from_bytes(etree.tostring(
+                result, method='html', xml_declaration=False, pretty_print=True, encoding='utf-8'))
         else:
             return None
     except KeyError:
         raise ValueError("'%s' is not a valid stylesheet.")
-    except (XMLSyntaxError, XSLTApplyError), e:
+    except (XMLSyntaxError, XSLTApplyError) as e:
         raise ParseError(e)
 
+
+@six.python_2_unicode_compatible
 class Fragment(object):
     def __init__(self, id, themes):
         super(Fragment, self).__init__()
@@ -90,7 +112,7 @@ class Fragment(object):
             try:
                 stack.pop()
             except IndexError:
-                print 'CLOSED NON-OPEN TAG:', element
+                print('CLOSED NON-OPEN TAG:', element)
 
         stack.reverse()
         return self.events + stack
@@ -99,7 +121,8 @@ class Fragment(object):
         result = []
         for event, element in self.closed_events():
             if event == 'start':
-                result.append(u'<%s %s>' % (element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
+                result.append(u'<%s %s>' % (
+                    element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
                 if element.text:
                     result.append(element.text)
             elif event == 'end':
@@ -111,7 +134,7 @@ class Fragment(object):
 
         return ''.join(result)
 
-    def __unicode__(self):
+    def __str__(self):
         return self.to_string()
 
 
@@ -122,14 +145,15 @@ def extract_fragments(input_filename):
 
     # iterparse would die on a HTML document
     parser = etree.HTMLParser(encoding='utf-8')
-    buf = cStringIO.StringIO()
+    buf = six.BytesIO()
     buf.write(etree.tostring(etree.parse(input_filename, parser).getroot()[0][0], encoding='utf-8'))
     buf.seek(0)
 
     for event, element in etree.iterparse(buf, events=('start', 'end')):
         # Process begin and end elements
         if element.get('class', '') in ('theme-begin', 'theme-end'):
-            if not event == 'end': continue # Process elements only once, on end event
+            if not event == 'end':
+                continue  # Process elements only once, on end event
 
             # Open new fragment
             if element.get('class', '') == 'theme-begin':
@@ -155,7 +179,7 @@ def extract_fragments(input_filename):
                 try:
                     fragment = open_fragments[element.get('fid')]
                 except KeyError:
-                    print '%s:closed not open fragment #%s' % (input_filename, element.get('fid'))
+                    print('%s:closed not open fragment #%s' % (input_filename, element.get('fid')))
                 else:
                     closed_fragments[fragment.id] = fragment
                     del open_fragments[fragment.id]
@@ -165,7 +189,6 @@ def extract_fragments(input_filename):
                 for fragment_id in open_fragments:
                     open_fragments[fragment_id].append('text', element.tail)
 
-
         # Process all elements except begin and end
         else:
             # Omit annotation tags
@@ -190,7 +213,7 @@ def add_anchor(element, prefix, with_link=True, with_target=True, link_text=None
             link_text = prefix
         anchor = etree.Element('a', href='#%s' % prefix)
         anchor.set('class', 'anchor')
-        anchor.text = unicode(link_text)
+        anchor.text = six.text_type(link_text)
         parent.insert(index, anchor)
 
     if with_target:
@@ -210,9 +233,10 @@ def any_ancestor(element, test):
 def add_anchors(root):
     counter = 1
     for element in root.iterdescendants():
-        if any_ancestor(element, lambda e: e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication')
-                or e.get('id') == 'nota_red'
-                or e.tag == 'blockquote'):
+        def f(e):
+            return e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication', 'frame') or \
+                e.get('id') == 'nota_red' or e.tag == 'blockquote'
+        if any_ancestor(element, f):
             continue
 
         if element.tag == 'p' and 'verse' in element.get('class', ''):
@@ -229,7 +253,7 @@ def raw_printable_text(element):
     for e in working.findall('a'):
         if e.get('class') in ('annotation', 'theme-begin'):
             e.text = ''
-    return etree.tostring(working, method='text', encoding=unicode).strip()
+    return etree.tostring(working, method='text', encoding='unicode').strip()
 
 
 def add_table_of_contents(root):
@@ -237,7 +261,8 @@ def add_table_of_contents(root):
     counter = 1
     for element in root.iterdescendants():
         if element.tag in ('h2', 'h3'):
-            if any_ancestor(element, lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
+            if any_ancestor(element,
+                            lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
                 continue
 
             element_text = raw_printable_text(element)
@@ -260,9 +285,9 @@ def add_table_of_contents(root):
 
         if len(subsections):
             subsection_list = etree.SubElement(section_element, 'ol')
-            for n, subsection, text, _ in subsections:
+            for n1, subsection, subtext, _ in subsections:
                 subsection_element = etree.SubElement(subsection_list, 'li')
-                add_anchor(subsection_element, "s%d" % n, with_target=False, link_text=text)
+                add_anchor(subsection_element, "s%d" % n1, with_target=False, link_text=subtext)
 
     root.insert(0, toc)
 
@@ -271,7 +296,8 @@ def add_table_of_themes(root):
     try:
         from sortify import sortify
     except ImportError:
-        sortify = lambda x: x
+        def sortify(x):
+            return x
 
     book_themes = {}
     for fragment in root.findall('.//a[@class="theme-begin"]'):
@@ -280,7 +306,7 @@ def add_table_of_themes(root):
         theme_names = [s.strip() for s in fragment.text.split(',')]
        for theme_name in theme_names:
             book_themes.setdefault(theme_name, []).append(fragment.get('name'))
-    book_themes = book_themes.items()
+    book_themes = list(book_themes.items())
     book_themes.sort(key=lambda s: sortify(s[0]))
     themes_div = etree.Element('div', id="themes")
     themes_ol = etree.SubElement(themes_div, 'ol')
@@ -306,7 +332,7 @@ def extract_annotations(html_path):
     parser = etree.HTMLParser(encoding='utf-8')
     tree = etree.parse(html_path, parser)
     footnotes = tree.find('//*[@id="footnotes"]')
-    re_qualifier = re.compile(ur'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
+    re_qualifier = re.compile(r'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
     if footnotes is not None:
         for footnote in footnotes.findall('div'):
             fn_type = footnote.get('class').split('-')[1]
@@ -315,8 +341,8 @@ def extract_annotations(html_path):
                 footnote.text = None
             if len(footnote) and footnote[-1].tail == '\n':
                 footnote[-1].tail = None
-            text_str = etree.tostring(footnote, method='text', encoding=unicode).strip()
-            html_str = etree.tostring(footnote, method='html', encoding=unicode).strip()
+            text_str = etree.tostring(footnote, method='text', encoding='unicode').strip()
+            html_str = etree.tostring(footnote, method='html', encoding='unicode').strip()
 
             match = re_qualifier.match(text_str)
             if match:
@@ -334,4 +360,3 @@ def extract_annotations(html_path):
                 qualifiers = []
 
             yield anchor, fn_type, qualifiers, text_str, html_str
-
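
Below is a minimal usage sketch (not part of the diff) of the transform() signature this change introduces, with the new optional css parameter. WLDocument.from_file, the 'book.xml' path and OutputFile.get_bytes() are assumptions about the surrounding librarian API, not something this diff defines.

    # Usage sketch only -- names outside html.transform() are assumed, not shown in the diff.
    from librarian import html
    from librarian.parser import WLDocument

    doc = WLDocument.from_file('book.xml')  # hypothetical input file
    output = html.transform(
        doc,
        css='https://static.wolnelektury.pl/css/compressed/book_text.css')  # default if omitted
    if output is not None:  # transform() returns None when the document has no content
        with open('book.html', 'wb') as f:
            f.write(output.get_bytes())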