X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/895f081f74ce3f116bebb4ef76f5ab3d2e392fef..2c15db814c7f40406b6a86383e3e4bc8825b7faf:/librarian/html.py

diff --git a/librarian/html.py b/librarian/html.py
index 7733fee..ed7b4d6 100644
--- a/librarian/html.py
+++ b/librarian/html.py
@@ -1,42 +1,24 @@
 # -*- coding: utf-8 -*-
 #
-# This file is part of Librarian.
-#
-# Copyright © 2008,2009,2010 Fundacja Nowoczesna Polska
-#
-# For full list of contributors see AUTHORS file.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
 #
+from __future__ import print_function, unicode_literals
+
 import os
-import cStringIO
 import re
 import copy
 
 from lxml import etree
-from librarian.parser import WLDocument
-from librarian import XHTMLNS, ParseError
+from librarian import XHTMLNS, ParseError, OutputFile
+from librarian import functions
 from lxml.etree import XMLSyntaxError, XSLTApplyError
+import six
+
 
-ENTITY_SUBSTITUTIONS = [
-    (u'---', u'—'),
-    (u'--', u'–'),
-    (u'...', u'…'),
-    (u',,', u'„'),
-    (u'"', u'”'),
-]
+functions.reg_substitute_entities()
+functions.reg_person_name()
 
 STYLESHEETS = {
     'legacy': 'xslt/book2html.xslt',
@@ -44,55 +26,70 @@ STYLESHEETS = {
     'partial': 'xslt/wl2html_partial.xslt'
 }
 
+
 def get_stylesheet(name):
     return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])
 
-def substitute_entities(context, text):
-    """XPath extension function converting all entites in passed text."""
-    if isinstance(text, list):
-        text = ''.join(text)
-    for entity, substitutution in ENTITY_SUBSTITUTIONS:
-        text = text.replace(entity, substitutution)
-    return text
-
-# Register substitute_entities function with lxml
-ns = etree.FunctionNamespace('http://wolnelektury.pl/functions')
-ns['substitute_entities'] = substitute_entities
-
-def transform(input, output_filename=None, is_file=True, \
-    parse_dublincore=True, stylesheet='legacy', options={}):
-    """Transforms file input_filename in XML to output_filename in XHTML."""
+
+def html_has_content(text):
+    return etree.ETXPath('//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)})(text)
+
+
+def transform_abstrakt(abstrakt_element):
+    style_filename = get_stylesheet('legacy')
+    style = etree.parse(style_filename)
+    xml = etree.tostring(abstrakt_element, encoding='unicode')
+    document = etree.parse(six.StringIO(xml.replace('abstrakt', 'dlugi_cytat')))  # HACK
+    result = document.xslt(style)
+    html = re.sub('<a name="sec[0-9]*"/>', '', etree.tostring(result, encoding='unicode'))
+    return re.sub('</?blockquote[^>]*>', '', html)
+
+
+def transform(wldoc, stylesheet='legacy', options=None, flags=None):
+    """Transforms the WL document to XHTML.
+
+    Returns an OutputFile with the rendered XHTML,
+    or None if the document has no content.
+    """
     # Parse XSLT
     try:
         style_filename = get_stylesheet(stylesheet)
         style = etree.parse(style_filename)
-        if is_file:
-            document = WLDocument.from_file(input, True, \
-                parse_dublincore=parse_dublincore)
-        else:
-            document = WLDocument.from_string(input, True, \
-                parse_dublincore=parse_dublincore)
+        document = copy.deepcopy(wldoc)
+        del wldoc
+        document.swap_endlines()
+
+        if flags:
+            for flag in flags:
+                document.edoc.getroot().set(flag, 'yes')
+
+        document.clean_ed_note()
+        document.clean_ed_note('abstrakt')
+        if not options:
+            options = {}
+        options.setdefault('gallery', "''")
         result = document.transform(style, **options)
-        del document # no longer needed large object :)
-
-        if etree.ETXPath('//p|//{%s}p' % str(XHTMLNS))(result) is not None:
+        del document  # no longer needed large object :)
+
+        if html_has_content(result):
             add_anchors(result.getroot())
+            add_table_of_themes(result.getroot())
             add_table_of_contents(result.getroot())
-
-            if output_filename is not None:
-                result.write(output_filename, xml_declaration=False, pretty_print=True, encoding='utf-8')
-            else:
-                return result
-            return True
+
+            return OutputFile.from_bytes(etree.tostring(
+                result, method='html', xml_declaration=False, pretty_print=True, encoding='utf-8'))
         else:
-            return ""
+            return None
     except KeyError:
         raise ValueError("'%s' is not a valid stylesheet.")
-    except (XMLSyntaxError, XSLTApplyError), e:
+    except (XMLSyntaxError, XSLTApplyError) as e:
         raise ParseError(e)
 
+
+@six.python_2_unicode_compatible
 class Fragment(object):
     def __init__(self, id, themes):
         super(Fragment, self).__init__()
@@ -112,7 +109,7 @@ class Fragment(object):
                 try:
                     stack.pop()
                 except IndexError:
-                    print 'CLOSED NON-OPEN TAG:', element
+                    print('CLOSED NON-OPEN TAG:', element)
 
         stack.reverse()
         return self.events + stack
@@ -121,7 +118,8 @@ class Fragment(object):
         result = []
         for event, element in self.closed_events():
             if event == 'start':
-                result.append(u'<%s %s>' % (element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
+                result.append(u'<%s %s>' % (
+                    element.tag, ' '.join('%s="%s"' % (k, v) for k, v in element.attrib.items())))
                 if element.text:
                     result.append(element.text)
             elif event == 'end':
@@ -133,7 +131,7 @@ class Fragment(object):
 
         return ''.join(result)
 
-    def __unicode__(self):
+    def __str__(self):
         return self.to_string()
 
 
@@ -142,24 +140,34 @@ def extract_fragments(input_filename):
     open_fragments = {}
     closed_fragments = {}
 
-    for event, element in etree.iterparse(input_filename, events=('start', 'end')):
+    # iterparse would die on a HTML document
+    parser = etree.HTMLParser(encoding='utf-8')
+    buf = six.BytesIO()
+    buf.write(etree.tostring(etree.parse(input_filename, parser).getroot()[0][0], encoding='utf-8'))
+    buf.seek(0)
+
+    for event, element in etree.iterparse(buf, events=('start', 'end')):
         # Process begin and end elements
         if element.get('class', '') in ('theme-begin', 'theme-end'):
-            if not event == 'end': continue # Process elements only once, on end event
+            if not event == 'end':
+                continue  # Process elements only once, on end event
 
             # Open new fragment
            if element.get('class', '') == 'theme-begin':
                fragment = Fragment(id=element.get('fid'), themes=element.text)

                # Append parents
-                if element.getparent().get('id', None) != 'book-text':
-                    parents = [element.getparent()]
-                    while parents[-1].getparent().get('id', None) != 'book-text':
-                        parents.append(parents[-1].getparent())
-
-                    parents.reverse()
-                    for parent in parents:
-                        fragment.append('start', parent)
+                parent = element.getparent()
+                parents = []
+                while parent.get('id', None) != 'book-text':
+                    cparent = copy.deepcopy(parent)
+                    cparent.text = None
+                    parents.append(cparent)
+                    parent = parent.getparent()
+
+                parents.reverse()
+                for parent in parents:
+                    fragment.append('start', parent)
 
                 open_fragments[fragment.id] = fragment
 
@@ -168,7 +176,7 @@
                 try:
                     fragment = open_fragments[element.get('fid')]
                 except KeyError:
-                    print '%s:closed not open fragment #%s' % (input_filename, element.get('fid'))
+                    print('%s:closed not open fragment #%s' % (input_filename, element.get('fid')))
                 else:
                     closed_fragments[fragment.id] = fragment
                     del open_fragments[fragment.id]
@@ -178,11 +186,11 @@
                 for fragment_id in open_fragments:
                     open_fragments[fragment_id].append('text', element.tail)
 
-        # Process all elements except begin and end
         else:
             # Omit annotation tags
-            if len(element.get('name', '')) or element.get('class', '') == 'annotation':
+            if (len(element.get('name', '')) or
+                    element.get('class', '') in ('annotation', 'anchor')):
                 if event == 'end' and element.tail:
                     for fragment_id in open_fragments:
                         open_fragments[fragment_id].append('text', element.tail)
@@ -194,25 +202,22 @@ def add_anchor(element, prefix, with_link=True, with_target=True, link_text=None):
+    parent = element.getparent()
+    index = parent.index(element)
+
     if with_link:
         if link_text is None:
             link_text = prefix
         anchor = etree.Element('a', href='#%s' % prefix)
         anchor.set('class', 'anchor')
-        anchor.text = unicode(link_text)
-        if element.text:
-            anchor.tail = element.text
-            element.text = u''
-        element.insert(0, anchor)
-
+        anchor.text = six.text_type(link_text)
+        parent.insert(index, anchor)
+
     if with_target:
         anchor_target = etree.Element('a', name='%s' % prefix)
         anchor_target.set('class', 'target')
         anchor_target.text = u' '
-        if element.text:
-            anchor_target.tail = element.text
-            element.text = u''
-        element.insert(0, anchor_target)
+        parent.insert(index, anchor_target)
 
 
 def any_ancestor(element, test):
@@ -225,10 +230,12 @@ def add_anchors(root):
     counter = 1
     for element in root.iterdescendants():
-        if any_ancestor(element, lambda e: e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication')
-                        or e.tag == 'blockquote'):
+        def f(e):
+            return e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication', 'frame') or \
+                e.get('id') == 'nota_red' or e.tag == 'blockquote'
+        if any_ancestor(element, f):
             continue
-
+
         if element.tag == 'p' and 'verse' in element.get('class', ''):
             if counter == 1 or counter % 5 == 0:
                 add_anchor(element, "f%d" % counter, link_text=counter)
@@ -238,21 +245,31 @@ def add_anchors(root):
             counter += 1
 
 
+def raw_printable_text(element):
+    working = copy.deepcopy(element)
+    for e in working.findall('a'):
+        if e.get('class') in ('annotation', 'theme-begin'):
+            e.text = ''
+    return etree.tostring(working, method='text', encoding='unicode').strip()
+
+
 def add_table_of_contents(root):
     sections = []
     counter = 1
     for element in root.iterdescendants():
         if element.tag in ('h2', 'h3'):
-            if any_ancestor(element, lambda e: e.get('id') in ('footnotes',) or e.get('class') in ('person-list',)):
+            if any_ancestor(element,
+                            lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
                 continue
-
+
+            element_text = raw_printable_text(element)
             if element.tag == 'h3' and len(sections) and sections[-1][1] == 'h2':
-                sections[-1][3].append((counter, element.tag, ''.join(element.xpath('text()')), []))
+                sections[-1][3].append((counter, element.tag, element_text, []))
             else:
-                sections.append((counter, element.tag, ''.join(element.xpath('text()')), []))
+                sections.append((counter, element.tag, element_text, []))
             add_anchor(element, "s%d" % counter, with_link=False)
             counter += 1
-
+
     toc = etree.Element('div')
     toc.set('id', 'toc')
     toc_header = etree.SubElement(toc, 'h2')
@@ -262,12 +279,81 @@
     for n, section, text, subsections in sections:
         section_element = etree.SubElement(toc_list, 'li')
         add_anchor(section_element, "s%d" % n, with_target=False, link_text=text)
-
+
         if len(subsections):
             subsection_list = etree.SubElement(section_element, 'ol')
-            for n, subsection, text, _ in subsections:
+            for n1, subsection, subtext, _ in subsections:
                 subsection_element = etree.SubElement(subsection_list, 'li')
-                add_anchor(subsection_element, "s%d" % n, with_target=False, link_text=text)
-
+                add_anchor(subsection_element, "s%d" % n1, with_target=False, link_text=subtext)
+
     root.insert(0, toc)
 
+
+def add_table_of_themes(root):
+    try:
+        from sortify import sortify
+    except ImportError:
+        def sortify(x):
+            return x
+
+    book_themes = {}
+    for fragment in root.findall('.//a[@class="theme-begin"]'):
+        if not fragment.text:
+            continue
+        theme_names = [s.strip() for s in fragment.text.split(',')]
+        for theme_name in theme_names:
+            book_themes.setdefault(theme_name, []).append(fragment.get('name'))
+    book_themes = list(book_themes.items())
+    book_themes.sort(key=lambda s: sortify(s[0]))
+    themes_div = etree.Element('div', id="themes")
+    themes_ol = etree.SubElement(themes_div, 'ol')
+    for theme_name, fragments in book_themes:
+        themes_li = etree.SubElement(themes_ol, 'li')
+        themes_li.text = "%s: " % theme_name
+        for i, fragment in enumerate(fragments):
+            item = etree.SubElement(themes_li, 'a', href="#%s" % fragment)
+            item.text = str(i + 1)
+            item.tail = ' '
+    root.insert(0, themes_div)
+
+
+def extract_annotations(html_path):
+    """Extracts annotations from HTML for annotations dictionary.
+
+    For each annotation, yields a tuple of:
+    anchor, footnote type, valid qualifiers, text, html.
+
+    """
+    from .fn_qualifiers import FN_QUALIFIERS
+
+    parser = etree.HTMLParser(encoding='utf-8')
+    tree = etree.parse(html_path, parser)
+    footnotes = tree.find('//*[@id="footnotes"]')
+    re_qualifier = re.compile(r'[^\u2014]+\s+\(([^\)]+)\)\s+\u2014')
+    if footnotes is not None:
+        for footnote in footnotes.findall('div'):
+            fn_type = footnote.get('class').split('-')[1]
+            anchor = footnote.find('a[@class="annotation"]').get('href')[1:]
+            del footnote[:2]
+            footnote.text = None
+            if len(footnote) and footnote[-1].tail == '\n':
+                footnote[-1].tail = None
+            text_str = etree.tostring(footnote, method='text', encoding='unicode').strip()
+            html_str = etree.tostring(footnote, method='html', encoding='unicode').strip()
+
+            match = re_qualifier.match(text_str)
+            if match:
+                qualifier_str = match.group(1)
+                qualifiers = []
+                for candidate in re.split('[;,]', qualifier_str):
+                    candidate = candidate.strip()
+                    if candidate in FN_QUALIFIERS:
+                        qualifiers.append(candidate)
+                    elif candidate.startswith('z '):
+                        subcandidate = candidate.split()[1]
+                        if subcandidate in FN_QUALIFIERS:
+                            qualifiers.append(subcandidate)
+            else:
+                qualifiers = []
+
+            yield anchor, fn_type, qualifiers, text_str, html_str
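Usage note (outside the diff): a minimal sketch of how the reworked transform() would be driven after this change, based only on the signatures visible above. It assumes WLDocument.from_file() accepts a plain path and that OutputFile exposes get_bytes(); 'book.xml' and 'book.html' are placeholder file names, not paths from the commit.

    # Sketch under the assumptions named above -- not part of the commit.
    from librarian.parser import WLDocument
    from librarian import html

    wldoc = WLDocument.from_file('book.xml')   # placeholder input path
    output = html.transform(wldoc)             # OutputFile, or None if the document has no content
    if output is not None:
        with open('book.html', 'wb') as f:     # placeholder output path
            f.write(output.get_bytes())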