# -*- coding: utf-8 -*-
#
# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
import os
import cStringIO
-import re
import copy
from lxml import etree
-from librarian.parser import WLDocument
-from librarian import XHTMLNS, ParseError
+from librarian import XHTMLNS, ParseError, OutputFile
+from librarian import functions
from lxml.etree import XMLSyntaxError, XSLTApplyError
-ENTITY_SUBSTITUTIONS = [
- (u'---', u'—'),
- (u'--', u'–'),
- (u'...', u'…'),
- (u',,', u'„'),
- (u'"', u'”'),
-]
+functions.reg_substitute_entities()
+functions.reg_person_name()
STYLESHEETS = {
'legacy': 'xslt/book2html.xslt',
def get_stylesheet(name):
return os.path.join(os.path.dirname(__file__), STYLESHEETS[name])
-def substitute_entities(context, text):
- """XPath extension function converting all entites in passed text."""
- if isinstance(text, list):
- text = ''.join(text)
- for entity, substitutution in ENTITY_SUBSTITUTIONS:
- text = text.replace(entity, substitutution)
- return text
-
-# Register substitute_entities function with lxml
-ns = etree.FunctionNamespace('http://wolnelektury.pl/functions')
-ns['substitute_entities'] = substitute_entities
-
-def transform(input, output_filename=None, is_file=True, \
- parse_dublincore=True, stylesheet='legacy', options={}):
- """Transforms file input_filename in XML to output_filename in XHTML."""
+def html_has_content(text):
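+    """Checks whether the generated HTML has any content (paragraphs or headers)."""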
+ return etree.ETXPath('//p|//{%(ns)s}p|//h1|//{%(ns)s}h1' % {'ns': str(XHTMLNS)})(text)
+
+def transform(wldoc, stylesheet='legacy', options=None, flags=None):
+ """Transforms the WL document to XHTML.
+
+    Returns an OutputFile with the generated XHTML,
+    or None if the document has no content.
+ """
# Parse XSLT
try:
style_filename = get_stylesheet(stylesheet)
style = etree.parse(style_filename)
- if is_file:
- document = WLDocument.from_file(input, True, \
- parse_dublincore=parse_dublincore)
- else:
- document = WLDocument.from_string(input, True, \
- parse_dublincore=parse_dublincore)
+ document = copy.deepcopy(wldoc)
+ del wldoc
+ document.swap_endlines()
+
+ if flags:
+ for flag in flags:
+ document.edoc.getroot().set(flag, 'yes')
+ document.clean_ed_note()
+
+ if not options:
+ options = {}
result = document.transform(style, **options)
- del document # no longer needed large object :)
-
- if etree.ETXPath('//p|//{%s}p' % str(XHTMLNS))(result) is not None:
+        del document  # large object, no longer needed
+
+ if html_has_content(result):
add_anchors(result.getroot())
+ add_table_of_themes(result.getroot())
add_table_of_contents(result.getroot())
-
- if output_filename is not None:
- result.write(output_filename, xml_declaration=False, pretty_print=True, encoding='utf-8')
- else:
- return result
- return True
+
+ return OutputFile.from_string(etree.tostring(result, method='html',
+ xml_declaration=False, pretty_print=True, encoding='utf-8'))
else:
- return "<empty />"
+ return None
except KeyError:
-        raise ValueError("'%s' is not a valid stylesheet.")
+        raise ValueError("'%s' is not a valid stylesheet." % stylesheet)
except (XMLSyntaxError, XSLTApplyError), e:
open_fragments = {}
closed_fragments = {}
- for event, element in etree.iterparse(input_filename, events=('start', 'end')):
+    # iterparse would die on an HTML document
+ parser = etree.HTMLParser(encoding='utf-8')
+ buf = cStringIO.StringIO()
+ buf.write(etree.tostring(etree.parse(input_filename, parser).getroot()[0][0], encoding='utf-8'))
+ buf.seek(0)
+
+ for event, element in etree.iterparse(buf, events=('start', 'end')):
# Process begin and end elements
if element.get('class', '') in ('theme-begin', 'theme-end'):
if not event == 'end': continue # Process elements only once, on end event
# Process all elements except begin and end
else:
# Omit annotation tags
- if len(element.get('name', '')) or element.get('class', '') == 'annotation':
+ if (len(element.get('name', '')) or
+ element.get('class', '') in ('annotation', 'anchor')):
if event == 'end' and element.tail:
for fragment_id in open_fragments:
open_fragments[fragment_id].append('text', element.tail)
anchor.tail = element.text
element.text = u''
element.insert(0, anchor)
-
+
if with_target:
anchor_target = etree.Element('a', name='%s' % prefix)
anchor_target.set('class', 'target')
counter = 1
for element in root.iterdescendants():
if any_ancestor(element, lambda e: e.get('class') in ('note', 'motto', 'motto_podpis', 'dedication')
+ or e.get('id') == 'nota_red'
or e.tag == 'blockquote'):
continue
-
+
if element.tag == 'p' and 'verse' in element.get('class', ''):
if counter == 1 or counter % 5 == 0:
add_anchor(element, "f%d" % counter, link_text=counter)
counter += 1
+def raw_printable_text(element):
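+    """Returns the raw text of an element, with annotation link labels blanked out."""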
+ working = copy.deepcopy(element)
+ for e in working.findall('a'):
+ if e.get('class') == 'annotation':
+ e.text = ''
+ return etree.tostring(working, method='text', encoding=unicode).strip()
+
+
def add_table_of_contents(root):
sections = []
counter = 1
for element in root.iterdescendants():
if element.tag in ('h2', 'h3'):
- if any_ancestor(element, lambda e: e.get('id') in ('footnotes',) or e.get('class') in ('person-list',)):
+ if any_ancestor(element, lambda e: e.get('id') in ('footnotes', 'nota_red') or e.get('class') in ('person-list',)):
continue
-
+
+ element_text = raw_printable_text(element)
if element.tag == 'h3' and len(sections) and sections[-1][1] == 'h2':
- sections[-1][3].append((counter, element.tag, ''.join(element.xpath('text()')), []))
+ sections[-1][3].append((counter, element.tag, element_text, []))
else:
- sections.append((counter, element.tag, ''.join(element.xpath('text()')), []))
+ sections.append((counter, element.tag, element_text, []))
add_anchor(element, "s%d" % counter, with_link=False)
counter += 1
-
+
toc = etree.Element('div')
toc.set('id', 'toc')
toc_header = etree.SubElement(toc, 'h2')
for n, section, text, subsections in sections:
section_element = etree.SubElement(toc_list, 'li')
add_anchor(section_element, "s%d" % n, with_target=False, link_text=text)
-
+
if len(subsections):
subsection_list = etree.SubElement(section_element, 'ol')
for n, subsection, text, _ in subsections:
subsection_element = etree.SubElement(subsection_list, 'li')
add_anchor(subsection_element, "s%d" % n, with_target=False, link_text=text)
-
+
root.insert(0, toc)
+
+def add_table_of_themes(root):
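+    """Builds an alphabetical index of themes, with links to the tagged
+    fragments, and inserts it at the top of the document."""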
+ try:
+ from sortify import sortify
+ except ImportError:
+ sortify = lambda x: x
+
+ book_themes = {}
+ for fragment in root.findall('.//a[@class="theme-begin"]'):
+ if not fragment.text:
+ continue
+ theme_names = [s.strip() for s in fragment.text.split(',')]
+ for theme_name in theme_names:
+ book_themes.setdefault(theme_name, []).append(fragment.get('name'))
+ book_themes = book_themes.items()
+ book_themes.sort(key=lambda s: sortify(s[0]))
+ themes_div = etree.Element('div', id="themes")
+ themes_ol = etree.SubElement(themes_div, 'ol')
+ for theme_name, fragments in book_themes:
+ themes_li = etree.SubElement(themes_ol, 'li')
+ themes_li.text = "%s: " % theme_name
+ for i, fragment in enumerate(fragments):
+ item = etree.SubElement(themes_li, 'a', href="#%s" % fragment)
+ item.text = str(i + 1)
+ root.insert(0, themes_div)
+
+
+def extract_annotations(html_path):
+ """For each annotation, yields a tuple: anchor, text, html."""
+ parser = etree.HTMLParser(encoding='utf-8')
+ tree = etree.parse(html_path, parser)
+ footnotes = tree.find('//*[@id="footnotes"]')
+ if footnotes is not None:
+ for footnote in footnotes.findall('div'):
+ anchor = footnote.find('a[@name]').get('name')
+ del footnote[:2]
+ text_str = etree.tostring(footnote, method='text', encoding='utf-8').strip()
+ html_str = etree.tostring(footnote, method='html', encoding='utf-8')
+ yield anchor, text_str, html_str
+