X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/33db54c61d9818547c059e3e3cda2518a260bcda..f492e325efb42a3348b2479a0fd0ffc3c484657b:/librarian/epub.py?ds=inline
diff --git a/librarian/epub.py b/librarian/epub.py
index cb2166b..f2987b5 100644
--- a/librarian/epub.py
+++ b/librarian/epub.py
@@ -1,22 +1,28 @@
# -*- coding: utf-8 -*-
#
# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from __future__ import with_statement
import os
import os.path
-import shutil
-import sys
+import subprocess
+from StringIO import StringIO
from copy import deepcopy
from lxml import etree
import zipfile
+from tempfile import mkdtemp
+from shutil import rmtree
+
+import sys
-from librarian import XMLNamespace, RDFNS, DCNS, WLNS, XHTMLNS
+from librarian import XMLNamespace, RDFNS, DCNS, WLNS, NCXNS, OPFNS, XHTMLNS, NoDublinCore
+from librarian.dcparser import BookInfo
-NCXNS = XMLNamespace("http://www.daisy.org/z3986/2005/ncx/")
-OPFNS = XMLNamespace("http://www.idpf.org/2007/opf")
+from librarian import functions, get_resource
+
+functions.reg_person_name()
def inner_xml(node):
@@ -27,7 +33,7 @@ def inner_xml(node):
"""
nt = node.text if node.text is not None else ''
- return ''.join([nt] + [etree.tostring(child) for child in node])
+ return ''.join([nt] + [etree.tostring(child) for child in node])
def set_inner_xml(node, text):
""" sets node's text and children from a string
@@ -38,7 +44,6 @@ def set_inner_xml(node, text):
xyz
"""
-
p = etree.fromstring('%s' % text)
node.text = p.text
node[:] = p[:]
@@ -69,21 +74,16 @@ def xslt(xml, sheet):
return xml.xslt(etree.parse(xsltf))
-_resdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'epub')
-def res(fname):
- return os.path.join(_resdir, fname)
-
-
def replace_characters(node):
def replace_chars(text):
if text is None:
return None
- return text.replace("&", "&")\
- .replace("---", "—")\
- .replace("--", "–")\
- .replace(",,", "„")\
- .replace('"', "”")\
- .replace("'", "’")
+ return text.replace(u"\ufeff", u"")\
+ .replace("---", u"\u2014")\
+ .replace("--", u"\u2013")\
+ .replace(",,", u"\u201E")\
+ .replace('"', u"\u201D")\
+ .replace("'", u"\u2019")
if node.tag == 'extra':
node.clear()
else:
@@ -93,24 +93,26 @@ def replace_characters(node):
replace_characters(child)
-def find_annotations(annotations, source, part_number):
+def find_annotations(annotations, source, part_no):
for child in source:
if child.tag in ('pe', 'pa', 'pt', 'pr'):
annotation = deepcopy(child)
- annotation.set('number', str(len(annotations)+1))
- annotation.set('part', str(part_number))
+ number = str(len(annotations)+1)
+ annotation.set('number', number)
+ annotation.set('part', str(part_no))
annotation.tail = ''
annotations.append(annotation)
tail = child.tail
child.clear()
child.tail = tail
- child.text = str(len(annotations))
- if child.tag not in ('extra', 'podtytul'):
- find_annotations(annotations, child, part_number)
+ child.text = number
+ if child.tag not in ('extra',):
+ find_annotations(annotations, child, part_no)
def replace_by_verse(tree):
""" Find stanzas and create new verses in place of a '/' character """
+
stanzas = tree.findall('.//' + WLNS('strofa'))
for node in stanzas:
for child_node in node:
@@ -137,6 +139,7 @@ def replace_by_verse(tree):
def add_to_manifest(manifest, partno):
""" Adds a node to the manifest section in content.opf file """
+
partstr = 'part%d' % partno
e = manifest.makeelement(OPFNS('item'), attrib={
'id': partstr,
@@ -148,82 +151,279 @@ def add_to_manifest(manifest, partno):
def add_to_spine(spine, partno):
""" Adds a node to the spine section in content.opf file """
+
e = spine.makeelement(OPFNS('itemref'), attrib={'idref': 'part%d' % partno});
spine.append(e)
-def add_nav_point(nav_map, counter, title, part_counter):
- nav_point = nav_map.makeelement(NCXNS('navPoint'))
- nav_point.set('id', 'NavPoint-%d' % counter)
- nav_point.set('playOrder', str(counter))
-
- nav_label = nav_map.makeelement(NCXNS('navLabel'))
- text = nav_map.makeelement(NCXNS('text'))
- text.text = title
- nav_label.append(text)
- nav_point.append(nav_label)
-
- content = nav_map.makeelement(NCXNS('content'))
- content.set('src', 'part%d.html' % part_counter)
- nav_point.append(content)
-
- nav_map.append(nav_point)
-
-
-def add_nav_point2(nav_map, counter, title, part_counter, subcounter):
- nav_point = nav_map.makeelement(NCXNS('navPoint'))
- nav_point.set('id', 'NavPoint-%d' % counter)
- nav_point.set('playOrder', str(counter))
-
- nav_label = nav_map.makeelement(NCXNS('navLabel'))
- text = nav_map.makeelement(NCXNS('text'))
- text.text = title
- nav_label.append(text)
- nav_point.append(nav_label)
-
- content = nav_map.makeelement(NCXNS('content'))
- content.set('src', 'part%d.html#sub%d' % (part_counter, subcounter))
- nav_point.append(content)
-
- nav_map[-1].append(nav_point)
+class TOC(object):
+ def __init__(self, name=None, part_number=None):
+ self.children = []
+ self.name = name
+ self.part_number = part_number
+ self.sub_number = None
+ def add(self, name, part_number, level=0, is_part=True):
+ if level > 0 and self.children:
+ return self.children[-1].add(name, part_number, level-1, is_part)
+ else:
+ t = TOC(name)
+ t.part_number = part_number
+ self.children.append(t)
+ if not is_part:
+ t.sub_number = len(self.children) + 1
+ return t.sub_number
+
+ def append(self, toc):
+ self.children.append(toc)
+
+ def extend(self, toc):
+ self.children.extend(toc.children)
+
+ def depth(self):
+ if self.children:
+ return max((c.depth() for c in self.children)) + 1
+ else:
+ return 0
+
+ def write_to_xml(self, nav_map, counter):
+ for child in self.children:
+ nav_point = nav_map.makeelement(NCXNS('navPoint'))
+ nav_point.set('id', 'NavPoint-%d' % counter)
+ nav_point.set('playOrder', str(counter))
+
+ nav_label = nav_map.makeelement(NCXNS('navLabel'))
+ text = nav_map.makeelement(NCXNS('text'))
+ text.text = child.name
+ nav_label.append(text)
+ nav_point.append(nav_label)
+
+ content = nav_map.makeelement(NCXNS('content'))
+ src = 'part%d.html' % child.part_number
+ if child.sub_number is not None:
+ src += '#sub%d' % child.sub_number
+ content.set('src', src)
+ nav_point.append(content)
+ nav_map.append(nav_point)
+ counter = child.write_to_xml(nav_point, counter + 1)
+ return counter
+
+
+def used_chars(element):
+ """ Lists characters used in an ETree Element """
+ chars = set((element.text or '') + (element.tail or ''))
+ for child in element:
+ chars = chars.union(used_chars(child))
+ return chars
+
+
+def chop(main_text):
+ """ divide main content of the XML file into chunks """
+
+ # prepare a container for each chunk
+ part_xml = etree.Element('utwor')
+ etree.SubElement(part_xml, 'master')
+ main_xml_part = part_xml[0] # master
-def transform(input_file, output_file):
- """ produces an epub
-
- input_file and output_file should be filelike objects
+ last_node_part = False
+ for one_part in main_text:
+ name = one_part.tag
+ if name == 'naglowek_czesc':
+ yield part_xml
+ last_node_part = True
+ main_xml_part[:] = [deepcopy(one_part)]
+ elif not last_node_part and name in ("naglowek_rozdzial", "naglowek_akt", "srodtytul"):
+ yield part_xml
+ main_xml_part[:] = [deepcopy(one_part)]
+ else:
+ main_xml_part.append(deepcopy(one_part))
+ last_node_part = False
+ yield part_xml
+
+
+def transform_chunk(chunk_xml, chunk_no, annotations, empty=False, _empty_html_static=[]):
+ """ transforms one chunk, returns a HTML string, a TOC object and a set of used characters """
+
+ toc = TOC()
+ for element in chunk_xml[0]:
+ if element.tag in ("naglowek_czesc", "naglowek_rozdzial", "naglowek_akt", "srodtytul"):
+ toc.add(node_name(element), chunk_no)
+ elif element.tag in ('naglowek_podrozdzial', 'naglowek_scena'):
+ subnumber = toc.add(node_name(element), chunk_no, level=1, is_part=False)
+ element.set('sub', str(subnumber))
+ if empty:
+ if not _empty_html_static:
+ _empty_html_static.append(open(get_resource('epub/emptyChunk.html')).read())
+ chars = set()
+ output_html = _empty_html_static[0]
+ else:
+ find_annotations(annotations, chunk_xml, chunk_no)
+ replace_by_verse(chunk_xml)
+ html_tree = xslt(chunk_xml, get_resource('epub/xsltScheme.xsl'))
+ chars = used_chars(html_tree.getroot())
+ output_html = etree.tostring(html_tree, method="html", pretty_print=True)
+ return output_html, toc, chars
+
+
+def transform(provider, slug=None, file_path=None, output_file=None, output_dir=None, make_dir=False, verbose=False,
+ sample=None, cover=None, flags=None):
+ """ produces a EPUB file
+
+ provider: a DocProvider
+ slug: slug of file to process, available by provider
+ output_file: file-like object or path to output file
+ output_dir: path to directory to save output file to; either this or output_file must be present
+ make_dir: writes output to //.epub instead of /.epub
+ sample=n: generate sample e-book (with at least n paragraphs)
+ cover: a cover.Cover object
+ flags: less-advertising,
"""
- input_xml = etree.parse(input_file)
+ def transform_file(input_xml, chunk_counter=1, first=True, sample=None):
+ """ processes one input file and proceeds to its children """
+
+ replace_characters(input_xml.getroot())
+
+ children = [child.text for child in input_xml.findall('.//'+DCNS('relation.hasPart'))]
+
+ # every input file will have a TOC entry,
+ # pointing to starting chunk
+ toc = TOC(node_name(input_xml.find('.//'+DCNS('title'))), chunk_counter)
+ chars = set()
+ if first:
+ # write book title page
+ html_tree = xslt(input_xml, get_resource('epub/xsltTitle.xsl'))
+ chars = used_chars(html_tree.getroot())
+ zip.writestr('OPS/title.html',
+ etree.tostring(html_tree, method="html", pretty_print=True))
+ elif children:
+ # write title page for every parent
+ if sample is not None and sample <= 0:
+ chars = set()
+ html_string = open(get_resource('epub/emptyChunk.html')).read()
+ else:
+ html_tree = xslt(input_xml, get_resource('epub/xsltChunkTitle.xsl'))
+ chars = used_chars(html_tree.getroot())
+ html_string = etree.tostring(html_tree, method="html", pretty_print=True)
+ zip.writestr('OPS/part%d.html' % chunk_counter, html_string)
+ add_to_manifest(manifest, chunk_counter)
+ add_to_spine(spine, chunk_counter)
+ chunk_counter += 1
+
+ if len(input_xml.getroot()) > 1:
+ # rdf before style master
+ main_text = input_xml.getroot()[1]
+ else:
+ # rdf in style master
+ main_text = input_xml.getroot()[0]
+ if main_text.tag == RDFNS('RDF'):
+ main_text = None
+
+ if main_text is not None:
+ for chunk_xml in chop(main_text):
+ empty = False
+ if sample is not None:
+ if sample <= 0:
+ empty = True
+ else:
+ sample -= len(chunk_xml.xpath('//strofa|//akap|//akap_cd|//akap_dialog'))
+ chunk_html, chunk_toc, chunk_chars = transform_chunk(chunk_xml, chunk_counter, annotations, empty)
+
+ toc.extend(chunk_toc)
+ chars = chars.union(chunk_chars)
+ zip.writestr('OPS/part%d.html' % chunk_counter, chunk_html)
+ add_to_manifest(manifest, chunk_counter)
+ add_to_spine(spine, chunk_counter)
+ chunk_counter += 1
+
+ if children:
+ for child in children:
+ child_xml = etree.parse(provider.by_uri(child))
+ child_toc, chunk_counter, chunk_chars, sample = transform_file(child_xml, chunk_counter, first=False, sample=sample)
+ toc.append(child_toc)
+ chars = chars.union(chunk_chars)
+
+ return toc, chunk_counter, chars, sample
+
+ # read metadata from the first file
+ if file_path:
+ if slug:
+ raise ValueError('slug or file_path should be specified, not both')
+ f = open(file_path, 'r')
+ input_xml = etree.parse(f)
+ f.close()
+ else:
+ if not slug:
+ raise ValueError('either slug or file_path should be specified')
+ input_xml = etree.parse(provider[slug])
+
+ if flags:
+ for flag in flags:
+ input_xml.getroot().set(flag, 'yes')
+
+ metadata = input_xml.find('.//'+RDFNS('Description'))
+ if metadata is None:
+ raise NoDublinCore('Document has no DublinCore - which is required.')
+ book_info = BookInfo.from_element(input_xml)
+ metadata = etree.ElementTree(metadata)
+
+ # if output to dir, create the file
+ if output_dir is not None:
+ if make_dir:
+ author = unicode(book_info.author)
+ output_dir = os.path.join(output_dir, author)
+ try:
+ os.makedirs(output_dir)
+ except OSError:
+ pass
+ if slug:
+ output_file = open(os.path.join(output_dir, '%s.epub' % slug), 'w')
+ else:
+ output_file = open(os.path.join(output_dir, os.path.splitext(os.path.basename(file_path))[0] + '.epub'), 'w')
zip = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED)
+ # write static elements
mime = zipfile.ZipInfo()
mime.filename = 'mimetype'
mime.compress_type = zipfile.ZIP_STORED
mime.extra = ''
zip.writestr(mime, 'application/epub+zip')
-
zip.writestr('META-INF/container.xml', '' \
'' \
'')
+ zip.write(get_resource('epub/style.css'), os.path.join('OPS', 'style.css'))
+ zip.write(get_resource('res/wl-logo-small.png'), os.path.join('OPS', 'logo_wolnelektury.png'))
- metadata_el = input_xml.find('.//'+RDFNS('Description'))
- metadatasource = etree.ElementTree(metadata_el)
-
- opf = xslt(metadatasource, res('xsltContent.xsl'))
-
+ opf = xslt(metadata, get_resource('epub/xsltContent.xsl'))
manifest = opf.find('.//' + OPFNS('manifest'))
spine = opf.find('.//' + OPFNS('spine'))
- for fname in 'style.css', 'logo_wolnelektury.png':
- zip.write(res(fname), os.path.join('OPS', fname))
+ if cover:
+ cover_file = StringIO()
+ c = cover(book_info.author.readable(), book_info.title)
+ c.save(cover_file)
+ c_name = 'cover.%s' % c.ext()
+ zip.writestr(os.path.join('OPS', c_name), cover_file.getvalue())
+ del cover_file
+
+ cover_tree = etree.parse(get_resource('epub/cover.html'))
+ cover_tree.find('//' + XHTMLNS('img')).set('src', c_name)
+ zip.writestr('OPS/cover.html', etree.tostring(
+ cover_tree, method="html", pretty_print=True))
+
+ manifest.append(etree.fromstring(
+ ' '))
+ manifest.append(etree.fromstring(
+ ' ' % (c_name, c.mime_type())))
+ spine.insert(0, etree.fromstring(''))
+ opf.getroot()[0].append(etree.fromstring(''))
+ opf.getroot().append(etree.fromstring(''))
+
annotations = etree.Element('annotations')
- part_xml = etree.Element('utwor')
- etree.SubElement(part_xml, 'master')
toc_file = etree.fromstring('' \
@@ -231,99 +431,59 @@ def transform(input_file, output_file):
'version="2005-1">' \
'' \
'Strona tytułowa' \
- '' \
- 'Początek utworu' \
'')
+ nav_map = toc_file[-1]
- main_xml_part = part_xml[0] # było [0][0], master
- nav_map = toc_file[-1] # było [-1][-1]
- depth = 1 # navmap
+ toc, chunk_counter, chars, sample = transform_file(input_xml, sample=sample)
- if len(input_xml.getroot()) > 1:
- # rdf before style master
- main_text = input_xml.getroot()[1]
- else:
- # rdf in style master
- main_text = input_xml.getroot()[0]
-
- replace_characters(main_text)
- zip.writestr('OPS/title.html',
- etree.tostring(xslt(input_xml, res('xsltTitle.xsl')), pretty_print=True))
-
- # Search for table of contents elements and book division
-
- stupid_i = stupid_j = stupid_k = 1
- last_node_part = False
- for one_part in main_text:
- name = one_part.tag
- if name in ("naglowek_czesc", "naglowek_rozdzial", "naglowek_akt", "srodtytul"):
- if name == "naglowek_czesc":
- stupid_k = 1
- last_node_part = True
- find_annotations(annotations, part_xml, stupid_j)
- replace_by_verse(part_xml)
- zip.writestr('OPS/part%d.html' % stupid_j,
- etree.tostring(xslt(part_xml, res('xsltScheme.xsl')), pretty_print=True))
- main_xml_part[:] = [deepcopy(one_part)]
- # add to manifest and spine
- add_to_manifest(manifest, stupid_j)
- add_to_spine(spine, stupid_j)
- name_toc = node_name(one_part)
- # build table of contents
- # i+2 because of title page
- add_nav_point(nav_map, stupid_i+2, name_toc, stupid_j + 1)
- stupid_i += 1
- stupid_j += 1
- else:
- if last_node_part:
- main_xml_part.append(one_part)
- last_node_part = False
- name_toc = node_name(one_part)
- add_nav_point(nav_map, stupid_i + 1, name_toc, stupid_j)
- else:
- stupid_k = 1
- find_annotations(annotations, part_xml, stupid_j)
- replace_by_verse(part_xml)
- zip.writestr('OPS/part%d.html' % stupid_j,
- etree.tostring(xslt(part_xml, res('xsltScheme.xsl')), pretty_print=True))
- # start building a new part
- main_xml_part[:] = [deepcopy(one_part)]
- add_to_manifest(manifest, stupid_j)
- add_to_spine(spine, stupid_j)
- name_toc = node_name(one_part)
- add_nav_point(nav_map, stupid_i + 2, name_toc, stupid_j + 1) # title page
- stupid_j += 1
- stupid_i += 1
- else:
- if name in ('naglowek_podrozdzial', 'naglowek_scena'):
- depth = 2
- name_toc = node_name(one_part)
- add_nav_point2(nav_map, stupid_i + 2, name_toc, stupid_j, stupid_k)
- one_part.set('sub', str(stupid_k))
- stupid_k += 1
- stupid_i += 1
- main_xml_part.append(deepcopy(one_part))
- last_node_part = False
- find_annotations(annotations, part_xml, stupid_j)
- replace_by_verse(part_xml)
- add_to_manifest(manifest, stupid_j)
- add_to_spine(spine, stupid_j)
-
- zip.writestr('OPS/part%d.html' % stupid_j,
- etree.tostring(xslt(part_xml, res('xsltScheme.xsl')), pretty_print=True))
+ if not toc.children:
+ toc.add(u"Początek utworu", 1)
+ toc_counter = toc.write_to_xml(nav_map, 2)
# Last modifications in container files and EPUB creation
if len(annotations) > 0:
nav_map.append(etree.fromstring(
'Przypisy'\
- '' % {'i':stupid_i+2}))
+ '' % {'i': toc_counter}))
+ toc_counter += 1
manifest.append(etree.fromstring(
' '))
spine.append(etree.fromstring(
''))
replace_by_verse(annotations)
+ html_tree = xslt(annotations, get_resource('epub/xsltAnnotations.xsl'))
+ chars = chars.union(used_chars(html_tree.getroot()))
zip.writestr('OPS/annotations.html', etree.tostring(
- xslt(annotations, res("xsltAnnotations.xsl")), pretty_print=True))
+ html_tree, method="html", pretty_print=True))
+
+ nav_map.append(etree.fromstring(
+ 'Strona redakcyjna'\
+ '' % {'i': toc_counter}))
+ manifest.append(etree.fromstring(
+ ' '))
+ spine.append(etree.fromstring(
+ ''))
+ html_tree = xslt(input_xml, get_resource('epub/xsltLast.xsl'))
+ chars.update(used_chars(html_tree.getroot()))
+ zip.writestr('OPS/last.html', etree.tostring(
+ html_tree, method="html", pretty_print=True))
+
+ # strip fonts
+ tmpdir = mkdtemp('-librarian-epub')
+ cwd = os.getcwd()
+
+ os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'font-optimizer'))
+ for fname in 'DejaVuSerif.ttf', 'DejaVuSerif-Bold.ttf', 'DejaVuSerif-Italic.ttf', 'DejaVuSerif-BoldItalic.ttf':
+ optimizer_call = ['perl', 'subset.pl', '--chars', ''.join(chars).encode('utf-8'),
+ get_resource('fonts/' + fname), os.path.join(tmpdir, fname)]
+ if verbose:
+ print "Running font-optimizer"
+ subprocess.check_call(optimizer_call)
+ else:
+ subprocess.check_call(optimizer_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ zip.write(os.path.join(tmpdir, fname), os.path.join('OPS', fname))
+ rmtree(tmpdir)
+ os.chdir(cwd)
zip.writestr('OPS/content.opf', etree.tostring(opf, pretty_print=True))
contents = []
@@ -335,31 +495,7 @@ def transform(input_file, output_file):
meta.set('content', '0')
toc_file[0].append(meta)
toc_file[0][0].set('content', ''.join((title, 'WolneLektury.pl')))
- toc_file[0][1].set('content', str(depth))
+ toc_file[0][1].set('content', str(toc.depth()))
set_inner_xml(toc_file[1], ''.join(('', title, '')))
zip.writestr('OPS/toc.ncx', etree.tostring(toc_file, pretty_print=True))
zip.close()
-
-
-if __name__ == '__main__':
- import html
-
- if len(sys.argv) < 2:
- print >> sys.stderr, 'Usage: wl2epub [output file]'
- sys.exit(1)
-
- input = sys.argv[1]
- if len(sys.argv) > 2:
- output = sys.argv[2]
- else:
- basename, ext = os.path.splitext(input)
- output = basename + '.epub'
-
- print input
- if html.transform(input, is_file=True) == '':
- print 'empty content - skipping'
- else:
- transform(open(input, 'r'), open(output, 'w'))
-
-
-