pdf with motifs (dirty hack for marginpar-orphan problem)
diff --git a/librarian/epub.py b/librarian/epub.py
index a0d9742..e992f40 100644
--- a/librarian/epub.py
+++ b/librarian/epub.py
@@ -7,41 +7,22 @@ from __future__ import with_statement
 
 import os
 import os.path
-import shutil
-import sys
+import subprocess
 from copy import deepcopy
 from lxml import etree
 import zipfile
+from tempfile import mkdtemp
+from shutil import rmtree
 
 
-from librarian import XMLNamespace, RDFNS, DCNS, WLNS, XHTMLNS, NoDublinCore
-from librarian.parser import WLDocument
-
-#TODO: shouldn't be repeated here
-NCXNS = XMLNamespace("http://www.daisy.org/z3986/2005/ncx/")
-OPFNS = XMLNamespace("http://www.idpf.org/2007/opf")
-
-
-class DocProvider(object):
-    class DoesNotExist(Exception):
-        pass
-    
-    def by_slug(self, slug):
-        raise NotImplemented
-
-    def __getitem__(self, slug):
-        return self.by_slug(slug)
-
-    def by_uri(self, uri):
-        return self.by_slug(uri.rsplit('/', 1)[1])
+import sys
+sys.path.append('..') # for running from working copy
 
 
+from librarian import XMLNamespace, RDFNS, DCNS, WLNS, NCXNS, OPFNS, NoDublinCore
+from librarian.dcparser import BookInfo
 
 
-class DirDocProvider(DocProvider):
-    def __init__(self, dir):
-        self.dir = dir
-        self.files = {}
+from librarian import functions
 
 
-    def by_slug(self, slug):
-        return open(os.path.join(self.dir, '%s.xml' % slug))
+functions.reg_person_name()
 
 
 def inner_xml(node):
@@ -63,7 +44,6 @@ def set_inner_xml(node, text):
     <a>x<b>y</b>z</a>
     """
 
-    
     p = etree.fromstring('<x>%s</x>' % text)
     node.text = p.text
     node[:] = p[:]
@@ -103,12 +83,11 @@ def replace_characters(node):
     def replace_chars(text):
         if text is None:
             return None
-        return text.replace("&", "&amp;")\
-                   .replace("---", "&#8212;")\
-                   .replace("--", "&#8211;")\
-                   .replace(",,", "&#8222;")\
-                   .replace('"', "&#8221;")\
-                   .replace("'", "&#8217;")
+        return text.replace("---", u"\u2014")\
+                   .replace("--", u"\u2013")\
+                   .replace(",,", u"\u201E")\
+                   .replace('"', u"\u201D")\
+                   .replace("'", u"\u2019")
     if node.tag == 'extra':
         node.clear()
     else:
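
The replacement table now emits literal Unicode punctuation (em dash, en dash, Polish low and high quotes, right single quote) instead of HTML entities, and the "&" -> "&amp;" escape is gone. A minimal standalone sketch of the same mapping, with an invented sample string:

    # Sketch only: the same substitutions as replace_chars() above, in isolation.
    def smarten(text):
        if text is None:
            return None
        return text.replace("---", u"\u2014")\
                   .replace("--", u"\u2013")\
                   .replace(",,", u"\u201E")\
                   .replace('"', u"\u201D")\
                   .replace("'", u"\u2019")

    assert smarten(',,Yes" --- she said -- twice') == u'\u201EYes\u201D \u2014 she said \u2013 twice'
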
@@ -234,6 +213,14 @@ class TOC(object):
         return counter
 
 
+def used_chars(element):
+    """ Lists characters used in an ETree Element """
+    chars = set((element.text or '') + (element.tail or ''))
+    for child in element:
+        chars = chars.union(used_chars(child))
+    return chars
+
+
 def chop(main_text):
     """ divide main content of the XML file into chunks """
 
@@ -259,7 +246,7 @@ def chop(main_text):
 
 
 def transform_chunk(chunk_xml, chunk_no, annotations):
-    """ transforms one chunk, returns a HTML string and a TOC object """
+    """ transforms one chunk, returns a HTML string, a TOC object and a set of used characters """
 
     toc = TOC()
     for element in chunk_xml[0]:
@@ -270,15 +257,18 @@ def transform_chunk(chunk_xml, chunk_no, annotations):
             element.set('sub', str(subnumber))
     find_annotations(annotations, chunk_xml, chunk_no)
     replace_by_verse(chunk_xml)
-    output_html = etree.tostring(xslt(chunk_xml, res('xsltScheme.xsl')), pretty_print=True)
-    return output_html, toc
+    html_tree = xslt(chunk_xml, res('xsltScheme.xsl'))
+    chars = used_chars(html_tree.getroot())
+    output_html = etree.tostring(html_tree, pretty_print=True)
+    return output_html, toc, chars
 
 
 
 
-def transform(provider, slug, output_file):
+def transform(provider, slug, output_file=None, output_dir=None):
     """ produces an epub
 
     provider is a DocProvider
     """ produces an epub
 
     provider is a DocProvider
-    output_file should be filelike object
+    either output_file (a file-like object) or output_dir (path to file/dir) should be specified
+    if output_dir is specified, file will be written to <output_dir>/<author>/<slug>.epub
     """
 
     def transform_file(input_xml, chunk_counter=1, first=True):
     """
 
     def transform_file(input_xml, chunk_counter=1, first=True):
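
With the widened signature a caller passes either an open file object or just an output directory; in the latter case the target path is derived from the book's author and slug. A hedged usage sketch (the paths and the slug are invented):

    from librarian import DirDocProvider
    from librarian import epub

    provider = DirDocProvider('/srv/books')   # directory holding <slug>.xml files

    # Caller-managed output file:
    epub.transform(provider, 'pan-tadeusz', output_file=open('/tmp/pan-tadeusz.epub', 'w'))

    # Or let transform() write to <output_dir>/<author>/<slug>.epub:
    epub.transform(provider, 'pan-tadeusz', output_dir='/srv/epubs')
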
@@ -289,14 +279,19 @@ def transform(provider, slug, output_file):
         # every input file will have a TOC entry,
         # pointing to starting chunk
         toc = TOC(node_name(input_xml.find('.//'+DCNS('title'))), chunk_counter)
+        chars = set()
         if first:
             # write book title page
+            html_tree = xslt(input_xml, res('xsltTitle.xsl'))
+            chars = used_chars(html_tree.getroot())
             zip.writestr('OPS/title.html',
-                 etree.tostring(xslt(input_xml, res('xsltTitle.xsl')), pretty_print=True))
+                 etree.tostring(html_tree, pretty_print=True))
         elif children:
             # write title page for every parent
+            html_tree = xslt(input_xml, res('xsltChunkTitle.xsl'))
+            chars = used_chars(html_tree.getroot())
             zip.writestr('OPS/part%d.html' % chunk_counter, 
-                etree.tostring(xslt(input_xml, res('xsltChunkTitle.xsl')), pretty_print=True))
+                etree.tostring(html_tree, pretty_print=True))
             add_to_manifest(manifest, chunk_counter)
             add_to_spine(spine, chunk_counter)
             chunk_counter += 1
@@ -313,9 +308,10 @@ def transform(provider, slug, output_file):
         if main_text is not None:
             replace_characters(main_text)
 
-            for chunk_no, chunk_xml in enumerate(chop(main_text), chunk_counter):
-                chunk_html, chunk_toc = transform_chunk(chunk_xml, chunk_counter, annotations)
+            for chunk_xml in chop(main_text):
+                chunk_html, chunk_toc, chunk_chars = transform_chunk(chunk_xml, chunk_counter, annotations)
                 toc.extend(chunk_toc)
+                chars = chars.union(chunk_chars)
                 zip.writestr('OPS/part%d.html' % chunk_counter, chunk_html)
                 add_to_manifest(manifest, chunk_counter)
                 add_to_spine(spine, chunk_counter)
@@ -324,10 +320,29 @@ def transform(provider, slug, output_file):
         if children:
             for child in children:
                 child_xml = etree.parse(provider.by_uri(child))
-                child_toc, chunk_counter = transform_file(child_xml, chunk_counter, first=False)
+                child_toc, chunk_counter, chunk_chars = transform_file(child_xml, chunk_counter, first=False)
                 toc.append(child_toc)
+                chars = chars.union(chunk_chars)
 
 
-        return toc, chunk_counter
+        return toc, chunk_counter, chars
+
+    # read metadata from the first file
+    input_xml = etree.parse(provider[slug])
+    metadata = input_xml.find('.//'+RDFNS('Description'))
+    if metadata is None:
+        raise NoDublinCore('Document has no DublinCore - which is required.')
+    book_info = BookInfo.from_element(input_xml)
+    metadata = etree.ElementTree(metadata)
+
+    # if output to dir, create the file
+    if output_dir is not None:
+        author = unicode(book_info.author)
+        author_dir = os.path.join(output_dir, author)
+        try:
+            os.makedirs(author_dir)
+        except OSError:
+            pass
+        output_file = open(os.path.join(author_dir, '%s.epub' % slug), 'w')
 
 
     zip = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED)
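
One caveat in the directory handling above: the bare "except OSError: pass" swallows every OSError, not only "directory already exists". A more defensive variant (a sketch, not part of this patch) filters on errno:

    import errno
    import os

    def makedirs_quiet(path):
        # Like the os.makedirs() call above, but only ignore "already exists".
        try:
            os.makedirs(path)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
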
@@ -346,12 +361,6 @@ def transform(provider, slug, output_file):
     for fname in 'style.css', 'logo_wolnelektury.png':
         zip.write(res(fname), os.path.join('OPS', fname))
 
-    # metadata from first file
-    input_xml = etree.parse(provider[slug])
-    metadata = input_xml.find('.//'+RDFNS('Description'))
-    if metadata is None:
-        raise NoDublinCore('Document has no DublinCore - which is required.')
-    metadata = etree.ElementTree(metadata)
     opf = xslt(metadata, res('xsltContent.xsl'))
     manifest = opf.find('.//' + OPFNS('manifest'))
     spine = opf.find('.//' + OPFNS('spine'))
@@ -364,13 +373,14 @@ def transform(provider, slug, output_file):
                                'version="2005-1"><head></head><docTitle></docTitle><navMap>' \
                                '<navPoint id="NavPoint-1" playOrder="1"><navLabel>' \
                                '<text>Strona tytułowa</text></navLabel><content src="title.html" />' \
-                               '</navPoint><navPoint id="NavPoint-2" playOrder="2"><navLabel>' \
-                               '<text>Początek utworu</text></navLabel><content src="part1.html" />' \
                                '</navPoint></navMap></ncx>')
     nav_map = toc_file[-1]
 
-    toc, chunk_counter = transform_file(input_xml)
-    toc_counter = toc.write_to_xml(nav_map, 3) # we already have 2 navpoints
+    toc, chunk_counter, chars = transform_file(input_xml)
+
+    if not toc.children:
+        toc.add(u"Początek utworu", 1)
+    toc_counter = toc.write_to_xml(nav_map, 2)
 
     # Last modifications in container files and EPUB creation
     if len(annotations) > 0:
@@ -382,8 +392,21 @@ def transform(provider, slug, output_file):
         spine.append(etree.fromstring(
             '<itemref idref="annotations" />'))
         replace_by_verse(annotations)
+        html_tree = xslt(annotations, res("xsltAnnotations.xsl"))
+        chars = chars.union(used_chars(html_tree.getroot()))
         zip.writestr('OPS/annotations.html', etree.tostring(
-                            xslt(annotations, res("xsltAnnotations.xsl")), pretty_print=True))
+                            html_tree, pretty_print=True))
+
+    # strip fonts
+    tmpdir = mkdtemp('-librarian-epub')
+    cwd = os.getcwd()
+
+    os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../font-optimizer'))
+    for fname in 'DejaVuSerif.ttf', 'DejaVuSerif-Bold.ttf', 'DejaVuSerif-Italic.ttf', 'DejaVuSerif-BoldItalic.ttf':
+        subprocess.check_call(['./subset.pl', '--chars', ''.join(chars).encode('utf-8'), res('../fonts/' + fname), os.path.join(tmpdir, fname)])
+        zip.write(os.path.join(tmpdir, fname), os.path.join('OPS', fname))
+    rmtree(tmpdir)
+    os.chdir(cwd)
 
     zip.writestr('OPS/content.opf', etree.tostring(opf, pretty_print=True))
     contents = []
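
The subsetting step feeds every character collected from the generated HTML to the bundled font-optimizer's subset.pl, once per DejaVu Serif face, and zips the trimmed fonts into OPS/. Roughly the same effect can be had without changing the process-wide working directory by passing cwd= to subprocess (a sketch reusing the chars, res() and tmpdir names from the code above; not what the patch does):

    optimizer_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../font-optimizer')
    for fname in 'DejaVuSerif.ttf', 'DejaVuSerif-Bold.ttf', 'DejaVuSerif-Italic.ttf', 'DejaVuSerif-BoldItalic.ttf':
        subprocess.check_call(
            [os.path.join(optimizer_dir, 'subset.pl'), '--chars', ''.join(chars).encode('utf-8'),
             res('../fonts/' + fname), os.path.join(tmpdir, fname)],
            cwd=optimizer_dir)   # subset.pl still runs from its own directory
        zip.write(os.path.join(tmpdir, fname), os.path.join('OPS', fname))
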
@@ -402,6 +425,8 @@ def transform(provider, slug, output_file):
 
 
 if __name__ == '__main__':
+    from librarian import DirDocProvider
+
     if len(sys.argv) < 2:
         print >> sys.stderr, 'Usage: python epub.py <input file>'
         sys.exit(1)
@@ -409,7 +434,6 @@ if __name__ == '__main__':
     main_input = sys.argv[1]
     basepath, ext = os.path.splitext(main_input)
     path, slug = os.path.realpath(basepath).rsplit('/', 1)
-    output = basepath + '.epub'
     provider = DirDocProvider(path)
-    transform(provider, slug, open(output, 'w'))
+    transform(provider, slug, output_dir=path)
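
After this change the command-line entry point no longer writes <input>.epub next to the input file; it derives the output directory from the input path and files the result under the author's name. Roughly, for an invented input path:

    # Equivalent of: python epub.py /srv/books/pan-tadeusz.xml
    import os.path
    from librarian import DirDocProvider
    from librarian import epub

    main_input = '/srv/books/pan-tadeusz.xml'
    basepath, ext = os.path.splitext(main_input)
    path, slug = os.path.realpath(basepath).rsplit('/', 1)   # '/srv/books', 'pan-tadeusz'
    epub.transform(DirDocProvider(path), slug, output_dir=path)
    # The file lands at /srv/books/<author>/pan-tadeusz.epub, with <author>
    # taken from the document's metadata (unicode(book_info.author)).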