def transform_file(wldoc, chunk_counter=1, first=True, sample=None):
    """Process one input file and proceed to its children.

    Writes the document's HTML chunks (and, for the root document, a
    title page) into the enclosing ``zip`` archive, registering each
    chunk in the closure's ``manifest`` and ``spine``, then recurses
    into ``wldoc.parts()``.

    :param wldoc: parsed WL document to transform
    :param chunk_counter: number of the next ``part%d.html`` chunk
    :param first: True only for the root document (gets a title page)
    :param sample: remaining sample size (verse/paragraph count) or
        None for a full conversion; <= 0 means emit empty chunks
    :return: ``(toc, chunk_counter, chars, sample)`` — the TOC subtree
        for this document, the updated chunk counter, the set of
        characters used (for font subsetting), and the remaining sample
    """
    replace_characters(wldoc.edoc.getroot())

    # NOTE(review): `hyphenate` is a closure variable of the enclosing
    # transform function — not visible in this fragment.
    hyphenator = set_hyph_language(wldoc.edoc.getroot()) if hyphenate else None
    hyphenate_and_fix_conjunctions(wldoc.edoc.getroot(), hyphenator)

    # every input file will have a TOC entry,
    # pointing to starting chunk
    toc = TOC(wldoc.book_info.title, "part%d.html" % chunk_counter)
    chars = set()
    if first:
        # write book title page
        html_tree = xslt(wldoc.edoc, get_resource('epub/xsltTitle.xsl'))
        chars = used_chars(html_tree.getroot())
        zip.writestr(
            'OPS/title.html',
            etree.tostring(
                html_tree, pretty_print=True, xml_declaration=True,
                encoding="utf-8",
                doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"' +
                ' "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
            )
        )
        # add a title page TOC entry
        toc.add(u"Strona tytułowa", "title.html")
    elif wldoc.book_info.parts:
        # write title page for every parent
        if sample is not None and sample <= 0:
            chars = set()
            # sample budget exhausted: emit a placeholder chunk.
            # Use a context manager so the resource file is closed
            # (the original open(...).read() leaked the handle).
            with open(get_resource('epub/emptyChunk.html')) as empty_chunk:
                html_string = empty_chunk.read()
        else:
            html_tree = xslt(wldoc.edoc, get_resource('epub/xsltChunkTitle.xsl'))
            chars = used_chars(html_tree.getroot())
            html_string = etree.tostring(
                html_tree, pretty_print=True, xml_declaration=True,
                encoding="utf-8",
                doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"' +
                ' "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
            )
        zip.writestr('OPS/part%d.html' % chunk_counter, html_string)
        add_to_manifest(manifest, chunk_counter)
        add_to_spine(spine, chunk_counter)
        chunk_counter += 1

    if len(wldoc.edoc.getroot()) > 1:
        # rdf before style master
        main_text = wldoc.edoc.getroot()[1]
    else:
        # rdf in style master
        main_text = wldoc.edoc.getroot()[0]
        if main_text.tag == RDFNS('RDF'):
            main_text = None

    if main_text is not None:
        for chunk_xml in chop(main_text):
            empty = False
            if sample is not None:
                if sample <= 0:
                    empty = True
                else:
                    # count verses and paragraphs against the sample budget
                    sample -= len(chunk_xml.xpath('//strofa|//akap|//akap_cd|//akap_dialog'))
            chunk_html, chunk_toc, chunk_chars = transform_chunk(chunk_xml, chunk_counter, annotations, empty)

            toc.extend(chunk_toc)
            chars = chars.union(chunk_chars)
            zip.writestr('OPS/part%d.html' % chunk_counter, chunk_html)
            add_to_manifest(manifest, chunk_counter)
            add_to_spine(spine, chunk_counter)
            chunk_counter += 1

    # recurse into child documents, threading counter/chars/sample through
    for child in wldoc.parts():
        child_toc, chunk_counter, chunk_chars, sample = transform_file(
            child, chunk_counter, first=False, sample=sample)
        toc.append(child_toc)
        chars = chars.union(chunk_chars)

    return toc, chunk_counter, chars, sample
+
# Work on a private copy of the document tree so the caller's `wldoc`
# is not mutated by the flag/metadata tweaks below; drop the original
# name to make accidental use impossible.
document = deepcopy(wldoc)
del wldoc

# Mark requested processing flags directly on the root element so the
# XSLT stylesheets can see them.
if flags:
    for flag in flags:
        document.edoc.getroot().set(flag, 'yes')

document.clean_ed_note()
document.clean_ed_note('abstrakt')

# add editors info
editors = document.editors()
if editors:
    document.edoc.getroot().set('editors', u', '.join(sorted(
        editor.readable() for editor in editors)))
if document.book_info.funders:
    document.edoc.getroot().set('funders', u', '.join(
        document.book_info.funders))
if document.book_info.thanks:
    document.edoc.getroot().set('thanks', document.book_info.thanks)

# Build the OPF package document and grab the nodes that the chunk
# transformation will append to.
opf = xslt(document.book_info.to_etree(), get_resource('epub/xsltContent.xsl'))
manifest = opf.find('.//' + OPFNS('manifest'))
guide = opf.find('.//' + OPFNS('guide'))
spine = opf.find('.//' + OPFNS('spine'))