import re
from copy import deepcopy
from subprocess import call, PIPE
+from itertools import chain
from Texml.processor import process
from lxml import etree
from librarian import ParseError, DCNS, get_resource, OutputFile
from librarian import functions
from librarian.cover import DefaultEbookCover
+from .sponsor import sponsor_logo
functions.reg_substitute_entities()
'wl2tex': 'pdf/wl2tex.xslt',
}
-#CUSTOMIZATIONS = [
-# 'nofootnotes',
-# 'nothemes',
-# 'defaultleading',
-# 'onehalfleading',
-# 'doubleleading',
-# 'nowlfont',
-# ]
+# CUSTOMIZATIONS = [
+# 'nofootnotes',
+# 'nothemes',
+# 'defaultleading',
+# 'onehalfleading',
+# 'doubleleading',
+# 'nowlfont',
+# ]
+
def insert_tags(doc, split_re, tagname, exclude=None):
""" inserts <tagname> for every occurence of `split_re' in text nodes in the `doc' tree
- >>> t = etree.fromstring('<a><b>A-B-C</b>X-Y-Z</a>');
- >>> insert_tags(t, re.compile('-'), 'd');
+ >>> t = etree.fromstring('<a><b>A-B-C</b>X-Y-Z</a>')
+ >>> insert_tags(t, re.compile('-'), 'd')
>>> print etree.tostring(t)
<a><b>A<d/>B<d/>C</b>X<d/>Y<d/>Z</a>
"""
insert_tags(doc,
re.compile("(?<=[^-\s])-(?=[^-\s])"),
"dywiz",
- exclude=[DCNS("identifier.url"), DCNS("rights.license")]
+ exclude=[DCNS("identifier.url"), DCNS("rights.license"), "meta"]
)
)
+def fix_tables(doc):
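+    """ Prepares tables for the TeX stylesheet: drops whitespace-only tails
+    of <kol> cells and stores a column format in the '_format' attribute of
+    <tabela>/<tabelka> ('|X|X|X|' for a framed three-column table, 'XXX' otherwise). """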
+ for kol in doc.iter(tag='kol'):
+ if kol.tail is not None:
+ if not kol.tail.strip():
+ kol.tail = None
+ for table in chain(doc.iter(tag='tabela'), doc.iter(tag='tabelka')):
+ if table.get('ramka') == '1' or table.get('ramki') == '1':
+ table.set('_format', '|' + 'X|' * len(table[0]))
+ else:
+ table.set('_format', 'X' * len(table[0]))
+
+
def move_motifs_inside(doc):
""" moves motifs to be into block elements """
- for master in doc.xpath('//powiesc|//opowiadanie|//liryka_l|//liryka_lp|//dramat_wierszowany_l|//dramat_wierszowany_lp|//dramat_wspolczesny'):
+ for master in doc.xpath('//powiesc|//opowiadanie|//liryka_l|//liryka_lp|'
+ '//dramat_wierszowany_l|//dramat_wierszowany_lp|//dramat_wspolczesny'):
for motif in master.xpath('motyw'):
for sib in motif.itersiblings():
- if sib.tag not in ('sekcja_swiatlo', 'sekcja_asterysk', 'separator_linia', 'begin', 'end', 'motyw', 'extra', 'uwaga'):
+ if sib.tag not in ('sekcja_swiatlo', 'sekcja_asterysk', 'separator_linia',
+ 'begin', 'end', 'motyw', 'extra', 'uwaga'):
# motif shouldn't have a tail - it would be untagged text
motif.tail = None
motif.getparent().remove(motif)
    Finds all dc:creator and dc:contributor.translator tags
and adds *_parsed versions with forenames first.
"""
- for person in doc.xpath("|".join('//dc:'+(tag) for tag in (
- 'creator', 'contributor.translator')),
- namespaces = {'dc': str(DCNS)})[::-1]:
+ for person in doc.xpath("|".join('//dc:' + tag for tag in ('creator', 'contributor.translator')),
+ namespaces={'dc': str(DCNS)})[::-1]:
if not person.text:
continue
p = Person.from_text(person.text)
def transform(wldoc, verbose=False, save_tex=None, morefloats=None,
- cover=None, flags=None, customizations=None):
+ cover=None, flags=None, customizations=None, ilustr_path='', latex_dir=False):
""" produces a PDF file with XeLaTeX
wldoc: a WLDocument
if cover:
if cover is True:
cover = DefaultEbookCover
- bound_cover = cover(book_info)
+ bound_cover = cover(book_info, width=1200)
root.set('data-cover-width', str(bound_cover.width))
root.set('data-cover-height', str(bound_cover.height))
if bound_cover.uses_dc_cover:
if book_info.cover_by:
root.set('data-cover-by', book_info.cover_by)
if book_info.cover_source:
- root.set('data-cover-source',
- book_info.cover_source)
+ root.set('data-cover-source', book_info.cover_source)
if flags:
for flag in flags:
root.set('flag-' + flag, 'yes')
root.set('customizations', u','.join(customizations))
# add editors info
- root.set('editors', u', '.join(sorted(
- editor.readable() for editor in document.editors())))
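+    # only set the 'editors' attribute when the document actually lists editors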
+ editors = document.editors()
+ if editors:
+ root.set('editors', u', '.join(sorted(
+ editor.readable() for editor in editors)))
if document.book_info.funders:
root.set('funders', u', '.join(document.book_info.funders))
if document.book_info.thanks:
parse_creator(document.edoc)
substitute_hyphens(document.edoc)
fix_hanging(document.edoc)
+ fix_tables(document.edoc)
# wl -> TeXML
style_filename = get_stylesheet("wl2tex")
style = etree.parse(style_filename)
-
- texml = document.transform(style)
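+    # register the mathml-latex XPath extension so the stylesheet can convert MathML formulas to LaTeX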
+ functions.reg_mathml_latex()
# TeXML -> LaTeX
temp = mkdtemp('-wl2pdf')
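+    # copy referenced illustrations from ilustr_path into the build dir so XeLaTeX can include them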
+ for ilustr in document.edoc.findall("//ilustr"):
+ shutil.copy(os.path.join(ilustr_path, ilustr.get("src")), temp)
+
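+    # copy each sponsor logo into the build dir and register it in the tree as a <data-sponsor> element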
+ for sponsor in book_info.sponsors:
+ ins = etree.Element("data-sponsor", name=sponsor)
+ logo = sponsor_logo(sponsor)
+ if logo:
+ fname = 'sponsor-%s' % os.path.basename(logo)
+ shutil.copy(logo, os.path.join(temp, fname))
+ ins.set('src', fname)
+ root.insert(0, ins)
+
+ if book_info.sponsor_note:
+ root.set("sponsor-note", book_info.sponsor_note)
+
+ texml = document.transform(style)
+
if cover:
with open(os.path.join(temp, 'cover.png'), 'w') as f:
- bound_cover.save(f)
+ bound_cover.save(f, quality=80)
- del document # no longer needed large object :)
+ del document # no longer needed large object :)
tex_path = os.path.join(temp, 'doc.tex')
fout = open(tex_path, 'w')
shutil.copy(get_resource('pdf/wl.cls'), temp)
shutil.copy(get_resource('res/wl-logo.png'), temp)
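+    # with latex_dir=True the caller gets the directory of TeX sources instead of a compiled PDF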
+ if latex_dir:
+ return temp
+
try:
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(temp)
- if verbose:
- p = call(['xelatex', tex_path])
- else:
- p = call(['xelatex', '-interaction=batchmode', tex_path], stdout=PIPE, stderr=PIPE)
- if p:
- raise ParseError("Error parsing .tex file")
+    # some things (e.g. line numbers) render better when XeLaTeX runs twice,
+    # but the second pass is not enabled for now
+ for run in xrange(1):
+ if verbose:
+ p = call(['xelatex', tex_path])
+ else:
+ p = call(['xelatex', '-interaction=batchmode', tex_path], stdout=PIPE, stderr=PIPE)
+ if p:
+ raise ParseError("Error parsing .tex file")
if cwd is not None:
os.chdir(cwd)
text = re.sub(ur"([\u0400-\u04ff]+)", ur"<alien>\1</alien>", text)
- document = WLDocument.from_string(text,
- parse_dublincore=True, provider=provider)
+ document = WLDocument.from_string(text, parse_dublincore=True, provider=provider)
document.swap_endlines()
for child_uri in document.book_info.parts: