X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/3615220e78b96f6c29a1d4e7c580904ecab204f7..5782cd6d2ee17eb613055e6b641f01dcd58af80e:/librarian/epub.py
diff --git a/librarian/epub.py b/librarian/epub.py
index 81dcb6e..8dba5ac 100644
--- a/librarian/epub.py
+++ b/librarian/epub.py
@@ -16,15 +16,51 @@ import zipfile
from tempfile import mkdtemp, NamedTemporaryFile
from shutil import rmtree
-from librarian import RDFNS, WLNS, NCXNS, OPFNS, XHTMLNS, OutputFile
+from librarian import RDFNS, WLNS, NCXNS, OPFNS, XHTMLNS, DCNS, OutputFile
from librarian.cover import DefaultEbookCover
from librarian import functions, get_resource
+from librarian.hyphenator import Hyphenator
+
functions.reg_person_name()
functions.reg_lang_code_3to2()
-
def set_hyph_language(source_tree):
    """Return a Hyphenator for the document's dc:language, or None.

    Reads the book's ISO-639-2 (3-letter) language code from the
    document metadata, maps it to the 2-letter code used to name the
    bundled hyphenation dictionaries, and loads the matching ``.dic``
    file.  Returns None when the language is missing or no dictionary
    exists — callers treat None as "skip hyphenation".
    """
    def get_short_lng_code(text):
        """Map an ISO-639-2 code to ISO-639-1; fall back to the input."""
        text = ''.join(text)
        with open(get_resource('res/ISO-639-2_8859-1.txt'), 'rb') as f:
            for line in f:
                # Table columns: 3-letter code | term. code | 2-letter | ...
                fields = line.strip().split('|')
                if fields[0] == text:
                    if fields[2]:
                        return fields[2]
                    break  # language found but has no 2-letter code
        return text

    bibl_lng = etree.XPath('//dc:language//text()',
                           namespaces={'dc': str(DCNS)})(source_tree)
    if not bibl_lng:
        # No dc:language in the metadata — nothing to hyphenate with.
        return None
    short_lng = get_short_lng_code(bibl_lng[0])
    try:
        return Hyphenator(get_resource(
            'res/hyph-dictionaries/hyph_' + short_lng + '.dic'))
    except Exception:
        # Best-effort: no (or unreadable) dictionary for this language.
        return None
+
def hyphenate_and_fix_conjunctions(source_tree, hyph):
    """Insert soft hyphens into the book text and glue single-letter
    conjunctions to the following word.

    :param source_tree: lxml element tree of the document; mutated in place.
    :param hyph: a Hyphenator instance, or None to do nothing.
    """
    if hyph is None:
        return
    # Split text into words / single non-word chars; compile once,
    # outside the loop over text nodes.
    word_re = re.compile(r'\w+|[^\w]', re.UNICODE)
    # /utwor/*[2] selects the main text element (metadata is *[1]).
    for text_node in etree.XPath('/utwor/*[2]//text()')(source_tree):
        parent = text_node.getparent()
        # U+00AD SOFT HYPHEN marks allowed break points inside words.
        hyphenated = ''.join(
            hyph.inserted(chunk, u'\u00AD')
            for chunk in word_re.findall(text_node))
        # Replace the space after a single-letter word with U+00A0
        # NO-BREAK SPACE so conjunctions never end a line.
        hyphenated = re.sub(r'(?<=\s\w)\s+', u'\u00A0', hyphenated)
        if text_node.is_text:
            parent.text = hyphenated
        elif text_node.is_tail:
            parent.tail = hyphenated
+
def inner_xml(node):
""" returns node's text and children as a string
@@ -78,7 +114,6 @@ def replace_characters(node):
def replace_chars(text):
if text is None:
return None
- #text = re.sub(r"(?<=\s\w)\s+", u"\u00a0", text) #fix for hanging single letter conjunctions — for future use.
return text.replace(u"\ufeff", u"")\
.replace("---", u"\u2014")\
.replace("--", u"\u2013")\
@@ -249,7 +284,10 @@ class TOC(object):
nav_label = nav_map.makeelement(NCXNS('navLabel'))
text = nav_map.makeelement(NCXNS('text'))
- text.text = child.name
+ if child.name is not None:
+ text.text = re.sub(r'\n', ' ', child.name)
+ else:
+ text.text = child.name
nav_label.append(text)
nav_point.append(nav_label)
@@ -310,7 +348,7 @@ def chop(main_text):
yield part_xml
last_node_part = True
main_xml_part[:] = [deepcopy(one_part)]
- elif not last_node_part and name in ("naglowek_scena"):
+ elif not last_node_part and name == "naglowek_scena":
yield part_xml
main_xml_part[:] = [deepcopy(one_part)]
else:
@@ -335,7 +373,7 @@ def transform_chunk(chunk_xml, chunk_no, annotations, empty=False, _empty_html_s
toc = TOC()
for element in chunk_xml[0]:
- if element.tag in ("naglowek_czesc"):
+ if element.tag == "naglowek_czesc":
toc.add(node_name(element), "part%d.html#book-text" % chunk_no)
elif element.tag in ("naglowek_rozdzial", "naglowek_akt", "srodtytul"):
toc.add(node_name(element), "part%d.html" % chunk_no)
@@ -370,7 +408,11 @@ def transform(wldoc, verbose=False,
""" processes one input file and proceeds to its children """
replace_characters(wldoc.edoc.getroot())
-
+
+ hyphenator = set_hyph_language(wldoc.edoc.getroot())
+ hyphenate_and_fix_conjunctions(wldoc.edoc.getroot(), hyphenator)
+
+
# every input file will have a TOC entry,
# pointing to starting chunk
toc = TOC(wldoc.book_info.title, "part%d.html" % chunk_counter)
@@ -499,7 +541,7 @@ def transform(wldoc, verbose=False,
'<item id="cover" href="cover.html" media-type="application/xhtml+xml" />'))
manifest.append(etree.fromstring(
'<item id="cover-image" href="%s" media-type="%s" />' % (cover_name, bound_cover.mime_type())))
- spine.insert(0, etree.fromstring('<itemref idref="cover" />'))
+ spine.insert(0, etree.fromstring('<itemref idref="cover" linear="no" />'))
opf.getroot()[0].append(etree.fromstring('<meta name="cover" content="cover-image"/>'))
guide.append(etree.fromstring('<reference href="cover.html" type="cover" title="Okładka"/>'))