-# -*- coding: utf-8 -*-
-#
# This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
#
-from __future__ import unicode_literals
-
+from collections import Counter
+import io
+import os
+import re
+from xml.parsers.expat import ExpatError
+from lxml import etree
+from lxml.etree import XMLSyntaxError, XSLTApplyError
from librarian import ValidationError, NoDublinCore, ParseError, NoProvider
from librarian import RDFNS
from librarian.cover import make_cover
from librarian import dcparser
+from .elements import WL_ELEMENTS
-from xml.parsers.expat import ExpatError
-from lxml import etree
-from lxml.etree import XMLSyntaxError, XSLTApplyError
-import os
-import re
-import six
+class WLElementLookup(etree.CustomElementClassLookup):
+    """lxml element-class lookup mapping WL tag names to custom classes.
+
+    Makes the parser instantiate the classes from WL_ELEMENTS instead of
+    plain lxml elements for known, un-namespaced WL tags.
+    """
+    def lookup(self, node_type, document, namespace, name):
+        # Only plain elements get custom classes; comments/PIs keep defaults.
+        if node_type != 'element':
+            return
+        # Namespaced elements (e.g. RDF metadata) are not WL elements.
+        if namespace:
+            return
+        try:
+            return WL_ELEMENTS[name]
+        except KeyError:
+            # Unknown tag: fall back to lxml's default element class.
+            return
+
+
+# Module-level parser wired to the WL element lookup: documents parsed with
+# it yield WL_ELEMENTS instances.
+# NOTE(review): from_file() below constructs its own XMLParser without this
+# lookup — confirm which parser callers are expected to use.
+parser = etree.XMLParser()
+parser.set_element_class_lookup(
+    WLElementLookup()
+)
-class WLDocument(object):
+
+class WLDocument:
+ """Legacy class, to be replaced with documents.WLDocument."""
LINE_SWAP_EXPR = re.compile(r'/\s', re.MULTILINE | re.UNICODE)
provider = None
else:
self.book_info = None
+    def get_statistics(self):
+        """Count characters, words and verses for this document and its parts.
+
+        Returns a dict with:
+        * 'self'  -- Counter of stats for this document alone,
+        * 'parts' -- list of (part document or None on load error, part stats),
+        * 'total' -- stats of this document aggregated with all parts.
+        Keys come in two flavours: '*_with_fn' includes footnote/theme text
+        (pa/pe/pr/pt/motyw); the plain keys exclude it.
+        """
+        def count_text(text, counter, in_fn=False, stanza=False):
+            # Accumulate character/word counts of one text fragment.
+            if text:
+                # Collapse whitespace runs so word/char counts are stable.
+                text = re.sub(r'\s+', ' ', text)
+
+                # Whitespace-only fragments count as zero.
+                chars = len(text) if text.strip() else 0
+                words = len(text.split()) if text.strip() else 0
+
+                counter['chars_with_fn'] += chars
+                counter['words_with_fn'] += words
+                if not in_fn:
+                    counter['chars'] += chars
+                    counter['words'] += words
+                if not stanza:
+                    # Text outside of verse (stanza) bodies, tracked separately.
+                    counter['chars_out_verse_with_fn'] += chars
+                    if not in_fn:
+                        counter['chars_out_verse'] += chars
+
+        def count(elem, counter, in_fn=False, stanza=False):
+            # Recursively walk the element tree, skipping non-content nodes.
+            if elem.tag in (RDFNS('RDF'), 'nota_red', 'abstrakt', 'uwaga', 'ekstra'):
+                return
+            # Entering a footnote or theme marker: everything below is
+            # counted only into the '*_with_fn' keys.
+            if not in_fn and elem.tag in ('pa', 'pe', 'pr', 'pt', 'motyw'):
+                in_fn = True
+            if elem.tag == 'strofa':
+                # count verses now
+                verses = len(elem.findall('.//br')) + 1
+                counter['verses_with_fn'] += verses
+                if not in_fn:
+                    counter['verses'] += verses
+                stanza = True
+            count_text(elem.text, counter, in_fn=in_fn, stanza=stanza)
+            for child in elem:
+                count(child, counter, in_fn=in_fn, stanza=stanza)
+                # Tail text belongs to the parent's flow, same flags apply.
+                count_text(child.tail, counter, in_fn=in_fn, stanza=stanza)
+
+        # Normalize '/' line-break markers into <br/> before counting verses.
+        self.swap_endlines()
+
+        data = {
+            "self": Counter(),
+            "parts": [],
+            "total": {
+            }
+        }
+
+        count(self.edoc.getroot(), data['self'])
+        for k, v in data['self'].items():
+            data['total'][k] = v
+
+        # Recurse into child documents; failed parts become (None, {}) and
+        # contribute nothing to the totals.
+        for part in self.parts(pass_part_errors=True):
+            if isinstance(part, Exception):
+                data['parts'].append((None, {}))
+            else:
+                data['parts'].append((part, part.get_statistics()))
+                for k, v in data['parts'][-1][1]['total'].items():
+                    data['total'][k] = data['total'].get(k, 0) + v
+
+        return data
+
+
     @classmethod
     def from_bytes(cls, xml, *args, **kwargs):
+        """Parse a document from raw bytes; see from_file for details."""
-        return cls.from_file(six.BytesIO(xml), *args, **kwargs)
+        return cls.from_file(io.BytesIO(xml), *args, **kwargs)
@classmethod
def from_file(cls, xmlfile, *args, **kwargs):
# first, prepare for parsing
- if isinstance(xmlfile, six.text_type):
+ if isinstance(xmlfile, str):
file = open(xmlfile, 'rb')
try:
data = file.read()
else:
data = xmlfile.read()
- if not isinstance(data, six.text_type):
+ if not isinstance(data, str):
data = data.decode('utf-8')
- data = data.replace(u'\ufeff', '')
+ data = data.replace('\ufeff', '')
try:
parser = etree.XMLParser(remove_blank_text=False)
- tree = etree.parse(six.BytesIO(data.encode('utf-8')), parser)
+ tree = etree.parse(io.BytesIO(data.encode('utf-8')), parser)
return cls(tree, *args, **kwargs)
except (ExpatError, XMLSyntaxError, XSLTApplyError) as e:
elem.insert(0, ins)
elem.text = chunks.pop(0)
-    def parts(self):
+    def parts(self, pass_part_errors=False):
+        """Yield child documents listed in the book's DC parts.
+
+        :param pass_part_errors: if True, an exception raised while loading
+            a part is yielded in its place instead of propagating.
+        :raises NoProvider: when no document provider was supplied.
+        :raises NoDublinCore: when the document lacks Dublin Core metadata.
+        """
         if self.provider is None:
             raise NoProvider('No document provider supplied.')
         if self.book_info is None:
             raise NoDublinCore('No Dublin Core in document.')
         for part_uri in self.book_info.parts:
-            yield self.from_file(
-                self.provider.by_uri(part_uri), provider=self.provider
-            )
+            try:
+                # Parts are now resolved by slug and read via a context
+                # manager, so the provider's file handle is always closed.
+                with self.provider.by_slug(part_uri.slug) as f:
+                    yield self.from_file(f, provider=self.provider)
+            except Exception as e:
+                if pass_part_errors:
+                    yield e
+                else:
+                    raise
def chunk(self, path):
# convert the path to XPath
node.tag = 'span'
node.tail = tail
+    def fix_pa_akap(self):
+        """Annotate footnotes that contain <akap> paragraphs.
+
+        For every footnote element (pa/pe/pr/pt) that has <akap> children,
+        sets blocks="true" on the footnote and inline="true" on its first
+        child, so renderers can keep the opening paragraph on the footnote
+        line while rendering the rest as blocks.
+        """
+        for pa in ('pa','pe','pr','pt'):
+            for akap in self.edoc.findall(f'//{pa}/akap'):
+                akap.getparent().set('blocks', 'true')
+                # index() == 0 is falsy, so only the first <akap> child of
+                # the footnote is marked inline.
+                if not akap.getparent().index(akap):
+                    akap.set('inline', 'true')
+
def editors(self):
"""Returns a set of all editors for book and its children.
from librarian import text
return text.transform(self, *args, **kwargs)
- def as_epub(self, *args, **kwargs):
- from librarian import epub
- return epub.transform(self, *args, **kwargs)
-
def as_pdf(self, *args, **kwargs):
from librarian import pdf
return pdf.transform(self, *args, **kwargs)
- def as_mobi(self, *args, **kwargs):
- from librarian import mobi
- return mobi.transform(self, *args, **kwargs)
-
def as_fb2(self, *args, **kwargs):
from librarian import fb2
return fb2.transform(self, *args, **kwargs)
if make_author_dir:
save_path = os.path.join(
save_path,
- six.text_type(self.book_info.author).encode('utf-8')
+ str(self.book_info.author).encode('utf-8')
)
save_path = os.path.join(save_path, self.book_info.url.slug)
if ext: