X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/fefdce4e24f9e397df5538fe6e7f54b5ece4d841..adde01d89c27fb02056fd5c901ffea9ba1d9882f:/src/librarian/parser.py?ds=sidebyside

diff --git a/src/librarian/parser.py b/src/librarian/parser.py
index efe6e95..484b8f9 100644
--- a/src/librarian/parser.py
+++ b/src/librarian/parser.py
@@ -1,29 +1,45 @@
-# -*- coding: utf-8 -*-
-#
 # This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
 #
-from __future__ import unicode_literals
-
+from collections import Counter
+import io
+import os
+import re
+from xml.parsers.expat import ExpatError
+from lxml import etree
+from lxml.etree import XMLSyntaxError, XSLTApplyError
 from librarian import ValidationError, NoDublinCore, ParseError, NoProvider
 from librarian import RDFNS
 from librarian.cover import make_cover
 from librarian import dcparser
+from .elements import WL_ELEMENTS
 
-from xml.parsers.expat import ExpatError
-from lxml import etree
-from lxml.etree import XMLSyntaxError, XSLTApplyError
-import os
-import re
-import six
 
+class WLElementLookup(etree.CustomElementClassLookup):
+    def lookup(self, node_type, document, namespace, name):
+        if node_type != 'element':
+            return
+        if namespace:
+            return
+        try:
+            return WL_ELEMENTS[name]
+        except KeyError:
+            return
+
+
+parser = etree.XMLParser()
+parser.set_element_class_lookup(
+    WLElementLookup()
+)
 
-class WLDocument(object):
+
+
+class WLDocument:
+    """Legacy class, to be replaced with documents.WLDocument."""
     LINE_SWAP_EXPR = re.compile(r'/\s', re.MULTILINE | re.UNICODE)
     provider = None
 
-    def __init__(self, edoc, parse_dublincore=True, provider=None,
+    def __init__(self, edoc, parse_dublincore=True, provider=None, strict=False,
                  meta_fallbacks=None):
         self.edoc = edoc
         self.provider = provider
@@ -33,28 +49,91 @@ class WLDocument(object):
         dc_path = './/' + RDFNS('RDF')
 
         if root_elem.tag != 'utwor':
-            raise ValidationError("Invalid root element. Found '%s', should be 'utwor'" % root_elem.tag)
+            raise ValidationError(
+                "Invalid root element. Found '%s', should be 'utwor'"
+                % root_elem.tag
+            )
 
         if parse_dublincore:
             self.rdf_elem = root_elem.find(dc_path)
 
             if self.rdf_elem is None:
-                raise NoDublinCore("Document must have a '%s' element." % RDFNS('RDF'))
+                raise NoDublinCore(
+                    "Document must have a '%s' element."
+                    % RDFNS('RDF')
+                )
 
             self.book_info = dcparser.BookInfo.from_element(
-                self.rdf_elem, fallbacks=meta_fallbacks, strict=strict)
+                self.rdf_elem, fallbacks=meta_fallbacks, strict=strict)
         else:
             self.book_info = None
 
+    def get_statistics(self):
+        def count_text(text, counter, in_fn=False, stanza=False):
+            if text:
+                text = re.sub(r'\s+', ' ', text)
+
+                chars = len(text) if text.strip() else 0
+                words = len(text.split()) if text.strip() else 0
+
+                counter['chars_with_fn'] += chars
+                counter['words_with_fn'] += words
+                if not in_fn:
+                    counter['chars'] += chars
+                    counter['words'] += words
+                if not stanza:
+                    counter['chars_out_verse_with_fn'] += chars
+                    if not in_fn:
+                        counter['chars_out_verse'] += chars
+
+        def count(elem, counter, in_fn=False, stanza=False):
+            if elem.tag in (RDFNS('RDF'), 'nota_red', 'abstrakt', 'uwaga', 'ekstra'):
+                return
+            if not in_fn and elem.tag in ('pa', 'pe', 'pr', 'pt', 'motyw'):
+                in_fn = True
+            if elem.tag == 'strofa':
+                # count verses now
+                verses = len(elem.findall('.//br')) + 1
+                counter['verses_with_fn'] += verses
+                if not in_fn:
+                    counter['verses'] += verses
+                stanza = True
+            count_text(elem.text, counter, in_fn=in_fn, stanza=stanza)
+            for child in elem:
+                count(child, counter, in_fn=in_fn, stanza=stanza)
+                count_text(child.tail, counter, in_fn=in_fn, stanza=stanza)
+
+        self.swap_endlines()
+
+        data = {
+            "self": Counter(),
+            "parts": [],
+            "total": {
+            }
+        }
+
+        count(self.edoc.getroot(), data['self'])
+        for k, v in data['self'].items():
+            data['total'][k] = v
+
+        for part in self.parts(pass_part_errors=True):
+            if isinstance(part, Exception):
+                data['parts'].append((None, {}))
+            else:
+                data['parts'].append((part, part.get_statistics()))
+                for k, v in data['parts'][-1][1]['total'].items():
+                    data['total'][k] = data['total'].get(k, 0) + v
+
+        return data
+
     @classmethod
     def from_bytes(cls, xml, *args, **kwargs):
-        return cls.from_file(six.BytesIO(xml), *args, **kwargs)
+        return cls.from_file(io.BytesIO(xml), *args, **kwargs)
 
     @classmethod
     def from_file(cls, xmlfile, *args, **kwargs):
 
         # first, prepare for parsing
-        if isinstance(xmlfile, six.text_type):
+        if isinstance(xmlfile, str):
             file = open(xmlfile, 'rb')
             try:
                 data = file.read()
@@ -63,14 +142,14 @@ class WLDocument(object):
         else:
             data = xmlfile.read()
 
-        if not isinstance(data, six.text_type):
+        if not isinstance(data, str):
             data = data.decode('utf-8')
 
-        data = data.replace(u'\ufeff', '')
+        data = data.replace('\ufeff', '')
 
         try:
             parser = etree.XMLParser(remove_blank_text=False)
-            tree = etree.parse(six.BytesIO(data.encode('utf-8')), parser)
+            tree = etree.parse(io.BytesIO(data.encode('utf-8')), parser)
 
             return cls(tree, *args, **kwargs)
         except (ExpatError, XMLSyntaxError, XSLTApplyError) as e:
@@ -97,13 +176,20 @@ class WLDocument(object):
             elem.insert(0, ins)
             elem.text = chunks.pop(0)
 
-    def parts(self):
+    def parts(self, pass_part_errors=False):
         if self.provider is None:
             raise NoProvider('No document provider supplied.')
         if self.book_info is None:
             raise NoDublinCore('No Dublin Core in document.')
         for part_uri in self.book_info.parts:
-            yield self.from_file(self.provider.by_uri(part_uri), provider=self.provider)
+            try:
+                with self.provider.by_slug(part_uri.slug) as f:
+                    yield self.from_file(f, provider=self.provider)
+            except Exception as e:
+                if pass_part_errors:
+                    yield e
+                else:
+                    raise
 
     def chunk(self, path):
         # convert the path to XPath
@@ -150,7 +236,9 @@ class WLDocument(object):
         try:
             xpath = self.path_to_xpath(key)
             node = self.edoc.xpath(xpath)[0]
-            repl = etree.fromstring(u"<%s>%s</%s>" % (node.tag, data, node.tag))
+            repl = etree.fromstring(
+                "<%s>%s</%s>" % (node.tag, data, node.tag)
+            )
             node.getparent().replace(node, repl)
         except Exception as e:
             unmerged.append(repr((key, xpath, e)))
@@ -160,13 +248,21 @@ class WLDocument(object):
     def clean_ed_note(self, note_tag='nota_red'):
         """ deletes forbidden tags from nota_red """
 
-        for node in self.edoc.xpath('|'.join('//%s//%s' % (note_tag, tag) for tag in
-                ('pa', 'pe', 'pr', 'pt', 'begin', 'end', 'motyw'))):
+        for node in self.edoc.xpath('|'.join(
+                '//%s//%s' % (note_tag, tag) for tag in
+                ('pa', 'pe', 'pr', 'pt', 'begin', 'end', 'motyw'))):
             tail = node.tail
             node.clear()
             node.tag = 'span'
             node.tail = tail
 
+    def fix_pa_akap(self):
+        for pa in ('pa','pe','pr','pt'):
+            for akap in self.edoc.findall(f'//{pa}/akap'):
+                akap.getparent().set('blocks', 'true')
+                if not akap.getparent().index(akap):
+                    akap.set('inline', 'true')
+
     def editors(self):
         """Returns a set of all editors for book and its children.
 
@@ -174,7 +270,8 @@ class WLDocument(object):
         """
         if self.book_info is None:
             raise NoDublinCore('No Dublin Core in document.')
-        persons = set(self.book_info.editors + self.book_info.technical_editors)
+        persons = set(self.book_info.editors
+                      + self.book_info.technical_editors)
         for child in self.parts():
             persons.update(child.editors())
         if None in persons:
@@ -191,18 +288,10 @@ class WLDocument(object):
         from librarian import text
         return text.transform(self, *args, **kwargs)
 
-    def as_epub(self, *args, **kwargs):
-        from librarian import epub
-        return epub.transform(self, *args, **kwargs)
-
     def as_pdf(self, *args, **kwargs):
         from librarian import pdf
         return pdf.transform(self, *args, **kwargs)
 
-    def as_mobi(self, *args, **kwargs):
-        from librarian import mobi
-        return mobi.transform(self, *args, **kwargs)
-
     def as_fb2(self, *args, **kwargs):
         from librarian import fb2
         return fb2.transform(self, *args, **kwargs)
@@ -218,11 +307,16 @@ class WLDocument(object):
         from librarian import pdf
         return pdf.transform(self, *args, **kwargs)
 
-    def save_output_file(self, output_file, output_path=None, output_dir_path=None, make_author_dir=False, ext=None):
+    def save_output_file(self, output_file, output_path=None,
+                         output_dir_path=None, make_author_dir=False,
+                         ext=None):
         if output_dir_path:
             save_path = output_dir_path
            if make_author_dir:
-                save_path = os.path.join(save_path, six.text_type(self.book_info.author).encode('utf-8'))
+                save_path = os.path.join(
+                    save_path,
+                    str(self.book_info.author).encode('utf-8')
+                )
             save_path = os.path.join(save_path, self.book_info.url.slug)
             if ext:
                 save_path += '.%s' % ext
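
A minimal usage sketch of the API this change touches (not part of the diff itself): loading a document with WLDocument.from_file() and a document provider, then calling the new get_statistics() method. The 'books/' directory and 'example.xml' name are placeholders, and DirDocProvider is used here on the assumption that the directory-based provider from the librarian package matches the version this diff targets.

    # Illustrative sketch only; paths are placeholders and DirDocProvider is
    # assumed to be the directory-based provider shipped with librarian.
    from librarian import DirDocProvider
    from librarian.parser import WLDocument

    # A provider is only needed to resolve parts of multi-part books (see parts()).
    provider = DirDocProvider('books/')
    doc = WLDocument.from_file('books/example.xml', provider=provider)

    print(doc.book_info.title)             # Dublin Core metadata, parsed by default
    stats = doc.get_statistics()           # new in this change: char/word/verse counters
    print(stats['total'].get('words', 0))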