X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/8550d172b829f29b2fcd4723789fb2a5d9fff6eb..eb6b8e8e497922011e8a89f1e1869d68ef8f7066:/src/librarian/parser.py?ds=sidebyside diff --git a/src/librarian/parser.py b/src/librarian/parser.py index 2bb9509..bea67b1 100644 --- a/src/librarian/parser.py +++ b/src/librarian/parser.py @@ -5,6 +5,8 @@ # from __future__ import unicode_literals +from collections import Counter + from librarian import ValidationError, NoDublinCore, ParseError, NoProvider from librarian import RDFNS from librarian.cover import make_cover @@ -19,7 +21,30 @@ import re import six +from .elements import WL_ELEMENTS + + +class WLElementLookup(etree.CustomElementClassLookup): + def lookup(self, node_type, document, namespace, name): + if node_type != 'element': + return + if namespace: + return + try: + return WL_ELEMENTS[name] + except KeyError: + return + + +parser = etree.XMLParser() +parser.set_element_class_lookup( + WLElementLookup() +) + + + class WLDocument(object): + """Legacy class, to be replaced with documents.WLDocument.""" LINE_SWAP_EXPR = re.compile(r'/\s', re.MULTILINE | re.UNICODE) provider = None @@ -51,6 +76,52 @@ class WLDocument(object): else: self.book_info = None + def get_statistics(self): + def count_text(text, counter, in_fn=False): + if text: + text = re.sub(r'\s+', ' ', text) + + chars = len(text) if text.strip() else 0 + words = len(text.split()) if text.strip() else 0 + + counter['chars'] += chars + counter['words'] += words + if not in_fn: + counter['chars_with_fn'] += chars + counter['words_with_fn'] += words + + def count(elem, counter, in_fn=False): + if elem.tag in (RDFNS('RDF'), 'nota_red', 'abstrakt', 'uwaga', 'ekstra'): + return + if not in_fn and elem.tag in ('pa', 'pe', 'pr', 'pt', 'motyw'): + in_fn = True + count_text(elem.text, counter, in_fn=in_fn) + for child in elem: + count(child, counter, in_fn=in_fn) + count_text(child.tail, counter, in_fn=in_fn) + + + data = { + "self": Counter(), + "parts": [], + "total": { + } + } + + count(self.edoc.getroot(), data['self']) + for k, v in data['self'].items(): + data['total'][k] = v + + for part in self.parts(pass_part_errors=True): + if isinstance(part, Exception): + data['parts'].append((None, {})) + else: + data['parts'].append((part, part.get_statistics())) + for k, v in data['parts'][-1][1]['total'].items(): + data['total'][k] = data['total'].get(k, 0) + v + + return data + @classmethod def from_bytes(cls, xml, *args, **kwargs): return cls.from_file(six.BytesIO(xml), *args, **kwargs) @@ -102,15 +173,21 @@ class WLDocument(object): elem.insert(0, ins) elem.text = chunks.pop(0) - def parts(self): + def parts(self, pass_part_errors=False): if self.provider is None: raise NoProvider('No document provider supplied.') if self.book_info is None: raise NoDublinCore('No Dublin Core in document.') for part_uri in self.book_info.parts: - yield self.from_file( - self.provider.by_uri(part_uri), provider=self.provider - ) + try: + yield self.from_file( + self.provider.by_slug(part_uri.slug), provider=self.provider + ) + except Exception as e: + if pass_part_errors: + yield e + else: + raise def chunk(self, path): # convert the path to XPath @@ -177,6 +254,13 @@ class WLDocument(object): node.tag = 'span' node.tail = tail + def fix_pa_akap(self): + for pa in ('pa','pe','pr','pt'): + for akap in self.edoc.findall(f'//{pa}/akap'): + akap.getparent().set('blocks', 'true') + if not akap.getparent().index(akap): + akap.set('inline', 'true') + def editors(self): """Returns a set of all editors for 
book and its children."""
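
Note (not part of the patch): a minimal, self-contained sketch of the lxml
element-class-lookup mechanism that the new WLElementLookup/parser pair above
relies on. The Verse class and the one-entry WL_ELEMENTS mapping below are
illustrative stand-ins only; in the patch the real mapping is imported from
librarian's .elements module.

    from lxml import etree

    class Verse(etree.ElementBase):
        # Stand-in for one of the element classes kept in WL_ELEMENTS.
        def describe(self):
            return 'verse: %s' % (self.text or '')

    WL_ELEMENTS = {'wers': Verse}  # assumed shape: tag name -> element class

    class WLElementLookup(etree.CustomElementClassLookup):
        def lookup(self, node_type, document, namespace, name):
            # Same logic as in the patch: only non-namespaced elements are
            # mapped; anything unknown falls back to the default class.
            if node_type != 'element' or namespace:
                return None
            return WL_ELEMENTS.get(name)

    parser = etree.XMLParser()
    parser.set_element_class_lookup(WLElementLookup())

    root = etree.fromstring(b'<utwor><wers>Litwo</wers></utwor>', parser)
    print(type(root[0]).__name__)  # Verse
    print(root[0].describe())      # verse: Litwo

For the statistics code added above, get_statistics() returns a dict with
'self' (a Counter with 'chars', 'words', 'chars_with_fn' and 'words_with_fn'),
'parts' (one (part, statistics) pair per child document, or (None, {}) for a
part that failed to load when pass_part_errors is set) and 'total' (the sums
over 'self' and all parts).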