X-Git-Url: https://git.mdrn.pl/librarian.git/blobdiff_plain/52de56522d8d29146b3be2266f57ccef0abe417a..adde01d89c27fb02056fd5c901ffea9ba1d9882f:/src/librarian/parser.py?ds=sidebyside

diff --git a/src/librarian/parser.py b/src/librarian/parser.py
index 8adde33..484b8f9 100644
--- a/src/librarian/parser.py
+++ b/src/librarian/parser.py
@@ -1,26 +1,17 @@
-# -*- coding: utf-8 -*-
-#
 # This file is part of Librarian, licensed under GNU Affero GPLv3 or later.
-# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
+# Copyright © Fundacja Wolne Lektury. See NOTICE for more information.
 #
-from __future__ import unicode_literals
-
 from collections import Counter
-
+import io
+import os
+import re
+from xml.parsers.expat import ExpatError
+from lxml import etree
+from lxml.etree import XMLSyntaxError, XSLTApplyError
 from librarian import ValidationError, NoDublinCore, ParseError, NoProvider
 from librarian import RDFNS
 from librarian.cover import make_cover
 from librarian import dcparser
-
-from xml.parsers.expat import ExpatError
-from lxml import etree
-from lxml.etree import XMLSyntaxError, XSLTApplyError
-
-import os
-import re
-import six
-
-
 from .elements import WL_ELEMENTS
 
 
@@ -30,7 +21,10 @@ class WLElementLookup(etree.CustomElementClassLookup):
             return
         if namespace:
             return
-        return WL_ELEMENTS[name]
+        try:
+            return WL_ELEMENTS[name]
+        except KeyError:
+            return
 
 
 parser = etree.XMLParser()
@@ -40,7 +34,7 @@ parser.set_element_class_lookup(
 
 
 
-class WLDocument(object):
+class WLDocument:
     """Legacy class, to be replaced with documents.WLDocument."""
     LINE_SWAP_EXPR = re.compile(r'/\s', re.MULTILINE | re.UNICODE)
     provider = None
@@ -74,30 +68,42 @@ class WLDocument(object):
         self.book_info = None
 
     def get_statistics(self):
-        def count_text(text, counter, in_fn=False):
+        def count_text(text, counter, in_fn=False, stanza=False):
             if text:
                 text = re.sub(r'\s+', ' ', text)
                 chars = len(text) if text.strip() else 0
                 words = len(text.split()) if text.strip() else 0
-                counter['chars'] += chars
-                counter['words'] += words
+                counter['chars_with_fn'] += chars
+                counter['words_with_fn'] += words
                 if not in_fn:
-                    counter['chars_with_fn'] += chars
-                    counter['words_with_fn'] += words
+                    counter['chars'] += chars
+                    counter['words'] += words
+                if not stanza:
+                    counter['chars_out_verse_with_fn'] += chars
+                    if not in_fn:
+                        counter['chars_out_verse'] += chars
 
-        def count(elem, counter, in_fn=False):
+        def count(elem, counter, in_fn=False, stanza=False):
             if elem.tag in (RDFNS('RDF'), 'nota_red', 'abstrakt', 'uwaga', 'ekstra'):
                 return
             if not in_fn and elem.tag in ('pa', 'pe', 'pr', 'pt', 'motyw'):
                 in_fn = True
-            count_text(elem.text, counter, in_fn=in_fn)
+            if elem.tag == 'strofa':
+                # count verses now
+                verses = len(elem.findall('.//br')) + 1
+                counter['verses_with_fn'] += verses
+                if not in_fn:
+                    counter['verses'] += verses
+                stanza = True
+            count_text(elem.text, counter, in_fn=in_fn, stanza=stanza)
             for child in elem:
-                count(child, counter, in_fn=in_fn)
-                count_text(child.tail, counter, in_fn=in_fn)
-
-
+                count(child, counter, in_fn=in_fn, stanza=stanza)
+                count_text(child.tail, counter, in_fn=in_fn, stanza=stanza)
+
+        self.swap_endlines()
+
         data = {
             "self": Counter(),
             "parts": [],
@@ -115,19 +121,19 @@ class WLDocument(object):
             else:
                 data['parts'].append((part, part.get_statistics()))
                 for k, v in data['parts'][-1][1]['total'].items():
-                    data['total'][k] += v
+                    data['total'][k] = data['total'].get(k, 0) + v
 
         return data
 
     @classmethod
    def from_bytes(cls, xml, *args, **kwargs):
-        return cls.from_file(six.BytesIO(xml), *args, **kwargs)
+        return cls.from_file(io.BytesIO(xml), *args, **kwargs)
 
     @classmethod
     def from_file(cls, xmlfile, *args, **kwargs):
 
         # first, prepare for parsing
-        if isinstance(xmlfile, six.text_type):
+        if isinstance(xmlfile, str):
             file = open(xmlfile, 'rb')
             try:
                 data = file.read()
@@ -136,14 +142,14 @@ class WLDocument(object):
         else:
             data = xmlfile.read()
 
-        if not isinstance(data, six.text_type):
+        if not isinstance(data, str):
             data = data.decode('utf-8')
 
-        data = data.replace(u'\ufeff', '')
+        data = data.replace('\ufeff', '')
 
         try:
             parser = etree.XMLParser(remove_blank_text=False)
-            tree = etree.parse(six.BytesIO(data.encode('utf-8')), parser)
+            tree = etree.parse(io.BytesIO(data.encode('utf-8')), parser)
 
             return cls(tree, *args, **kwargs)
         except (ExpatError, XMLSyntaxError, XSLTApplyError) as e:
@@ -177,9 +183,8 @@ class WLDocument(object):
             raise NoDublinCore('No Dublin Core in document.')
         for part_uri in self.book_info.parts:
             try:
-                yield self.from_file(
-                    self.provider.by_uri(part_uri), provider=self.provider
-                )
+                with self.provider.by_slug(part_uri.slug) as f:
+                    yield self.from_file(f, provider=self.provider)
             except Exception as e:
                 if pass_part_errors:
                     yield e
@@ -283,18 +288,10 @@ class WLDocument(object):
         from librarian import text
         return text.transform(self, *args, **kwargs)
 
-    def as_epub(self, *args, **kwargs):
-        from librarian import epub
-        return epub.transform(self, *args, **kwargs)
-
     def as_pdf(self, *args, **kwargs):
         from librarian import pdf
         return pdf.transform(self, *args, **kwargs)
 
-    def as_mobi(self, *args, **kwargs):
-        from librarian import mobi
-        return mobi.transform(self, *args, **kwargs)
-
     def as_fb2(self, *args, **kwargs):
         from librarian import fb2
         return fb2.transform(self, *args, **kwargs)
@@ -318,7 +315,7 @@ class WLDocument(object):
         if make_author_dir:
             save_path = os.path.join(
                 save_path,
-                six.text_type(self.book_info.author).encode('utf-8')
+                str(self.book_info.author).encode('utf-8')
             )
         save_path = os.path.join(save_path, self.book_info.url.slug)
         if ext:
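
Usage sketch (not part of the diff): a minimal example of how the refactored WLDocument API above might be exercised after these changes, assuming a WL-XML file that contains a Dublin Core section and the default constructor arguments; the path 'example.xml' and the specific counter keys printed below are illustrative, taken from the counters that get_statistics() fills in this version.

    from librarian.parser import WLDocument

    # Read raw WL-XML bytes; 'example.xml' is a placeholder path.
    with open('example.xml', 'rb') as f:
        xml_bytes = f.read()

    # from_bytes() now wraps the payload in io.BytesIO (six has been dropped).
    doc = WLDocument.from_bytes(xml_bytes)

    # get_statistics() now calls swap_endlines() itself and adds verse-aware
    # counters alongside the footnote-aware ones.
    stats = doc.get_statistics()
    totals = stats['total']
    print(totals.get('words'), totals.get('words_with_fn'))
    print(totals.get('verses'), totals.get('chars_out_verse'))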