X-Git-Url: https://git.mdrn.pl/redakcja.git/blobdiff_plain/6e88635c94b36eaff84bd4274983af977986b699..97487ee6a55f54bd483c20557fb554fea4f28844:/apps/catalogue/xml_tools.py?ds=inline
diff --git a/apps/catalogue/xml_tools.py b/apps/catalogue/xml_tools.py
old mode 100755
new mode 100644
index 522806b6..7be05fd5
--- a/apps/catalogue/xml_tools.py
+++ b/apps/catalogue/xml_tools.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from copy import deepcopy
-from functools import wraps
import re
from lxml import etree
@@ -14,112 +13,8 @@ class ParseError(BaseException):
pass
-def obj_memoized(f):
- """
- A decorator that caches the return values of object methods.
- The cache is kept with the object, in a _obj_memoized property.
- """
- @wraps(f)
- def wrapper(self, *args, **kwargs):
- if not hasattr(self, '_obj_memoized'):
- self._obj_memoized = {}
- key = (f.__name__,) + args + tuple(sorted(kwargs.iteritems()))
- try:
- return self._obj_memoized[key]
- except TypeError:
- return f(self, *args, **kwargs)
- except KeyError:
- self._obj_memoized[key] = f(self, *args, **kwargs)
- return self._obj_memoized[key]
- return wrapper
-
-
-class GradedText(object):
- _edoc = None
-
- ROOT = 'utwor'
- RDF = '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF'
-
- def __init__(self, text):
- self._text = text
-
- @obj_memoized
- def is_xml(self):
- """
- Determines if it's well-formed XML.
-
- >>> GradedText("<a/>").is_xml()
- True
- >>> GradedText("<a>").is_xml()
- False
- """
- try:
- self._edoc = etree.fromstring(self._text)
- except etree.XMLSyntaxError:
- return False
- return True
-
- @obj_memoized
- def is_wl(self):
- """
- Determines if it's an XML with a <utwor> root and a master tag.
-
- >>> GradedText("<utwor><powiesc/></utwor>").is_wl()
- True
- >>> GradedText("<a/>").is_wl()
- False
- """
- if self.is_xml():
- e = self._edoc
- # FIXME: there could be comments
- ret = e.tag == self.ROOT and (
- len(e) == 1 and e[0].tag in MASTERS or
- len(e) == 2 and e[0].tag == self.RDF
- and e[1].tag in MASTERS)
- if ret:
- self._master = e[-1].tag
- del self._edoc
- return ret
- else:
- return False
-
- @obj_memoized
- def is_broken_wl(self):
- """
- Determines if it at least looks like a broken WL file
- and not just some untagged text.
-
- >>> GradedText("<utwor>< broken </utwor>").is_broken_wl()
- True
- >>> GradedText("some text").is_broken_wl()
- False
- """
- if self.is_wl():
- return True
- text = self._text.strip()
- return text.startswith('<utwor') and text.endswith('</utwor>')
-
- def master(self):
- """
- Gets the master tag.
-
- >>> GradedText("<utwor><powiesc/></utwor>").master()
- 'powiesc'
- """
- assert self.is_wl()
- return self._master
-
- @obj_memoized
- def has_trim_begin(self):
- return RE_TRIM_BEGIN.search(self._text)
-
- @obj_memoized
- def has_trim_end(self):
- return RE_TRIM_END.search(self._text)
-
-
def _trim(text, trim_begin=True, trim_end=True):
- """
+ """
Cut off everything before RE_TRIM_BEGIN and after RE_TRIM_END, so
that, e.g., one big XML file can be compiled from many small XML files.
"""
@@ -131,7 +26,7 @@ def _trim(text, trim_begin=True, trim_end=True):
def compile_text(parts):
- """
+ """
Compiles full text from an iterable of parts,
trimming where applicable.
"""
@@ -153,26 +48,6 @@ def compile_text(parts):
return "".join(texts)
-def change_master(text, master):
- """
- Changes the master tag in a WL document.
- """
- e = etree.fromstring(text)
- e[-1].tag = master
- return unicode(etree.tostring(e, encoding="utf-8"), 'utf-8')
-
-
-def basic_structure(text, master):
- e = etree.fromstring('''<utwor>
-<master>
-<!--%s--><!--%s-->
-</master>
-</utwor>''' % (TRIM_BEGIN, TRIM_END))
- e[0].tag = master
- e[0][0].tail = "\n"*3 + text + "\n"*3
- return unicode(etree.tostring(e, encoding="utf-8"), 'utf-8')
-
-
def add_trim_begin(text):
trim_tag = etree.Comment(TRIM_BEGIN)
e = etree.fromstring(text)
@@ -228,9 +103,9 @@ def split_xml(text):
name_elem = deepcopy(element)
for tag in 'extra', 'motyw', 'pa', 'pe', 'pr', 'pt', 'uwaga':
for a in name_elem.findall('.//' + tag):
- a.text=''
+ a.text = ''
del a[:]
- name = etree.tostring(name_elem, method='text', encoding='utf-8')
+ name = etree.tostring(name_elem, method='text', encoding='utf-8').strip()
# in the original, remove everything from the start of the last chapter
parent = element.getparent()
@@ -248,15 +123,11 @@ def split_xml(text):
while parent[0] is not element:
del parent[0]
element, parent = parent, parent.getparent()
- chunks[:0] = [[name,
- unicode(etree.tostring(copied, encoding='utf-8'), 'utf-8')
- ]]
+ chunks[:0] = [[name, unicode(etree.tostring(copied, encoding='utf-8'), 'utf-8')]]
parts = src.findall('.//naglowek_rozdzial')
- chunks[:0] = [[u'początek',
- unicode(etree.tostring(src, encoding='utf-8'), 'utf-8')
- ]]
+ chunks[:0] = [[u'początek', unicode(etree.tostring(src, encoding='utf-8'), 'utf-8')]]
for ch in chunks[1:]:
ch[1] = add_trim_begin(ch[1])
@@ -264,3 +135,68 @@ def split_xml(text):
ch[1] = add_trim_end(ch[1])
return chunks
+
+
+def wl2_to_wl1(wl2_xml, slug):
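+ """
+ Convert a WL2 lesson document to the WL1 format using xslt/wl2to1.xslt.
+
+ Normalises the structure of the 'Przebieg zajęć' section, tags the root
+ with the given slug and strips the leading "N. " numbering from activity
+ headers. Raises ParseError when the document structure is not as expected.
+ """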
+ from lxml import etree
+ import re
+ from StringIO import StringIO
+ from urllib import unquote
+ import os.path
+ from django.conf import settings
+ from fnpdjango.utils.text.slughifi import slughifi
+ from librarian import ParseError, DCNS
+
+ def _register_function(f):
+ """ Register extension function with lxml """
+ ns = etree.FunctionNamespace('http://wolnelektury.pl/functions')
+ ns[f.__name__] = f
+ return f
+
+ @_register_function
+ def slugify(context, text):
+ """Remove unneeded whitespace from beginning and end"""
+ if isinstance(text, list):
+ text = ''.join(text)
+ return slughifi(text)
+
+ @_register_function
+ def rmext(context, text):
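+ """URL-unquote a file name and strip a known document/image extension."""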
+ if isinstance(text, list):
+ text = ''.join(text)
+ text = unquote(text)
+ if '.' in text:
+ name, ext = text.rsplit('.', 1)
+ if ext.lower() in ('doc', 'docx', 'odt', 'pdf', 'jpg', 'jpeg'):
+ text = name
+ return text
+
+ t = etree.parse(os.path.join(settings.PROJECT_ROOT, 'xslt/wl2to1.xslt'))
+ ft = wl2_xml.replace('&nbsp;', ' ')  # assumption: drop HTML &nbsp; entities, which plain XML does not define
+ f2 = StringIO(ft)
+ i1 = etree.parse(f2)
+
+ for sect in i1.findall('//section'):
+ if sect[0].text and sect[0].text.strip() == u'Przebieg zajęć':
+ # Straighten out: hoist sections nested inside the first subsection.
+ first = sect.find('section')
+ subs = first.findall('.//section')
+ for sub in subs:
+ sect.append(sub)
+ break
+ else:
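+ # No 'Przebieg zajęć' section was found; course and synthetic documents must have one.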
+ dc_type = i1.findall('//dc:type', namespaces={'dc': DCNS.uri})
+ if dc_type and dc_type[0].text in ('course', 'synthetic'):
+ raise ParseError('Brak przebiegu')
+
+ i1.getroot().attrib['redslug'] = slug
+ i1.getroot().attrib['wlslug'] = slug # THIS!
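+ # Assumption: both slug attributes are read by wl2to1.xslt when it builds references.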
+ w1t = i1.xslt(t)
+ for h in w1t.findall('//aktywnosc/opis'):
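+ # Each description header must start with "N. " numbering; validate and strip it.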
+ if len(h) == 0:
+ raise ParseError('Pusty element aktywnosc/opis')
+ # FIXME assumption that every lesson has at most 9 parts
+ if not h[0].text or not re.match(r'\d\.\s', h[0].text):
+ raise ParseError('Niepoprawny nagłówek (aktywnosc/opis): %s' % repr(h[0].text))
+ h[0].text = h[0].text[3:]
+ return etree.tostring(w1t, encoding='utf-8')
\ No newline at end of file