import time
from base64 import urlsafe_b64encode
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponsePermanentRedirect
from django.core.files.uploadedfile import UploadedFile
from django.core.files.base import File
from django.core.files.storage import DefaultStorage
from django.utils.hashcompat import sha_constructor
from django.conf import settings
from celery.task import task
from os import mkdir, path, unlink
from errno import EEXIST, ENOENT
from fcntl import flock, LOCK_EX
from zipfile import ZipFile
import random

from reporting.utils import read_chunks
import catalogue.models

# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange


class LockFile(object):
    """
    A file lock monitor class; creates an ${objname}.lock
    file in directory `dir` and locks it exclusively.
    To be used in a 'with' statement.
    """
def __init__(self, dir, objname):
self.lockname = path.join(dir, objname + ".lock")
    def __enter__(self):
self.lock = open(self.lockname, 'w')
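        # block until we hold an exclusive lock on the lock file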
flock(self.lock, LOCK_EX)
def __exit__(self, *err):
        try:
            unlink(self.lockname)
        except OSError as oe:
            # another process may have removed the lock file already
            if oe.errno != ENOENT:
                raise oe
        self.lock.close()
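

# Example usage (directory and name are illustrative); only one process at a
# time gets past the `with` statement for a given lock name:
#
#     with LockFile(path.join(settings.MEDIA_ROOT, 'zip'), 'some-slug'):
#         ...  # work on the shared files
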
@task
def create_zip(paths, zip_slug):
    """
    Creates a zip archive in the MEDIA_ROOT/zip directory containing the given
    files. `paths` is a sequence of (arcname, filesystem path) pairs; when
    arcname is None the file's base name is used.
    The resulting archive filename is ${zip_slug}.zip.
    Returns its path relative to MEDIA_ROOT (no initial slash).
    """
# directory to store zip files
    zip_path = path.join(settings.MEDIA_ROOT, 'zip')
    # make sure the zip directory exists
    try:
        mkdir(zip_path)
    except OSError as oe:
        if oe.errno != EEXIST:
            raise oe
    zip_filename = zip_slug + ".zip"

    with LockFile(zip_path, zip_slug):
        if not path.exists(path.join(zip_path, zip_filename)):
            zipf = ZipFile(path.join(zip_path, zip_filename), 'w')
            try:
                for arcname, p in paths:
                    if arcname is None:
                        arcname = path.basename(p)
                    zipf.write(p, arcname)
            finally:
                zipf.close()
return 'zip/' + zip_filename
def remove_zip(zip_slug):
    """
    Removes the ${zip_slug}.zip file from the zip store.
    """
zip_file = path.join(settings.MEDIA_ROOT, 'zip', zip_slug + '.zip')
try:
unlink(zip_file)
except OSError as oe:
        if oe.errno != ENOENT:
raise oe
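

# Example usage (slug and file list are illustrative); create_zip is a celery
# task, so it can be called inline or deferred with .delay():
#
#     create_zip([(None, '/path/to/book.pdf')], 'some-slug')
#     ...
#     remove_zip('some-slug')
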

class AttachmentHttpResponse(HttpResponse):
    """Response serving a file to be downloaded."""
    def __init__(self, file_path, file_name, mimetype):
        super(AttachmentHttpResponse, self).__init__(mimetype=mimetype)
        self['Content-Disposition'] = 'attachment; filename=%s' % file_name
        self.file_path = file_path
        self.file_name = file_name

        # open in binary mode so archive/PDF bytes are not mangled
        with open(DefaultStorage().path(self.file_path), 'rb') as f:
            for chunk in read_chunks(f):
                self.write(chunk)
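

# Example view usage (paths and names are illustrative):
#
#     def download_pdf(request, slug):
#         return AttachmentHttpResponse(
#             file_path='book/pdf/%s.pdf' % slug,
#             file_name='%s.pdf' % slug,
#             mimetype='application/pdf')
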
@task
def async_build_pdf(book_id, customizations, file_name):
    """
    A celery task for generating PDF files.
    Accepts the same arguments as Book.build_pdf, but takes a book id as the
    first parameter instead of a Book instance.
    """
    book = catalogue.models.Book.objects.get(id=book_id)
    print "will gen %s" % DefaultStorage().path(file_name)
    if not DefaultStorage().exists(file_name):
        book.build_pdf(customizations=customizations, file_name=file_name)
    print "done."
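

# Example usage (arguments are illustrative); defer PDF generation to celery
# instead of blocking the request:
#
#     async_build_pdf.delay(book.id, customizations, file_name)
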
class MultiQuerySet(object):
    """Wraps several querysets so they can be counted and sliced as one."""
    def __init__(self, *args, **kwargs):
        self.querysets = args
        self._count = None

    def count(self):
        if not self._count:
            self._count = sum(len(qs) for qs in self.querysets)
        return self._count

    def __len__(self):
        return self.count()

    def __getitem__(self, item):
        try:
            # slice objects provide indices(); plain indexes don't
            offset, stop, step = item.indices(self.count())
        except AttributeError:
            # it's not a slice - make it one
            return self[item : item + 1][0]
        items = []
        total_len = stop - offset
        for qs in self.querysets:
            if len(qs) < offset:
                offset -= len(qs)
            else:
                items += list(qs[offset:stop])
                if len(items) >= total_len:
                    return items
                else:
                    offset = 0
                    stop = total_len - len(items)
                    continue
        # the slice reached past the last queryset
        return items
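

# Example usage (models are illustrative); several querysets presented as one
# sliceable sequence, e.g. for pagination:
#
#     qs = MultiQuerySet(Book.objects.all(), Tag.objects.all())
#     page_items = qs[20:40]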