Cleanup of externals.
author    Łukasz Rekucki <lrekucki@gmail.com>    Fri, 26 Mar 2010 19:07:26 +0000 (20:07 +0100)
committer Łukasz Rekucki <lrekucki@gmail.com>    Fri, 26 Mar 2010 19:07:26 +0000 (20:07 +0100)
53 files changed:
.gitignore
README
apps/catalogue/models.py
apps/newtagging/models.py
apps/pagination/__init__.py [deleted file]
apps/pagination/middleware.py [deleted file]
apps/pagination/models.py [deleted file]
apps/pagination/templates/pagination/pagination.html [deleted file]
apps/pagination/templatetags/__init__.py [deleted file]
apps/pagination/templatetags/pagination_tags.py [deleted file]
apps/pagination/tests.py [deleted file]
apps/sorl/thumbnail/base.py [deleted file]
apps/sorl/thumbnail/defaults.py [deleted file]
apps/sorl/thumbnail/fields.py [deleted file]
apps/sorl/thumbnail/main.py [deleted file]
apps/sorl/thumbnail/management/commands/thumbnail_cleanup.py [deleted file]
apps/sorl/thumbnail/models.py [deleted file]
apps/sorl/thumbnail/processors.py [deleted file]
apps/sorl/thumbnail/templatetags/thumbnail.py [deleted file]
apps/sorl/thumbnail/tests/__init__.py [deleted file]
apps/sorl/thumbnail/tests/base.py [deleted file]
apps/sorl/thumbnail/tests/classes.py [deleted file]
apps/sorl/thumbnail/tests/fields.py [deleted file]
apps/sorl/thumbnail/tests/templatetags.py [deleted file]
apps/sorl/thumbnail/tests/utils.py [deleted file]
apps/sorl/thumbnail/utils.py [deleted file]
apps/south/__init__.py [deleted file]
apps/south/db/__init__.py [deleted file]
apps/south/db/generic.py [deleted file]
apps/south/db/mysql.py [deleted file]
apps/south/db/postgresql_psycopg2.py [deleted file]
apps/south/db/sql_server/pyodbc.py [deleted file]
apps/south/db/sqlite3.py [deleted file]
apps/south/docs/CHANGELOG [deleted file]
apps/south/docs/CONTRIBUTING [deleted file]
apps/south/docs/LICENSE [deleted file]
apps/south/docs/README [deleted file]
apps/south/management/commands/migrate.py [deleted file]
apps/south/management/commands/startmigration.py [deleted file]
apps/south/management/commands/syncdb.py [deleted file]
apps/south/management/commands/test.py [deleted file]
apps/south/migration.py [deleted file]
apps/south/models.py [deleted file]
apps/south/setup.py [deleted file]
apps/south/tests/__init__.py [deleted file]
apps/south/tests/db.py [deleted file]
apps/south/tests/fakeapp/migrations/0001_spam.py [deleted file]
apps/south/tests/fakeapp/migrations/0002_eggs.py [deleted file]
apps/south/tests/fakeapp/migrations/0003_alter_spam.py [deleted file]
apps/south/tests/logic.py [deleted file]
lib/feedparser.py [deleted file]
requirements.txt
wolnelektury/templates/catalogue/book_detail.html

diff --git a/.gitignore b/.gitignore
index f11a79b..1152ea7 100644 (file)
@@ -1,12 +1,19 @@
 localsettings.py
 dev.sqlite
+dev.db
 
 # Python garbage
 *.pyc
 .coverage
+pip-log.txt
 
 # Mac OS X garbage
 .DS_Store
 
 # Windows garbage
 thumbs.db
+
+# Eclipse
+.project
+.settings
+.pydevproject
diff --git a/README b/README
index fa3064e..53d8fb6 100644 (file)
--- a/README
+++ b/README
@@ -26,6 +26,8 @@ Just execute this commands:
 Używany kod open-source
 =======================
 
+Zewnętrzny:
+
 django
 ------
  - Źródła: [djangoproject.com](http://www.djangoproject.com/)
@@ -33,19 +35,36 @@ django
  - Licencja: [BSD License](http://code.djangoproject.com/browser/django/trunk/LICENSE)
  - Typ: framework
 
-django-chunks
--------------
- - Źródła: [Google Code](http://code.google.com/p/django-chunks/)
- - Autorzy: Clint Ecker <clintecker@gmail.com>
- - Licencja: [New BSD License](http://www.opensource.org/licenses/bsd-license.php)
- - Typ: biblioteka (aplikacja django)
-
 django-pagination
 -----------------
  - Źródła: [Google Code](http://code.google.com/p/django-pagination/)
  - Autorzy: James Tauber <jtauber@gmail.com>, leidel@gmail.com
  - Licencja: [New BSD License](http://www.opensource.org/licenses/bsd-license.php)
  - Typ: biblioteka (aplikacja django)
+Django South
+-----
+- Źródła: [aercode.org](http://south.aeracode.org/)
+- Autorzy: Andrew Godwin <andrew@aeracode.org>, Andy McCurdy <sedrik@gmail.com>
+- Licencja: [Apache License 2.0](http://www.opensource.org/licenses/apache2.0.php)
+- Typ: biblioteka (aplikacja django)
+
+lxml
+----
+ - Żródła: [codespeak.net](http://codespeak.net/lxml/index.html#download)
+ - Autorzy: [wielu autorów](http://codespeak.net/lxml/credits.html)
+ - Licencja: [BSD License](http://codespeak.net/lxml/index.html#license)
+ - Typ: biblioteka
+feedparser
+----------
+ - Źródła: [Google Code](http://code.google.com/p/feedparser/)
+ - Autorzy: Mark Pilgrim <pilgrim@gmail.com>
+ - Licencja: [MIT License](http://www.opensource.org/licenses/mit-license.php)
+ - Typ: biblioteka
+
+
+Wewnętrzny (w źródłach):
 
 django-compress
 ---------------
@@ -53,7 +72,14 @@ django-compress
  - Autorzy: Andreas Pelme <andreas.pelme@gmail.com>
  - Licencja: [MIT License](http://www.opensource.org/licenses/mit-license.php)
  - Typ: biblioteka (aplikacja django)
-
+ django-chunks
+-------------
+ - Źródła: [Google Code](http://code.google.com/p/django-chunks/)
+ - Autorzy: Clint Ecker <clintecker@gmail.com>
+ - Licencja: [New BSD License](http://www.opensource.org/licenses/bsd-license.php)
+ - Typ: biblioteka (aplikacja django)
 django-newtagging
 -----------------
  - Źródła: [BitBucket](http://www.bitbucket.org/zuber/django-newtagging/)
@@ -61,20 +87,10 @@ django-newtagging
  - Licencja: [MIT License](http://www.opensource.org/licenses/mit-license.php)
  - Typ: biblioteka (aplikacja django)
  - Nota: Aplikacja wzorowana na [django-tagging](http://code.google.com/p/django-tagging/), która jest również wydana na licencji [MIT](http://www.opensource.org/licenses/mit-license.php) Około połowa kodu jest dzielona.
-
-south
------
-- Źródła: [aercode.org](http://south.aeracode.org/)
-- Autorzy: Andrew Godwin <andrew@aeracode.org>, Andy McCurdy <sedrik@gmail.com>
-- Licencja: [Apache License 2.0](http://www.opensource.org/licenses/apache2.0.php)
-- Typ: biblioteka (aplikacja django)
-
-feedparser
-----------
- - Źródła: [Google Code](http://code.google.com/p/feedparser/)
- - Autorzy: Mark Pilgrim <pilgrim@gmail.com>
- - Licencja: [MIT License](http://www.opensource.org/licenses/mit-license.php)
- - Typ: biblioteka
+django-piston (0.2.3rc)
+------------------------
+ - http://bitbucket.org/jespern/django-piston/wiki/Home
 
 markupstring
 ------------
@@ -82,12 +98,4 @@ markupstring
  - Autorzy: Thomas Hinkle
  - Licencja: [MIT License](http://code.activestate.com/help/terms/)
  - Typ: biblioteka
- - Nota: Zmienione przez Marka Stępniowskiego <marek@stepniowski.com> tak, żeby akceptowało ciągi znaków Unicode
-lxml
-----
- - Żródła: [codespeak.net](http://codespeak.net/lxml/index.html#download)
- - Autorzy: [wielu autorów](http://codespeak.net/lxml/credits.html)
- - Licencja: [BSD License](http://codespeak.net/lxml/index.html#license)
- - Typ: biblioteka
-
+ - Nota: Zmienione przez Marka Stępniowskiego <marek@stepniowski.com> tak, żeby akceptowało ciągi znaków Unicode
\ No newline at end of file
diff --git a/apps/catalogue/models.py b/apps/catalogue/models.py
index 2f4e7fe..a9e39de 100644 (file)
@@ -108,7 +108,6 @@ class Book(models.Model):
     objects = models.Manager()
     tagged = managers.ModelTaggedItemManager(Tag)
     tags = managers.TagDescriptor(Tag)
-
     
     @property
     def name(self):
diff --git a/apps/newtagging/models.py b/apps/newtagging/models.py
index e121994..e1e92b6 100644 (file)
@@ -1,8 +1,11 @@
 """
 Models and managers for generic tagging.
 """
 """
 Models and managers for generic tagging.
 """
+
 # Python 2.3 compatibility
-if not hasattr(__builtins__, 'set'):
+try:
+    set
+except NameError: 
     from sets import Set as set
 
 from django.contrib.contenttypes import generic
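The try/except probe introduced above is the more reliable Python 2.3 shim: in an imported module, __builtins__ is usually a plain dict rather than the __builtin__ module, so the old hasattr(__builtins__, 'set') test was false there even on Python 2.4+ and forced the sets fallback. A minimal sketch of the two variants:

    # fragile: in imported modules __builtins__ is a dict, so this test
    # is false even where the built-in set exists
    if not hasattr(__builtins__, 'set'):
        from sets import Set as set

    # robust: probe the name itself; NameError is raised only on
    # Python < 2.4, where the set built-in is missing
    try:
        set
    except NameError:
        from sets import Set as set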
diff --git a/apps/pagination/__init__.py b/apps/pagination/__init__.py
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/apps/pagination/middleware.py b/apps/pagination/middleware.py
deleted file mode 100644 (file)
index 0bab767..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-class PaginationMiddleware(object):
-    """
-    Inserts a variable representing the current page onto the request object if
-    it exists in either **GET** or **POST** portions of the request.
-    """
-    def process_request(self, request):
-        try:
-            request.page = int(request.REQUEST['page'])
-        except (KeyError, ValueError):
-            request.page = 1
\ No newline at end of file
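The deleted app is a vendored copy of django-pagination, now expected to come from requirements.txt instead. Assuming the PyPI package keeps the same module paths (as its documentation describes), the Django settings stay unchanged; a sketch of the relevant settings.py entries:

    # settings.py (sketch; only the pagination-related entries shown)
    MIDDLEWARE_CLASSES = (
        # ...
        'pagination.middleware.PaginationMiddleware',  # sets request.page
    )
    INSTALLED_APPS = (
        # ...
        'pagination',
    )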
diff --git a/apps/pagination/models.py b/apps/pagination/models.py
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/apps/pagination/templates/pagination/pagination.html b/apps/pagination/templates/pagination/pagination.html
deleted file mode 100644 (file)
index 3799314..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-{% if is_paginated %}
-<div class="pagination">
-    {% if page_obj.has_previous %}
-        <a href="?page={{ page_obj.previous_page_number }}{{ getvars }}" class="prev">&lsaquo;&lsaquo; previous</a>
-    {% else %}
-        <span class="disabled prev">&lsaquo;&lsaquo; previous</span>
-    {% endif %}
-    {% for page in pages %}
-        {% if page %}
-            {% ifequal page page_obj.number %}
-                <span class="current page">{{ page }}</span>
-            {% else %}
-                <a href="?page={{ page }}{{ getvars }}" class="page">{{ page }}</a>
-            {% endifequal %}
-        {% else %}
-            ...
-        {% endif %}
-    {% endfor %}
-    {% if page_obj.has_next %}
-        <a href="?page={{ page_obj.next_page_number }}{{ getvars }}" class="next">next &rsaquo;&rsaquo;</a>
-    {% else %}
-        <span class="disabled next">next &rsaquo;&rsaquo;</span>
-    {% endif %}
-</div>
-{% endif %}
diff --git a/apps/pagination/templatetags/__init__.py b/apps/pagination/templatetags/__init__.py
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/apps/pagination/templatetags/pagination_tags.py b/apps/pagination/templatetags/pagination_tags.py
deleted file mode 100644 (file)
index 4908421..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-try:
-    set
-except NameError:
-    from sets import Set as set
-from django import template
-from django.db.models.query import QuerySet
-from django.core.paginator import Paginator, QuerySetPaginator, InvalidPage
-
-register = template.Library()
-
-DEFAULT_PAGINATION = 20
-DEFAULT_WINDOW = 4
-DEFAULT_ORPHANS = 0
-
-def do_autopaginate(parser, token):
-    """
-    Splits the arguments to the autopaginate tag and formats them correctly.
-    """
-    split = token.split_contents()
-    if len(split) == 2:
-        return AutoPaginateNode(split[1])
-    elif len(split) == 3:
-        try:
-            paginate_by = int(split[2])
-        except ValueError:
-            raise template.TemplateSyntaxError(u'Got %s, but expected integer.' % split[2])
-        return AutoPaginateNode(split[1], paginate_by=paginate_by)
-    elif len(split) == 4:
-        try:
-            paginate_by = int(split[2])
-        except ValueError:
-            raise template.TemplateSyntaxError(u'Got %s, but expected integer.' % split[2])
-        try:
-            orphans = int(split[3])
-        except ValueError:
-            raise template.TemplateSyntaxError(u'Got %s, but expected integer.' % split[3])           
-        return AutoPaginateNode(split[1], paginate_by=paginate_by, orphans=orphans)
-    else:
-        raise template.TemplateSyntaxError('%r tag takes one required argument and one optional argument' % split[0])
-
-class AutoPaginateNode(template.Node):
-    """
-    Emits the required objects to allow for Digg-style pagination.
-    
-    First, it looks in the current context for the variable specified.  This
-    should be either a QuerySet or a list.
-    
-    1. If it is a QuerySet, this ``AutoPaginateNode`` will emit a 
-       ``QuerySetPaginator`` and the current page object into the context names
-       ``paginator`` and ``page_obj``, respectively.
-    
-    2. If it is a list, this ``AutoPaginateNode`` will emit a simple
-       ``Paginator`` and the current page object into the context names 
-       ``paginator`` and ``page_obj``, respectively.
-    
-    It will then replace the variable specified with only the objects for the
-    current page.
-    
-    .. note::
-        
-        It is recommended to use *{% paginate %}* after using the autopaginate
-        tag.  If you choose not to use *{% paginate %}*, make sure to display the
-        list of availabale pages, or else the application may seem to be buggy.
-    """
-    def __init__(self, queryset_var, paginate_by=DEFAULT_PAGINATION, orphans=DEFAULT_ORPHANS):
-        self.queryset_var = template.Variable(queryset_var)
-        self.paginate_by = paginate_by
-        self.orphans = orphans
-
-    def render(self, context):
-        key = self.queryset_var.var
-        value = self.queryset_var.resolve(context)
-        if issubclass(value.__class__, QuerySet):
-            model = value.model
-            paginator_class = QuerySetPaginator
-        else:
-            value = list(value)
-            try:
-                model = value[0].__class__
-            except IndexError:
-                return u''
-            paginator_class = Paginator
-        paginator = paginator_class(value, self.paginate_by, self.orphans)
-        try:
-            page_obj = paginator.page(context['request'].page)
-        except InvalidPage:
-            # context[key] = []
-            # context['invalid_page'] = True
-            # return u''
-            from django.http import Http404
-            raise Http404
-        context[key] = page_obj.object_list
-        context['paginator'] = paginator
-        context['page_obj'] = page_obj
-        return u''
-
-def paginate(context, window=DEFAULT_WINDOW):
-    """
-    Renders the ``pagination/pagination.html`` template, resulting in a
-    Digg-like display of the available pages, given the current page.  If there
-    are too many pages to be displayed before and after the current page, then
-    elipses will be used to indicate the undisplayed gap between page numbers.
-    
-    Requires one argument, ``context``, which should be a dictionary-like data
-    structure and must contain the following keys:
-    
-    ``paginator``
-        A ``Paginator`` or ``QuerySetPaginator`` object.
-    
-    ``page_obj``
-        This should be the result of calling the page method on the 
-        aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
-        the current page.
-    
-    This same ``context`` dictionary-like data structure may also include:
-    
-    ``getvars``
-        A dictionary of all of the **GET** parameters in the current request.
-        This is useful to maintain certain types of state, even when requesting
-        a different page.
-        """
-    try:
-        paginator = context['paginator']
-        page_obj = context['page_obj']
-        page_range = paginator.page_range
-        # First and last are simply the first *n* pages and the last *n* pages,
-        # where *n* is the current window size.
-        first = set(page_range[:window])
-        last = set(page_range[-window:])
-        # Now we look around our current page, making sure that we don't wrap
-        # around.
-        current_start = page_obj.number-1-window
-        if current_start < 0:
-            current_start = 0
-        current_end = page_obj.number-1+window
-        if current_end < 0:
-            current_end = 0
-        current = set(page_range[current_start:current_end])
-        pages = []
-        # If there's no overlap between the first set of pages and the current
-        # set of pages, then there's a possible need for elusion.
-        if len(first.intersection(current)) == 0:
-            first_list = sorted(list(first))
-            second_list = sorted(list(current))
-            pages.extend(first_list)
-            diff = second_list[0] - first_list[-1]
-            # If there is a gap of two, between the last page of the first
-            # set and the first page of the current set, then we're missing a
-            # page.
-            if diff == 2:
-                pages.append(second_list[0] - 1)
-            # If the difference is just one, then there's nothing to be done,
-            # as the pages need no elusion and are correct.
-            elif diff == 1:
-                pass
-            # Otherwise, there's a bigger gap which needs to be signaled for
-            # elusion, by pushing a None value to the page list.
-            else:
-                pages.append(None)
-            pages.extend(second_list)
-        else:
-            pages.extend(sorted(list(first.union(current))))
-        # If there's no overlap between the current set of pages and the last
-        # set of pages, then there's a possible need for elusion.
-        if len(current.intersection(last)) == 0:
-            second_list = sorted(list(last))
-            diff = second_list[0] - pages[-1]
-            # If there is a gap of two, between the last page of the current
-            # set and the first page of the last set, then we're missing a 
-            # page.
-            if diff == 2:
-                pages.append(second_list[0] - 1)
-            # If the difference is just one, then there's nothing to be done,
-            # as the pages need no elusion and are correct.
-            elif diff == 1:
-                pass
-            # Otherwise, there's a bigger gap which needs to be signaled for
-            # elusion, by pushing a None value to the page list.
-            else:
-                pages.append(None)
-            pages.extend(second_list)
-        else:
-            pages.extend(sorted(list(last.difference(current))))
-        to_return = {
-            'pages': pages,
-            'page_obj': page_obj,
-            'paginator': paginator,
-            'is_paginated': paginator.count > paginator.per_page,
-        }
-        if 'request' in context:
-            getvars = context['request'].GET.copy()
-            if 'page' in getvars:
-                del getvars['page']
-            if len(getvars.keys()) > 0:
-                to_return['getvars'] = "&%s" % getvars.urlencode()
-            else:
-                to_return['getvars'] = ''
-        return to_return
-    except KeyError:
-        return {}
-register.inclusion_tag('pagination/pagination.html', takes_context=True)(paginate)
-register.tag('autopaginate', do_autopaginate)
\ No newline at end of file
diff --git a/apps/pagination/tests.py b/apps/pagination/tests.py
deleted file mode 100644 (file)
index 837e55c..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
->>> from django.core.paginator import Paginator
->>> from pagination.templatetags.pagination_tags import paginate
->>> from django.template import Template, Context
-
->>> p = Paginator(range(15), 2)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2, 3, 4, 5, 6, 7, 8]
-
->>> p = Paginator(range(17), 2)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2, 3, 4, 5, 6, 7, 8, 9]
-
->>> p = Paginator(range(19), 2)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2, 3, 4, None, 7, 8, 9, 10]
-
->>> p = Paginator(range(21), 2)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2, 3, 4, None, 8, 9, 10, 11]
-
-# Testing orphans
->>> p = Paginator(range(5), 2, 1)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2]
-
->>> p = Paginator(range(21), 2, 1)
->>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
-[1, 2, 3, 4, None, 7, 8, 9, 10]
-
->>> t = Template("{% load pagination_tags %}{% autopaginate var 2 %}{% paginate %}")
-
-# WARNING: Please, please nobody read this portion of the code!
->>> class GetProxy(object):
-...     def __iter__(self): yield self.__dict__.__iter__
-...     def copy(self): return self
-...     def urlencode(self): return u''
-...     def keys(self): return []
->>> class RequestProxy(object):
-...     page = 1
-...     GET = GetProxy()
->>>
-# ENDWARNING
-
->>> t.render(Context({'var': range(21), 'request': RequestProxy()}))
-u'\\n<div class="pagination">...
->>>
->>> t = Template("{% load pagination_tags %}{% autopaginate var %}{% paginate %}")
->>> t.render(Context({'var': range(21), 'request': RequestProxy()}))
-u'\\n<div class="pagination">...
->>>
-"""
\ No newline at end of file
diff --git a/apps/sorl/thumbnail/base.py b/apps/sorl/thumbnail/base.py
deleted file mode 100755 (executable)
index 24f4d97..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-import os
-from os.path import isfile, isdir, getmtime, dirname, splitext, getsize
-from tempfile import mkstemp
-from shutil import copyfile
-
-from PIL import Image
-
-from sorl.thumbnail import defaults
-from sorl.thumbnail.processors import get_valid_options, dynamic_import
-
-
-class ThumbnailException(Exception):
-    # Stop Django templates from choking if something goes wrong.
-    silent_variable_failure = True
-
-
-class Thumbnail(object):
-    imagemagick_file_types = defaults.IMAGEMAGICK_FILE_TYPES
-
-    def __init__(self, source, requested_size, opts=None, quality=85,
-                 dest=None, convert_path=defaults.CONVERT,
-                 wvps_path=defaults.WVPS, processors=None):
-        # Paths to external commands
-        self.convert_path = convert_path
-        self.wvps_path = wvps_path
-        # Absolute paths to files
-        self.source = source
-        self.dest = dest
-
-        # Thumbnail settings
-        try:
-            x, y = [int(v) for v in requested_size]
-        except (TypeError, ValueError):
-            raise TypeError('Thumbnail received invalid value for size '
-                            'argument: %s' % repr(requested_size))
-        else:
-            self.requested_size = (x, y)
-        try:
-            self.quality = int(quality)
-            if not 0 < quality <= 100:
-                raise ValueError
-        except (TypeError, ValueError):
-            raise TypeError('Thumbnail received invalid value for quality '
-                            'argument: %r' % quality)
-
-        # Processors
-        if processors is None:
-            processors = dynamic_import(defaults.PROCESSORS)
-        self.processors = processors
-
-        # Handle old list format for opts.
-        opts = opts or {}
-        if isinstance(opts, (list, tuple)):
-            opts = dict([(opt, None) for opt in opts])
-
-        # Set Thumbnail opt(ion)s
-        VALID_OPTIONS = get_valid_options(processors)
-        for opt in opts:
-            if not opt in VALID_OPTIONS:
-                raise TypeError('Thumbnail received an invalid option: %s'
-                                % opt)
-        self.opts = opts
-
-        if self.dest is not None:
-            self.generate()
-
-    def generate(self):
-        """
-        Generates the thumbnail if it doesn't exist or if the file date of the
-        source file is newer than that of the thumbnail.
-        """
-        # Ensure dest(ination) attribute is set
-        if not self.dest:
-            raise ThumbnailException("No destination filename set.")
-
-        if not isinstance(self.dest, basestring):
-            # We'll assume dest is a file-like instance if it exists but isn't
-            # a string.
-            self._do_generate()
-        elif not isfile(self.dest) or (self.source_exists and
-            getmtime(self.source) > getmtime(self.dest)):
-
-            # Ensure the directory exists
-            directory = dirname(self.dest)
-            if directory and not isdir(directory):
-                os.makedirs(directory)
-
-            self._do_generate()
-
-    def _check_source_exists(self):
-        """
-        Ensure the source file exists. If source is not a string then it is
-        assumed to be a file-like instance which "exists".
-        """
-        if not hasattr(self, '_source_exists'):
-            self._source_exists = (self.source and
-                                   (not isinstance(self.source, basestring) or
-                                    isfile(self.source)))
-        return self._source_exists
-    source_exists = property(_check_source_exists)
-
-    def _get_source_filetype(self):
-        """
-        Set the source filetype. First it tries to use magic and
-        if import error it will just use the extension
-        """
-        if not hasattr(self, '_source_filetype'):
-            if not isinstance(self.source, basestring):
-                # Assuming a file-like object - we won't know it's type.
-                return None
-            try:
-                import magic
-            except ImportError:
-                self._source_filetype = splitext(self.source)[1].lower().\
-                   replace('.', '').replace('jpeg', 'jpg')
-            else:
-                m = magic.open(magic.MAGIC_NONE)
-                m.load()
-                ftype = m.file(self.source)
-                if ftype.find('Microsoft Office Document') != -1:
-                    self._source_filetype = 'doc'
-                elif ftype.find('PDF document') != -1:
-                    self._source_filetype = 'pdf'
-                elif ftype.find('JPEG') != -1:
-                    self._source_filetype = 'jpg'
-                else:
-                    self._source_filetype = ftype
-        return self._source_filetype
-    source_filetype = property(_get_source_filetype)
-
-    # data property is the image data of the (generated) thumbnail
-    def _get_data(self):
-        if not hasattr(self, '_data'):
-            try:
-                self._data = Image.open(self.dest)
-            except IOError, detail:
-                raise ThumbnailException(detail)
-        return self._data
-
-    def _set_data(self, im):
-        self._data = im
-    data = property(_get_data, _set_data)
-
-    # source_data property is the image data from the source file
-    def _get_source_data(self):
-        if not hasattr(self, '_source_data'):
-            if not self.source_exists:
-                raise ThumbnailException("Source file: '%s' does not exist." %
-                                         self.source)
-            if self.source_filetype == 'doc':
-                self._convert_wvps(self.source)
-            elif self.source_filetype in self.imagemagick_file_types:
-                self._convert_imagemagick(self.source)
-            else:
-                self.source_data = self.source
-        return self._source_data
-
-    def _set_source_data(self, image):
-        if isinstance(image, Image.Image):
-            self._source_data = image
-        else:
-            try:
-                self._source_data = Image.open(image)
-            except IOError, detail:
-                raise ThumbnailException("%s: %s" % (detail, image))
-            except MemoryError:
-                raise ThumbnailException("Memory Error: %s" % image)
-    source_data = property(_get_source_data, _set_source_data)
-
-    def _convert_wvps(self, filename):
-        try:
-            import subprocess
-        except ImportError:
-            raise ThumbnailException('wvps requires the Python 2.4 subprocess '
-                                     'package.')
-        tmp = mkstemp('.ps')[1]
-        try:
-            p = subprocess.Popen((self.wvps_path, filename, tmp),
-                                 stdout=subprocess.PIPE)
-            p.wait()
-        except OSError, detail:
-            os.remove(tmp)
-            raise ThumbnailException('wvPS error: %s' % detail)
-        self._convert_imagemagick(tmp)
-        os.remove(tmp)
-
-    def _convert_imagemagick(self, filename):
-        try:
-            import subprocess
-        except ImportError:
-            raise ThumbnailException('imagemagick requires the Python 2.4 '
-                                     'subprocess package.')
-        tmp = mkstemp('.png')[1]
-        if 'crop' in self.opts or 'autocrop' in self.opts:
-            x, y = [d * 3 for d in self.requested_size]
-        else:
-            x, y = self.requested_size
-        try:
-            p = subprocess.Popen((self.convert_path, '-size', '%sx%s' % (x, y),
-                '-antialias', '-colorspace', 'rgb', '-format', 'PNG24',
-                '%s[0]' % filename, tmp), stdout=subprocess.PIPE)
-            p.wait()
-        except OSError, detail:
-            os.remove(tmp)
-            raise ThumbnailException('ImageMagick error: %s' % detail)
-        self.source_data = tmp
-        os.remove(tmp)
-
-    def _do_generate(self):
-        """
-        Generates the thumbnail image.
-
-        This a semi-private method so it isn't directly available to template
-        authors if this object is passed to the template context.
-        """
-        im = self.source_data
-
-        for processor in self.processors:
-            im = processor(im, self.requested_size, self.opts)
-
-        self.data = im
-
-        filelike = not isinstance(self.dest, basestring)
-        if not filelike:
-            dest_extension = os.path.splitext(self.dest)[1][1:]
-            format = None
-        else:
-            dest_extension = None
-            format = 'JPEG'
-        if (self.source_filetype and self.source_filetype == dest_extension and
-                self.source_data == self.data):
-            copyfile(self.source, self.dest)
-        else:
-            try:
-                im.save(self.dest, format=format, quality=self.quality,
-                        optimize=1)
-            except IOError:
-                # Try again, without optimization (PIL can't optimize an image
-                # larger than ImageFile.MAXBLOCK, which is 64k by default)
-                try:
-                    im.save(self.dest, format=format, quality=self.quality)
-                except IOError, detail:
-                    raise ThumbnailException(detail)
-
-        if filelike:
-            self.dest.seek(0)
-
-    # Some helpful methods
-
-    def _dimension(self, axis):
-        if self.dest is None:
-            return None
-        return self.data.size[axis]
-
-    def width(self):
-        return self._dimension(0)
-
-    def height(self):
-        return self._dimension(1)
-
-    def _get_filesize(self):
-        if self.dest is None:
-            return None
-        if not hasattr(self, '_filesize'):
-            self._filesize = getsize(self.dest)
-        return self._filesize
-    filesize = property(_get_filesize)
-
-    def _source_dimension(self, axis):
-        if self.source_filetype in ['pdf', 'doc']:
-            return None
-        else:
-            return self.source_data.size[axis]
-
-    def source_width(self):
-        return self._source_dimension(0)
-
-    def source_height(self):
-        return self._source_dimension(1)
-
-    def _get_source_filesize(self):
-        if not hasattr(self, '_source_filesize'):
-            self._source_filesize = getsize(self.source)
-        return self._source_filesize
-    source_filesize = property(_get_source_filesize)
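A minimal usage sketch for the class removed above, assuming the sorl-thumbnail package from requirements.txt exposes the same constructor; the paths are hypothetical. Passing dest makes __init__ call generate() immediately:

    from sorl.thumbnail.base import Thumbnail

    thumb = Thumbnail(source='/srv/media/photos/cat.jpg',
                      requested_size=(80, 80),
                      opts={'crop': None},  # the old list form ['crop'] is also accepted
                      quality=85,
                      dest='/srv/media/photos/cat_80x80.jpg')
    print thumb.width(), thumb.height()  # dimensions of the generated file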
diff --git a/apps/sorl/thumbnail/defaults.py b/apps/sorl/thumbnail/defaults.py
deleted file mode 100644 (file)
index b4ae142..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-DEBUG = False
-BASEDIR = ''
-SUBDIR = ''
-PREFIX = ''
-QUALITY = 85
-CONVERT = '/usr/bin/convert'
-WVPS = '/usr/bin/wvPS'
-EXTENSION = 'jpg'
-PROCESSORS = (
-    'sorl.thumbnail.processors.colorspace',
-    'sorl.thumbnail.processors.autocrop',
-    'sorl.thumbnail.processors.scale_and_crop',
-    'sorl.thumbnail.processors.filters',
-)
-IMAGEMAGICK_FILE_TYPES = ('eps', 'pdf', 'psd')
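Each default above can be overridden from the Django settings module by prefixing its name with THUMBNAIL_ (see get_thumbnail_setting in main.py below). A sketch of a settings.py fragment:

    THUMBNAIL_DEBUG = True        # re-raise thumbnail errors instead of rendering ''
    THUMBNAIL_QUALITY = 90
    THUMBNAIL_SUBDIR = '_thumbs'
    THUMBNAIL_EXTENSION = 'png'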
diff --git a/apps/sorl/thumbnail/fields.py b/apps/sorl/thumbnail/fields.py
deleted file mode 100644 (file)
index 1b52743..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-from UserDict import DictMixin
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-from django.db.models.fields.files import ImageField, ImageFieldFile
-from django.core.files.base import ContentFile
-from django.utils.safestring import mark_safe
-from django.utils.html import escape
-
-from sorl.thumbnail.base import Thumbnail
-from sorl.thumbnail.main import DjangoThumbnail, build_thumbnail_name
-from sorl.thumbnail.utils import delete_thumbnails
-
-
-REQUIRED_ARGS = ('size',)
-ALL_ARGS = {
-    'size': 'requested_size',
-    'options': 'opts',
-    'quality': 'quality',
-    'basedir': 'basedir',
-    'subdir': 'subdir',
-    'prefix': 'prefix',
-    'extension': 'extension',
-}
-BASE_ARGS = {
-    'size': 'requested_size',
-    'options': 'opts',
-    'quality': 'quality',
-}
-TAG_HTML = '<img src="%(src)s" width="%(width)s" height="%(height)s" alt="" />'
-
-
-class ThumbsDict(object, DictMixin):
-    def __init__(self, descriptor):
-        super(ThumbsDict, self).__init__()
-        self.descriptor = descriptor
-
-    def keys(self):
-        return self.descriptor.field.extra_thumbnails.keys()
-
-
-class LazyThumbs(ThumbsDict):
-    def __init__(self, *args, **kwargs):
-        super(LazyThumbs, self).__init__(*args, **kwargs)
-        self.cached = {}
-
-    def __getitem__(self, key):
-        thumb = self.cached.get(key)
-        if not thumb:
-            args = self.descriptor.field.extra_thumbnails[key]
-            thumb = self.descriptor._build_thumbnail(args)
-            self.cached[key] = thumb
-        return thumb
-
-    def keys(self):
-        return self.descriptor.field.extra_thumbnails.keys()
-
-
-class ThumbTags(ThumbsDict):
-    def __getitem__(self, key):
-        thumb = self.descriptor.extra_thumbnails[key]
-        return self.descriptor._build_thumbnail_tag(thumb)
-
-
-class BaseThumbnailFieldFile(ImageFieldFile):
-    def _build_thumbnail(self, args):
-        # Build the DjangoThumbnail kwargs.
-        kwargs = {}
-        for k, v in args.items():
-            kwargs[ALL_ARGS[k]] = v
-        # Build the destination filename and return the thumbnail.
-        name_kwargs = {}
-        for key in ['size', 'options', 'quality', 'basedir', 'subdir',
-                    'prefix', 'extension']:
-            name_kwargs[key] = args.get(key)
-        source = getattr(self.instance, self.field.name)
-        dest = build_thumbnail_name(source.name, **name_kwargs)
-        return DjangoThumbnail(source, relative_dest=dest, **kwargs)
-
-    def _build_thumbnail_tag(self, thumb):
-        opts = dict(src=escape(thumb), width=thumb.width(),
-                    height=thumb.height())
-        return mark_safe(self.field.thumbnail_tag % opts)
-
-    def _get_extra_thumbnails(self):
-        if self.field.extra_thumbnails is None:
-            return None
-        if not hasattr(self, '_extra_thumbnails'):
-            self._extra_thumbnails = LazyThumbs(self)
-        return self._extra_thumbnails
-    extra_thumbnails = property(_get_extra_thumbnails)
-
-    def _get_extra_thumbnails_tag(self):
-        if self.field.extra_thumbnails is None:
-            return None
-        return ThumbTags(self)
-    extra_thumbnails_tag = property(_get_extra_thumbnails_tag)
-
-    def save(self, *args, **kwargs):
-        # Optionally generate the thumbnails after the image is saved.
-        super(BaseThumbnailFieldFile, self).save(*args, **kwargs)
-        if self.field.generate_on_save:
-            self.generate_thumbnails()
-
-    def delete(self, *args, **kwargs):
-        # Delete any thumbnails too (and not just ones defined here in case
-        # the {% thumbnail %} tag was used or the thumbnail sizes changed).
-        relative_source_path = getattr(self.instance, self.field.name).name
-        delete_thumbnails(relative_source_path)
-        super(BaseThumbnailFieldFile, self).delete(*args, **kwargs)
-
-    def generate_thumbnails(self):
-        # Getting the thumbs generates them.
-        if self.extra_thumbnails:
-            self.extra_thumbnails.values()
-
-
-class ImageWithThumbnailsFieldFile(BaseThumbnailFieldFile):
-    def _get_thumbnail(self):
-        return self._build_thumbnail(self.field.thumbnail)
-    thumbnail = property(_get_thumbnail)
-
-    def _get_thumbnail_tag(self):
-        return self._build_thumbnail_tag(self.thumbnail)
-    thumbnail_tag = property(_get_thumbnail_tag)
-
-    def generate_thumbnails(self, *args, **kwargs):
-        self.thumbnail.generate()
-        Super = super(ImageWithThumbnailsFieldFile, self)
-        return Super.generate_thumbnails(*args, **kwargs)
-
-
-class ThumbnailFieldFile(BaseThumbnailFieldFile):
-    def save(self, name, content, *args, **kwargs):
-        new_content = StringIO()
-        # Build the Thumbnail kwargs.
-        thumbnail_kwargs = {}
-        for k, argk in BASE_ARGS.items():
-            if not k in self.field.thumbnail:
-                continue
-            thumbnail_kwargs[argk] = self.field.thumbnail[k]
-        Thumbnail(source=content, dest=new_content, **thumbnail_kwargs)
-        new_content = ContentFile(new_content.read())
-        super(ThumbnailFieldFile, self).save(name, new_content, *args,
-                                             **kwargs)
-
-    def _get_thumbnail_tag(self):
-        opts = dict(src=escape(self.url), width=self.width,
-                    height=self.height)
-        return mark_safe(self.field.thumbnail_tag % opts)
-    thumbnail_tag = property(_get_thumbnail_tag)
-
-
-class BaseThumbnailField(ImageField):
-    def __init__(self, *args, **kwargs):
-        # The new arguments for this field aren't explicitly defined so that
-        # users can still use normal ImageField positional arguments.
-        self.extra_thumbnails = kwargs.pop('extra_thumbnails', None)
-        self.thumbnail_tag = kwargs.pop('thumbnail_tag', TAG_HTML)
-        self.generate_on_save = kwargs.pop('generate_on_save', False)
-
-        super(BaseThumbnailField, self).__init__(*args, **kwargs)
-        _verify_thumbnail_attrs(self.thumbnail)
-        if self.extra_thumbnails:
-            for extra, attrs in self.extra_thumbnails.items():
-                name = "%r of 'extra_thumbnails'"
-                _verify_thumbnail_attrs(attrs, name)
-
-    def south_field_triple(self):
-        """
-        Return a suitable description of this field for South.
-        """
-        # We'll just introspect ourselves, since we inherit.
-        from south.modelsinspector import introspector
-        field_class = "django.db.models.fields.files.ImageField"
-        args, kwargs = introspector(self)
-        # That's our definition!
-        return (field_class, args, kwargs)
-
-
-class ImageWithThumbnailsField(BaseThumbnailField):
-    """
-    photo = ImageWithThumbnailsField(
-        upload_to='uploads',
-        thumbnail={'size': (80, 80), 'options': ('crop', 'upscale'),
-                   'extension': 'png'},
-        extra_thumbnails={
-            'admin': {'size': (70, 50), 'options': ('sharpen',)},
-        }
-    )
-    """
-    attr_class = ImageWithThumbnailsFieldFile
-
-    def __init__(self, *args, **kwargs):
-        self.thumbnail = kwargs.pop('thumbnail', None)
-        super(ImageWithThumbnailsField, self).__init__(*args, **kwargs)
-
-
-class ThumbnailField(BaseThumbnailField):
-    """
-    avatar = ThumbnailField(
-        upload_to='uploads',
-        size=(200, 200),
-        options=('crop',),
-        extra_thumbnails={
-            'admin': {'size': (70, 50), 'options': (crop, 'sharpen')},
-        }
-    )
-    """
-    attr_class = ThumbnailFieldFile
-
-    def __init__(self, *args, **kwargs):
-        self.thumbnail = {}
-        for attr in ALL_ARGS:
-            if attr in kwargs:
-                self.thumbnail[attr] = kwargs.pop(attr)
-        super(ThumbnailField, self).__init__(*args, **kwargs)
-
-
-def _verify_thumbnail_attrs(attrs, name="'thumbnail'"):
-    for arg in REQUIRED_ARGS:
-        if arg not in attrs:
-            raise TypeError('Required attr %r missing in %s arg' % (arg, name))
-    for attr in attrs:
-        if attr not in ALL_ARGS:
-            raise TypeError('Invalid attr %r found in %s arg' % (arg, name))
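A sketch of how these field types were used from application code (the packaged sorl-thumbnail keeps the same attributes); the Entry model and its photo ImageWithThumbnailsField are hypothetical:

    entry = Entry.objects.get(pk=1)
    entry.photo.thumbnail.absolute_url          # the default thumbnail (a DjangoThumbnail)
    entry.photo.extra_thumbnails['admin']       # extra thumbnails are generated lazily
    entry.photo.extra_thumbnails_tag['admin']   # ready-made <img .../> markup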
diff --git a/apps/sorl/thumbnail/main.py b/apps/sorl/thumbnail/main.py
deleted file mode 100644 (file)
index a59b64f..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-import os
-
-from django.conf import settings
-from django.utils.encoding import iri_to_uri, force_unicode
-
-from sorl.thumbnail.base import Thumbnail
-from sorl.thumbnail.processors import dynamic_import
-from sorl.thumbnail import defaults
-
-
-def get_thumbnail_setting(setting, override=None):
-    """
-    Get a thumbnail setting from Django settings module, falling back to the
-    default.
-
-    If override is not None, it will be used instead of the setting.
-    """
-    if override is not None:
-        return override
-    if hasattr(settings, 'THUMBNAIL_%s' % setting):
-        return getattr(settings, 'THUMBNAIL_%s' % setting)
-    else:
-        return getattr(defaults, setting)
-
-
-def build_thumbnail_name(source_name, size, options=None,
-                         quality=None, basedir=None, subdir=None, prefix=None,
-                         extension=None):
-    quality = get_thumbnail_setting('QUALITY', quality)
-    basedir = get_thumbnail_setting('BASEDIR', basedir)
-    subdir = get_thumbnail_setting('SUBDIR', subdir)
-    prefix = get_thumbnail_setting('PREFIX', prefix)
-    extension = get_thumbnail_setting('EXTENSION', extension)
-    path, filename = os.path.split(source_name)
-    basename, ext = os.path.splitext(filename)
-    name = '%s%s' % (basename, ext.replace(os.extsep, '_'))
-    size = '%sx%s' % tuple(size)
-
-    # Handle old list format for opts.
-    options = options or {}
-    if isinstance(options, (list, tuple)):
-        options = dict([(opt, None) for opt in options])
-
-    opts = options.items()
-    opts.sort()   # options are sorted so the filename is consistent
-    opts = ['%s_' % (v is not None and '%s-%s' % (k, v) or k)
-            for k, v in opts]
-    opts = ''.join(opts)
-    extension = extension and '.%s' % extension
-    thumbnail_filename = '%s%s_%s_%sq%s%s' % (prefix, name, size, opts,
-                                              quality, extension)
-    return os.path.join(basedir, path, subdir, thumbnail_filename)
-
-
-class DjangoThumbnail(Thumbnail):
-    imagemagick_file_types = get_thumbnail_setting('IMAGEMAGICK_FILE_TYPES')
-
-    def __init__(self, relative_source, requested_size, opts=None,
-                 quality=None, basedir=None, subdir=None, prefix=None,
-                 relative_dest=None, processors=None, extension=None):
-        relative_source = force_unicode(relative_source)
-        # Set the absolute filename for the source file
-        source = self._absolute_path(relative_source)
-
-        quality = get_thumbnail_setting('QUALITY', quality)
-        convert_path = get_thumbnail_setting('CONVERT')
-        wvps_path = get_thumbnail_setting('WVPS')
-        if processors is None:
-            processors = dynamic_import(get_thumbnail_setting('PROCESSORS'))
-
-        # Call super().__init__ now to set the opts attribute. generate() won't
-        # get called because we are not setting the dest attribute yet.
-        super(DjangoThumbnail, self).__init__(source, requested_size,
-            opts=opts, quality=quality, convert_path=convert_path,
-            wvps_path=wvps_path, processors=processors)
-
-        # Get the relative filename for the thumbnail image, then set the
-        # destination filename
-        if relative_dest is None:
-            relative_dest = \
-               self._get_relative_thumbnail(relative_source, basedir=basedir,
-                                            subdir=subdir, prefix=prefix,
-                                            extension=extension)
-        filelike = not isinstance(relative_dest, basestring)
-        if filelike:
-            self.dest = relative_dest
-        else:
-            self.dest = self._absolute_path(relative_dest)
-
-        # Call generate now that the dest attribute has been set
-        self.generate()
-
-        # Set the relative & absolute url to the thumbnail
-        if not filelike:
-            self.relative_url = \
-                iri_to_uri('/'.join(relative_dest.split(os.sep)))
-            self.absolute_url = '%s%s' % (settings.MEDIA_URL,
-                                          self.relative_url)
-
-    def _get_relative_thumbnail(self, relative_source,
-                                basedir=None, subdir=None, prefix=None,
-                                extension=None):
-        """
-        Returns the thumbnail filename including relative path.
-        """
-        return build_thumbnail_name(relative_source, self.requested_size,
-                                    self.opts, self.quality, basedir, subdir,
-                                    prefix, extension)
-
-    def _absolute_path(self, filename):
-        absolute_filename = os.path.join(settings.MEDIA_ROOT, filename)
-        return absolute_filename.encode(settings.FILE_CHARSET)
-
-    def __unicode__(self):
-        return self.absolute_url
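build_thumbnail_name encodes the size, sorted options and quality into the destination filename, replacing the extension's dot with an underscore. A doctest-style sketch with the default settings and a hypothetical source path:

    >>> from sorl.thumbnail.main import build_thumbnail_name
    >>> build_thumbnail_name('photos/cat.jpg', (80, 80), options=['crop'], quality=95)
    'photos/cat_jpg_80x80_crop_q95.jpg'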
diff --git a/apps/sorl/thumbnail/management/commands/thumbnail_cleanup.py b/apps/sorl/thumbnail/management/commands/thumbnail_cleanup.py
deleted file mode 100644 (file)
index 690c42c..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-import os
-import re
-from django.db import models
-from django.conf import settings
-from django.core.management.base import NoArgsCommand
-from sorl.thumbnail.main import get_thumbnail_setting
-
-
-try:
-    set
-except NameError:
-    from sets import Set as set     # For Python 2.3
-
-thumb_re = re.compile(r'^%s(.*)_\d{1,}x\d{1,}_[-\w]*q([1-9]\d?|100)\.jpg' %
-                      get_thumbnail_setting('PREFIX'))
-
-
-def get_thumbnail_path(path):
-    basedir = get_thumbnail_setting('BASEDIR')
-    subdir = get_thumbnail_setting('SUBDIR')
-    return os.path.join(basedir, path, subdir)
-
-
-def clean_up():
-    paths = set()
-    for app in models.get_apps():
-        model_list = models.get_models(app)
-        for model in model_list:
-            for field in model._meta.fields:
-                if isinstance(field, models.ImageField):
-                    #TODO: take care of date formatted and callable upload_to.
-                    if (not callable(field.upload_to) and
-                            field.upload_to.find("%") == -1):
-                        paths = paths.union((field.upload_to,))
-    paths = list(paths)
-    for path in paths:
-        thumbnail_path = get_thumbnail_path(path)
-        try:
-            file_list = os.listdir(os.path.join(settings.MEDIA_ROOT,
-                                                thumbnail_path))
-        except OSError:
-            continue # Dir doesn't exists, no thumbnails here.
-        for fn in file_list:
-            m = thumb_re.match(fn)
-            if m:
-                # Due to that the naming of thumbnails replaces the dot before
-                # extension with an underscore we have 2 possibilities for the
-                # original filename. If either present we do not delete
-                # suspected thumbnail.
-                # org_fn is the expected original filename w/o extension
-                # org_fn_alt is the expected original filename with extension
-                org_fn = m.group(1)
-                org_fn_exists = os.path.isfile(
-                            os.path.join(settings.MEDIA_ROOT, path, org_fn))
-
-                usc_pos = org_fn.rfind("_")
-                if usc_pos != -1:
-                    org_fn_alt = "%s.%s" % (org_fn[0:usc_pos],
-                                            org_fn[usc_pos+1:])
-                    org_fn_alt_exists = os.path.isfile(
-                        os.path.join(settings.MEDIA_ROOT, path, org_fn_alt))
-                else:
-                    org_fn_alt_exists = False
-                if not org_fn_exists and not org_fn_alt_exists:
-                    del_me = os.path.join(settings.MEDIA_ROOT,
-                                          thumbnail_path, fn)
-                    os.remove(del_me)
-
-
-class Command(NoArgsCommand):
-    help = "Deletes thumbnails that no longer have an original file."
-    requires_model_validation = False
-
-    def handle_noargs(self, **options):
-        clean_up()
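Besides the usual ./manage.py thumbnail_cleanup, the command can be invoked programmatically through Django's management API; a sketch:

    from django.core.management import call_command
    call_command('thumbnail_cleanup')  # removes thumbnails whose source file is gone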
diff --git a/apps/sorl/thumbnail/models.py b/apps/sorl/thumbnail/models.py
deleted file mode 100644 (file)
index ec325fd..0000000
+++ /dev/null
@@ -1 +0,0 @@
-# Needs a models.py file so that tests are picked up.
diff --git a/apps/sorl/thumbnail/processors.py b/apps/sorl/thumbnail/processors.py
deleted file mode 100644 (file)
index a6c1741..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-from PIL import Image, ImageFilter, ImageChops
-from sorl.thumbnail import utils
-import re
-
-
-def dynamic_import(names):
-    imported = []
-    for name in names:
-        # Use rfind rather than rsplit for Python 2.3 compatibility.
-        lastdot = name.rfind('.')
-        modname, attrname = name[:lastdot], name[lastdot + 1:]
-        mod = __import__(modname, {}, {}, [''])
-        imported.append(getattr(mod, attrname))
-    return imported
-
-
-def get_valid_options(processors):
-    """
-    Returns a list containing unique valid options from a list of processors
-    in correct order.
-    """
-    valid_options = []
-    for processor in processors:
-        if hasattr(processor, 'valid_options'):
-            valid_options.extend([opt for opt in processor.valid_options
-                                  if opt not in valid_options])
-    return valid_options
-
-
-def colorspace(im, requested_size, opts):
-    if 'bw' in opts and im.mode != "L":
-        im = im.convert("L")
-    elif im.mode not in ("L", "RGB", "RGBA"):
-        im = im.convert("RGB")
-    return im
-colorspace.valid_options = ('bw',)
-
-
-def autocrop(im, requested_size, opts):
-    if 'autocrop' in opts:
-        bw = im.convert("1")
-        bw = bw.filter(ImageFilter.MedianFilter)
-        # white bg
-        bg = Image.new("1", im.size, 255)
-        diff = ImageChops.difference(bw, bg)
-        bbox = diff.getbbox()
-        if bbox:
-            im = im.crop(bbox)
-    return im
-autocrop.valid_options = ('autocrop',)
-
-
-def scale_and_crop(im, requested_size, opts):
-    x, y = [float(v) for v in im.size]
-    xr, yr = [float(v) for v in requested_size]
-
-    if 'crop' in opts or 'max' in opts:
-        r = max(xr / x, yr / y)
-    else:
-        r = min(xr / x, yr / y)
-
-    if r < 1.0 or (r > 1.0 and 'upscale' in opts):
-        im = im.resize((int(x * r), int(y * r)), resample=Image.ANTIALIAS)
-
-    crop = opts.get('crop') or 'crop' in opts
-    if crop:
-        # Difference (for x and y) between new image size and requested size.
-        x, y = [float(v) for v in im.size]
-        dx, dy = (x - min(x, xr)), (y - min(y, yr))
-        if dx or dy:
-            # Center cropping (default).
-            ex, ey = dx / 2, dy / 2
-            box = [ex, ey, x - ex, y - ey]
-            # See if an edge cropping argument was provided.
-            edge_crop = (isinstance(crop, basestring) and
-                           re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
-            if edge_crop and filter(None, edge_crop.groups()):
-                x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
-                if x_crop:
-                    offset = min(x * int(x_crop) / 100, dx)
-                    if x_right:
-                        box[0] = dx - offset
-                        box[2] = x - offset
-                    else:
-                        box[0] = offset
-                        box[2] = x - (dx - offset)
-                if y_crop:
-                    offset = min(y * int(y_crop) / 100, dy)
-                    if y_bottom:
-                        box[1] = dy - offset
-                        box[3] = y - offset
-                    else:
-                        box[1] = offset
-                        box[3] = y - (dy - offset)
-            # See if the image should be "smart cropped".
-            elif crop == 'smart':
-                left = top = 0
-                right, bottom = x, y
-                while dx:
-                    slice = min(dx, 10)
-                    l_sl = im.crop((0, 0, slice, y))
-                    r_sl = im.crop((x - slice, 0, x, y))
-                    if utils.image_entropy(l_sl) >= utils.image_entropy(r_sl):
-                        right -= slice
-                    else:
-                        left += slice
-                    dx -= slice
-                while dy:
-                    slice = min(dy, 10)
-                    t_sl = im.crop((0, 0, x, slice))
-                    b_sl = im.crop((0, y - slice, x, y))
-                    if utils.image_entropy(t_sl) >= utils.image_entropy(b_sl):
-                        bottom -= slice
-                    else:
-                        top += slice
-                    dy -= slice
-                box = (left, top, right, bottom)
-            # Finally, crop the image!
-            im = im.crop([int(v) for v in box])
-    return im
-scale_and_crop.valid_options = ('crop', 'upscale', 'max')
-
-
-def filters(im, requested_size, opts):
-    if 'detail' in opts:
-        im = im.filter(ImageFilter.DETAIL)
-    if 'sharpen' in opts:
-        im = im.filter(ImageFilter.SHARPEN)
-    return im
-filters.valid_options = ('detail', 'sharpen')
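Processors are plain functions with the signature (im, requested_size, opts) returning a PIL image; a valid_options attribute declares which options they consume. A sketch of a custom processor (hypothetical name) that could be appended to the PROCESSORS setting:

    from PIL import ImageOps

    def grayscale(im, requested_size, opts):
        # only act when the 'grayscale' option was requested
        if 'grayscale' in opts:
            im = ImageOps.grayscale(im)
        return im
    grayscale.valid_options = ('grayscale',)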
diff --git a/apps/sorl/thumbnail/templatetags/thumbnail.py b/apps/sorl/thumbnail/templatetags/thumbnail.py
deleted file mode 100755 (executable)
index e7c2177..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-import re
-import math
-from django.template import Library, Node, VariableDoesNotExist, \
-    TemplateSyntaxError
-from sorl.thumbnail.main import DjangoThumbnail, get_thumbnail_setting
-from sorl.thumbnail.processors import dynamic_import, get_valid_options
-from sorl.thumbnail.utils import split_args
-
-register = Library()
-
-size_pat = re.compile(r'(\d+)x(\d+)$')
-
-filesize_formats = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
-filesize_long_formats = {
-    'k': 'kilo', 'M': 'mega', 'G': 'giga', 'T': 'tera', 'P': 'peta',
-    'E': 'exa', 'Z': 'zetta', 'Y': 'yotta',
-}
-
-try:
-    PROCESSORS = dynamic_import(get_thumbnail_setting('PROCESSORS'))
-    VALID_OPTIONS = get_valid_options(PROCESSORS)
-except:
-    if get_thumbnail_setting('DEBUG'):
-        raise
-    else:
-        PROCESSORS = []
-        VALID_OPTIONS = []
-TAG_SETTINGS = ['quality']
-
-
-class ThumbnailNode(Node):
-    def __init__(self, source_var, size_var, opts=None,
-                 context_name=None, **kwargs):
-        self.source_var = source_var
-        self.size_var = size_var
-        self.opts = opts
-        self.context_name = context_name
-        self.kwargs = kwargs
-
-    def render(self, context):
-        # Note that this isn't a global constant because we need to change the
-        # value for tests.
-        DEBUG = get_thumbnail_setting('DEBUG')
-        try:
-            # A file object will be allowed in DjangoThumbnail class
-            relative_source = self.source_var.resolve(context)
-        except VariableDoesNotExist:
-            if DEBUG:
-                raise VariableDoesNotExist("Variable '%s' does not exist." %
-                        self.source_var)
-            else:
-                relative_source = None
-        try:
-            requested_size = self.size_var.resolve(context)
-        except VariableDoesNotExist:
-            if DEBUG:
-                raise TemplateSyntaxError("Size argument '%s' is not a"
-                        " valid size nor a valid variable." % self.size_var)
-            else:
-                requested_size = None
-        # Size variable can be either a tuple/list of two integers or a valid
-        # string; only the string form is validated here.
-        else:
-            if isinstance(requested_size, basestring):
-                m = size_pat.match(requested_size)
-                if m:
-                    requested_size = (int(m.group(1)), int(m.group(2)))
-                elif DEBUG:
-                    raise TemplateSyntaxError("Variable '%s' was resolved but "
-                            "'%s' is not a valid size." %
-                            (self.size_var, requested_size))
-                else:
-                    requested_size = None
-        if relative_source is None or requested_size is None:
-            thumbnail = ''
-        else:
-            try:
-                kwargs = {}
-                for key, value in self.kwargs.items():
-                    kwargs[key] = value.resolve(context)
-                opts = dict([(k, v and v.resolve(context))
-                             for k, v in self.opts.items()])
-                thumbnail = DjangoThumbnail(relative_source, requested_size,
-                                opts=opts, processors=PROCESSORS, **kwargs)
-            except:
-                if DEBUG:
-                    raise
-                else:
-                    thumbnail = ''
-        # Return the thumbnail class, or put it on the context
-        if self.context_name is None:
-            return thumbnail
-        # Assign to the context variable even when thumbnail is '', so that
-        # old values are not left behind.
-        context[self.context_name] = thumbnail
-        return ''
-
-
-def thumbnail(parser, token):
-    """
-    Creates a thumbnail for an ImageField.
-
-    To just output the absolute url to the thumbnail::
-
-        {% thumbnail image 80x80 %}
-
-    After the image path and dimensions, you can put any options::
-
-        {% thumbnail image 80x80 quality=95 crop %}
-
-    To put the DjangoThumbnail class on the context instead of just rendering
-    the absolute url, finish the tag with ``as [context_var_name]``::
-
-        {% thumbnail image 80x80 as thumb %}
-        {{ thumb.width }} x {{ thumb.height }}
-    """
-    args = token.split_contents()
-    tag = args[0]
-    # Check to see if we're setting to a context variable.
-    if len(args) > 4 and args[-2] == 'as':
-        context_name = args[-1]
-        args = args[:-2]
-    else:
-        context_name = None
-
-    if len(args) < 3:
-        raise TemplateSyntaxError("Invalid syntax. Expected "
-            "'{%% %s source size [option1 option2 ...] %%}' or "
-            "'{%% %s source size [option1 option2 ...] as variable %%}'" %
-            (tag, tag))
-
-    # Get the source image path and requested size.
-    source_var = parser.compile_filter(args[1])
-    # If the size argument was a correct static format, wrap it in quotes so
-    # that it is compiled correctly.
-    m = size_pat.match(args[2])
-    if m:
-        args[2] = '"%s"' % args[2]
-    size_var = parser.compile_filter(args[2])
-
-    # Get the options.
-    args_list = split_args(args[3:]).items()
-
-    # Check the options.
-    opts = {}
-    kwargs = {}  # key/value pairs here override settings and defaults
-
-    for arg, value in args_list:
-        value = value and parser.compile_filter(value)
-        if arg in TAG_SETTINGS and value is not None:
-            kwargs[str(arg)] = value
-            continue
-        if arg in VALID_OPTIONS:
-            opts[arg] = value
-        else:
-            raise TemplateSyntaxError("'%s' tag received a bad argument: "
-                                      "'%s'" % (tag, arg))
-    return ThumbnailNode(source_var, size_var, opts=opts,
-                         context_name=context_name, **kwargs)
-
-
-def filesize(bytes, format='auto1024'):
-    """
-    Returns the number of bytes in either the nearest unit or a specific unit
-    (depending on the chosen format method).
-
-    Acceptable formats are:
-
-    auto1024, auto1000
-      convert to the nearest unit, appending the abbreviated unit name to the
-      string (e.g. '2 KiB' or '2 kB').
-      auto1024 is the default format.
-    auto1024long, auto1000long
-      convert to the nearest multiple of 1024 or 1000, appending the correctly
-      pluralized unit name to the string (e.g. '2 kibibytes' or '2 kilobytes').
-    kB, MB, GB, TB, PB, EB, ZB or YB
-      convert to the exact unit (using multiples of 1000).
-    KiB, MiB, GiB, TiB, PiB, EiB, ZiB or YiB
-      convert to the exact unit (using multiples of 1024).
-
-    All four auto formats return a string, appending the correct unit to the
-    value. The exact-unit formats return the floating point value.
-
-    If an invalid format is specified, the bytes are returned unchanged.
-    """
-    format_len = len(format)
-    # Check for valid format
-    if format_len in (2, 3):
-        if format_len == 3 and format[0] == 'K':
-            format = 'k%s' % format[1:]
-        if not format[-1] == 'B' or format[0] not in filesize_formats:
-            return bytes
-        if format_len == 3 and format[1] != 'i':
-            return bytes
-    elif format not in ('auto1024', 'auto1000',
-                        'auto1024long', 'auto1000long'):
-        return bytes
-    # Check for valid bytes
-    try:
-        bytes = long(bytes)
-    except (ValueError, TypeError):
-        return bytes
-
-    # Auto multiple of 1000 or 1024
-    if format.startswith('auto'):
-        if format[4:8] == '1000':
-            base = 1000
-        else:
-            base = 1024
-        logarithm = bytes and math.log(bytes, base) or 0
-        index = min(int(logarithm) - 1, len(filesize_formats) - 1)
-        if index >= 0:
-            if base == 1000:
-                bytes = bytes and bytes / math.pow(1000, index + 1)
-            else:
-                bytes = bytes >> (10 * (index))
-                bytes = bytes and bytes / 1024.0
-            unit = filesize_formats[index]
-        else:
-            # Change the base to 1000 so the unit will just output 'B' not 'iB'
-            base = 1000
-            unit = ''
-        if bytes >= 10 or ('%.1f' % bytes).endswith('.0'):
-            bytes = '%.0f' % bytes
-        else:
-            bytes = '%.1f' % bytes
-        if format.endswith('long'):
-            unit = filesize_long_formats.get(unit, '')
-            if base == 1024 and unit:
-                unit = '%sbi' % unit[:2]
-            unit = '%sbyte%s' % (unit, bytes != '1' and 's' or '')
-        else:
-            unit = '%s%s' % (base == 1024 and unit.upper() or unit,
-                             base == 1024 and 'iB' or 'B')
-
-        return '%s %s' % (bytes, unit)
-
-    if bytes == 0:
-        return bytes
-    base = filesize_formats.index(format[0]) + 1
-    # Exact multiple of 1000
-    if format_len == 2:
-        return bytes / (1000.0 ** base)
-    # Exact multiple of 1024
-    elif format_len == 3:
-        bytes = bytes >> (10 * (base - 1))
-        return bytes / 1024.0
-
-
-register.tag(thumbnail)
-register.filter(filesize)
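
The filesize filter registered above keeps one decimal place only for values below 10 and switches unit spellings between the 1024 and 1000 bases. A short doctest-style sketch, with values worked out by hand from the code above:

    >>> from sorl.thumbnail.templatetags.thumbnail import filesize
    >>> filesize(2048)              # default 'auto1024' format
    '2 KiB'
    >>> filesize(2048, 'auto1000')
    '2 kB'
    >>> filesize(2048, 'KiB')       # exact units return a float
    2.0
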
diff --git a/apps/sorl/thumbnail/tests/__init__.py b/apps/sorl/thumbnail/tests/__init__.py
deleted file mode 100644 (file)
index 98f1cbd..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# For these tests to run successfully, two conditions must be met:
-# 1. MEDIA_URL and MEDIA_ROOT must be set in settings
-# 2. The user running the tests must have read/write access to MEDIA_ROOT
-
-# Unit tests:
-from sorl.thumbnail.tests.classes import ThumbnailTest, DjangoThumbnailTest
-from sorl.thumbnail.tests.templatetags import ThumbnailTagTest
-from sorl.thumbnail.tests.fields import FieldTest, \
-    ImageWithThumbnailsFieldTest, ThumbnailFieldTest
-# Doc tests:
-from sorl.thumbnail.tests.utils import utils_tests
-from sorl.thumbnail.tests.templatetags import filesize_tests
-__test__ = {
-    'utils_tests': utils_tests,
-    'filesize_tests': filesize_tests,
-}
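
A minimal settings sketch satisfying the two preconditions noted at the top of this file (the paths are illustrative only):

    # settings fragment -- illustrative values
    MEDIA_ROOT = '/tmp/sorl-test-media/'   # test user needs read/write access
    MEDIA_URL = '/media/'
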
diff --git a/apps/sorl/thumbnail/tests/base.py b/apps/sorl/thumbnail/tests/base.py
deleted file mode 100644 (file)
index 44a2fa2..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-import unittest
-import os
-from PIL import Image
-from django.conf import settings
-from sorl.thumbnail.base import Thumbnail
-
-try:
-    set
-except NameError:
-    from sets import Set as set     # For Python 2.3
-
-
-def get_default_settings():
-    from sorl.thumbnail import defaults
-    def_settings = {}
-    for key in dir(defaults):
-        if key == key.upper() and key not in ['WVPS', 'CONVERT']:
-            def_settings[key] = getattr(defaults, key)
-    return def_settings
-
-
-DEFAULT_THUMBNAIL_SETTINGS = get_default_settings()
-RELATIVE_PIC_NAME = "sorl-thumbnail-test_source.jpg"
-PIC_NAME = os.path.join(settings.MEDIA_ROOT, RELATIVE_PIC_NAME)
-THUMB_NAME = os.path.join(settings.MEDIA_ROOT, "sorl-thumbnail-test_%02d.jpg")
-PIC_SIZE = (800, 600)
-
-
-class ChangeSettings:
-    def __init__(self):
-        self.default_settings = DEFAULT_THUMBNAIL_SETTINGS.copy()
-
-    def change(self, override=None):
-        if override is not None:
-            self.default_settings.update(override)
-        for setting, default in self.default_settings.items():
-            settings_s = 'THUMBNAIL_%s' % setting
-            self_s = 'original_%s' % setting
-            if hasattr(settings, settings_s) and not hasattr(self, self_s):
-                setattr(self, self_s, getattr(settings, settings_s))
-            if hasattr(settings, settings_s) or \
-               default != DEFAULT_THUMBNAIL_SETTINGS[setting]:
-                setattr(settings, settings_s, default)
-
-    def revert(self):
-        for setting in self.default_settings:
-            settings_s = 'THUMBNAIL_%s' % setting
-            self_s = 'original_%s' % setting
-            if hasattr(self, self_s):
-                setattr(settings, settings_s, getattr(self, self_s))
-                delattr(self, self_s)
-
-
-class BaseTest(unittest.TestCase):
-    def setUp(self):
-        self.images_to_delete = set()
-        # Create the test image
-        Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG')
-        self.images_to_delete.add(PIC_NAME)
-        # Change settings so we know they will be constant
-        self.change_settings = ChangeSettings()
-        self.change_settings.change()
-
-    def verify_thumbnail(self, expected_size, thumbnail=None,
-                         expected_filename=None, expected_mode=None):
-        assert thumbnail is not None or expected_filename is not None, \
-            'verify_thumbnail should be passed at least a thumbnail or an ' \
-            'expected filename.'
-
-        if thumbnail is not None:
-            # Verify that the templatetag method returned a Thumbnail instance
-            self.assertTrue(isinstance(thumbnail, Thumbnail))
-            thumb_name = thumbnail.dest
-        else:
-            thumb_name = expected_filename
-
-        if isinstance(thumb_name, basestring):
-            # Verify that the thumbnail file exists
-            self.assert_(os.path.isfile(thumb_name),
-                         'Thumbnail file not found')
-
-            # Remember to delete the file
-            self.images_to_delete.add(thumb_name)
-
-            # If we got an expected_filename, check that it is right
-            if expected_filename is not None and thumbnail is not None:
-                self.assertEqual(thumbnail.dest, expected_filename)
-
-        image = Image.open(thumb_name)
-
-        # Verify the thumbnail has the expected dimensions
-        self.assertEqual(image.size, expected_size)
-
-        if expected_mode is not None:
-            self.assertEqual(image.mode, expected_mode)
-
-    def tearDown(self):
-        # Remove all the files that have been created
-        for image in self.images_to_delete:
-            try:
-                os.remove(image)
-            except:
-                pass
-        # Change settings back to original
-        self.change_settings.revert()
diff --git a/apps/sorl/thumbnail/tests/classes.py b/apps/sorl/thumbnail/tests/classes.py
deleted file mode 100644 (file)
index d15dd19..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-import time
-from StringIO import StringIO
-
-from PIL import Image
-from django.conf import settings
-
-from sorl.thumbnail.base import Thumbnail
-from sorl.thumbnail.main import DjangoThumbnail, get_thumbnail_setting
-from sorl.thumbnail.processors import dynamic_import, get_valid_options
-from sorl.thumbnail.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME,\
-    THUMB_NAME, PIC_SIZE
-
-
-class ThumbnailTest(BaseTest):
-    def testThumbnails(self):
-        # Thumbnail
-        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1,
-                          requested_size=(240, 240))
-        self.verify_thumbnail((240, 180), thumb)
-
-        # Cropped thumbnail
-        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2,
-                          requested_size=(240, 240), opts=['crop'])
-        self.verify_thumbnail((240, 240), thumb)
-
-        # Thumbnail with altered JPEG quality
-        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3,
-                          requested_size=(240, 240), quality=95)
-        self.verify_thumbnail((240, 180), thumb)
-
-    def testRegeneration(self):
-        # Create thumbnail
-        thumb_name = THUMB_NAME % 4
-        thumb_size = (240, 240)
-        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
-        self.images_to_delete.add(thumb_name)
-        thumb_mtime = os.path.getmtime(thumb_name)
-        time.sleep(1)
-
-        # Create another instance, shouldn't generate a new thumb
-        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
-        self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime)
-
-        # Recreate the source image, then see if a new thumb is generated
-        Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG')
-        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
-        self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime)
-
-    def testFilelikeDest(self):
-        # Thumbnail
-        filelike_dest = StringIO()
-        thumb = Thumbnail(source=PIC_NAME, dest=filelike_dest,
-                          requested_size=(240, 240))
-        self.verify_thumbnail((240, 180), thumb)
-
-    def testRGBA(self):
-        # RGBA image
-        rgba_pic_name = os.path.join(settings.MEDIA_ROOT,
-                                     'sorl-thumbnail-test_rgba_source.png')
-        Image.new('RGBA', PIC_SIZE).save(rgba_pic_name)
-        self.images_to_delete.add(rgba_pic_name)
-        # Create thumb and verify it's still RGBA
-        rgba_thumb_name = os.path.join(settings.MEDIA_ROOT,
-                                       'sorl-thumbnail-test_rgba_dest.png')
-        thumb = Thumbnail(source=rgba_pic_name, dest=rgba_thumb_name,
-                          requested_size=(240, 240))
-        self.verify_thumbnail((240, 180), thumb, expected_mode='RGBA')
-
-
-class DjangoThumbnailTest(BaseTest):
-    def setUp(self):
-        super(DjangoThumbnailTest, self).setUp()
-        # Add another source image in a sub-directory for testing subdir and
-        # basedir.
-        self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail')
-        try:
-            os.mkdir(self.sub_dir)
-        except OSError:
-            pass
-        self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME)
-        Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG')
-        self.images_to_delete.add(self.pic_subdir)
-
-    def testFilenameGeneration(self):
-        basename = RELATIVE_PIC_NAME.replace('.', '_')
-        # Basic filename
-        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
-                                requested_size=(240, 120))
-        expected = os.path.join(settings.MEDIA_ROOT, basename)
-        expected += '_240x120_q85.jpg'
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-
-        # Changed quality and cropped
-        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
-                                requested_size=(240, 120), opts=['crop'],
-                                quality=95)
-        expected = os.path.join(settings.MEDIA_ROOT, basename)
-        expected += '_240x120_crop_q95.jpg'
-        self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
-
-        # All options on
-        processors = dynamic_import(get_thumbnail_setting('PROCESSORS'))
-        valid_options = get_valid_options(processors)
-
-        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
-                                requested_size=(240, 120), opts=valid_options)
-        expected = (os.path.join(settings.MEDIA_ROOT, basename) + '_240x120_'
-                    'autocrop_bw_crop_detail_max_sharpen_upscale_q85.jpg')
-        self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
-
-        # Different basedir
-        basedir = 'sorl-thumbnail-test-basedir'
-        self.change_settings.change({'BASEDIR': basedir})
-        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
-                                requested_size=(240, 120))
-        expected = os.path.join(basedir, self.sub_dir, basename)
-        expected += '_240x120_q85.jpg'
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-        # Different subdir
-        self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'})
-        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
-                                requested_size=(240, 120))
-        expected = os.path.join(settings.MEDIA_ROOT,
-                                os.path.basename(self.sub_dir), 'subdir',
-                                basename)
-        expected += '_240x120_q85.jpg'
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-        # Different prefix
-        self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'})
-        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
-                                requested_size=(240, 120))
-        expected = os.path.join(self.sub_dir, 'prefix-' + basename)
-        expected += '_240x120_q85.jpg'
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-
-    def testAlternateExtension(self):
-        basename = RELATIVE_PIC_NAME.replace('.', '_')
-        # Control JPG
-        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
-                                requested_size=(240, 120))
-        expected = os.path.join(settings.MEDIA_ROOT, basename)
-        expected += '_240x120_q85.jpg'
-        expected_jpg = expected
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-        # Test PNG
-        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
-                                requested_size=(240, 120), extension='png')
-        expected = os.path.join(settings.MEDIA_ROOT, basename)
-        expected += '_240x120_q85.png'
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-        # Compare the file size to make sure it's not just saving as a JPG with
-        # a different extension.
-        self.assertNotEqual(os.path.getsize(expected_jpg),
-                            os.path.getsize(expected))
-
-    def testUnicodeName(self):
-        unicode_name = 'sorl-thumbnail-ążśź_source.jpg'
-        unicode_path = os.path.join(settings.MEDIA_ROOT, unicode_name)
-        Image.new('RGB', PIC_SIZE).save(unicode_path)
-        self.images_to_delete.add(unicode_path)
-        thumb = DjangoThumbnail(relative_source=unicode_name,
-                                requested_size=(240, 120))
-        base_name = unicode_name.replace('.', '_')
-        expected = os.path.join(settings.MEDIA_ROOT,
-                                base_name + '_240x120_q85.jpg')
-        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
-
-    def tearDown(self):
-        super(DjangoThumbnailTest, self).tearDown()
-        subdir = os.path.join(self.sub_dir, 'subdir')
-        if os.path.exists(subdir):
-            os.rmdir(subdir)
-        os.rmdir(self.sub_dir)
diff --git a/apps/sorl/thumbnail/tests/fields.py b/apps/sorl/thumbnail/tests/fields.py
deleted file mode 100644 (file)
index 425f555..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-import os.path
-
-from django.db import models
-from django.conf import settings
-from django.core.files.base import ContentFile
-
-from sorl.thumbnail.fields import ImageWithThumbnailsField, ThumbnailField
-from sorl.thumbnail.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME
-
-thumbnail = {
-    'size': (50, 50),
-}
-extra_thumbnails = {
-    'admin': {
-        'size': (30, 30),
-        'options': ('crop',),
-    }
-}
-extension_thumbnail = thumbnail.copy()
-extension_thumbnail['extension'] = 'png'
-
-
-# Temporary models for field_tests
-class TestThumbnailFieldModel(models.Model):
-    avatar = ThumbnailField(upload_to='test', size=(300, 300))
-    photo = ImageWithThumbnailsField(upload_to='test', thumbnail=thumbnail,
-                                     extra_thumbnails=extra_thumbnails)
-
-
-class TestThumbnailFieldExtensionModel(models.Model):
-    photo = ImageWithThumbnailsField(upload_to='test',
-                                     thumbnail=extension_thumbnail,
-                                     extra_thumbnails=extra_thumbnails)
-
-
-class TestThumbnailFieldGenerateModel(models.Model):
-    photo = ImageWithThumbnailsField(upload_to='test', thumbnail=thumbnail,
-                                     extra_thumbnails=extra_thumbnails,
-                                     generate_on_save=True)
-
-
-class FieldTest(BaseTest):
-    """
-    Test the base field functionality. These use an ImageWithThumbnailsField
-    but all the functionality tested is from BaseThumbnailField.
-    """
-    def test_extra_thumbnails(self):
-        model = TestThumbnailFieldModel(photo=RELATIVE_PIC_NAME)
-        self.assertTrue('admin' in model.photo.extra_thumbnails)
-        thumb = model.photo.extra_thumbnails['admin']
-        tag = model.photo.extra_thumbnails_tag['admin']
-        expected_filename = os.path.join(settings.MEDIA_ROOT,
-            'sorl-thumbnail-test_source_jpg_30x30_crop_q85.jpg')
-        self.verify_thumbnail((30, 30), thumb, expected_filename)
-        expected_tag = '<img src="%s" width="30" height="30" alt="" />' % \
-            '/'.join((settings.MEDIA_URL.rstrip('/'),
-                      'sorl-thumbnail-test_source_jpg_30x30_crop_q85.jpg'))
-        self.assertEqual(tag, expected_tag)
-
-    def test_extension(self):
-        model = TestThumbnailFieldExtensionModel(photo=RELATIVE_PIC_NAME)
-        thumb = model.photo.thumbnail
-        tag = model.photo.thumbnail_tag
-        expected_filename = os.path.join(settings.MEDIA_ROOT,
-            'sorl-thumbnail-test_source_jpg_50x50_q85.png')
-        self.verify_thumbnail((50, 37), thumb, expected_filename)
-        expected_tag = '<img src="%s" width="50" height="37" alt="" />' % \
-            '/'.join((settings.MEDIA_URL.rstrip('/'),
-                      'sorl-thumbnail-test_source_jpg_50x50_q85.png'))
-        self.assertEqual(tag, expected_tag)
-
-    def test_delete_thumbnails(self):
-        model = TestThumbnailFieldModel(photo=RELATIVE_PIC_NAME)
-        thumb_file = model.photo.thumbnail.dest
-        open(thumb_file, 'wb').close()
-        self.assert_(os.path.exists(thumb_file))
-        model.photo.delete(save=False)
-        self.assertFalse(os.path.exists(thumb_file))
-
-    def test_generate_on_save(self):
-        main_thumb = os.path.join(settings.MEDIA_ROOT, 'test',
-                        'sorl-thumbnail-test_source_jpg_50x50_q85.jpg')
-        admin_thumb = os.path.join(settings.MEDIA_ROOT, 'test',
-                        'sorl-thumbnail-test_source_jpg_30x30_crop_q85.jpg')
-        self.images_to_delete.add(main_thumb)
-        self.images_to_delete.add(admin_thumb)
-        # Default setting is to only generate when the thumbnail is used.
-        model = TestThumbnailFieldModel()
-        source = ContentFile(open(PIC_NAME).read())
-        model.photo.save(RELATIVE_PIC_NAME, source, save=False)
-        self.images_to_delete.add(model.photo.path)
-        self.assertFalse(os.path.exists(main_thumb))
-        self.assertFalse(os.path.exists(admin_thumb))
-        os.remove(model.photo.path)
-        # But it's easy to set it up the other way...
-        model = TestThumbnailFieldGenerateModel()
-        source = ContentFile(open(PIC_NAME).read())
-        model.photo.save(RELATIVE_PIC_NAME, source, save=False)
-        self.assert_(os.path.exists(main_thumb))
-        self.assert_(os.path.exists(admin_thumb))
-
-
-class ImageWithThumbnailsFieldTest(BaseTest):
-    def test_thumbnail(self):
-        model = TestThumbnailFieldModel(photo=RELATIVE_PIC_NAME)
-        thumb = model.photo.thumbnail
-        tag = model.photo.thumbnail_tag
-        base_name = RELATIVE_PIC_NAME.replace('.', '_')
-        expected_filename = os.path.join(settings.MEDIA_ROOT,
-                                         '%s_50x50_q85.jpg' % base_name)
-        self.verify_thumbnail((50, 37), thumb, expected_filename)
-        expected_tag = ('<img src="%s" width="50" height="37" alt="" />' %
-                        '/'.join([settings.MEDIA_URL.rstrip('/'),
-                                  '%s_50x50_q85.jpg' % base_name]))
-        self.assertEqual(tag, expected_tag)
-
-
-class ThumbnailFieldTest(BaseTest):
-    def test_thumbnail(self):
-        model = TestThumbnailFieldModel()
-        source = ContentFile(open(PIC_NAME).read())
-        dest_name = 'sorl-thumbnail-test_dest.jpg'
-        model.avatar.save(dest_name, source, save=False)
-        expected_filename = os.path.join(model.avatar.path)
-        self.verify_thumbnail((300, 225), expected_filename=expected_filename)
-
-        tag = model.avatar.thumbnail_tag
-        expected_tag = ('<img src="%s" width="300" height="225" alt="" />' %
-                        '/'.join([settings.MEDIA_URL.rstrip('/'), 'test',
-                                  dest_name]))
-        self.assertEqual(tag, expected_tag)
diff --git a/apps/sorl/thumbnail/tests/templatetags.py b/apps/sorl/thumbnail/tests/templatetags.py
deleted file mode 100644 (file)
index 5d1a1cb..0000000
+++ /dev/null
@@ -1,312 +0,0 @@
-import os
-from django.conf import settings
-from django.template import Template, Context, TemplateSyntaxError
-from sorl.thumbnail.tests.classes import BaseTest, RELATIVE_PIC_NAME
-
-
-class ThumbnailTagTest(BaseTest):
-    def render_template(self, source):
-        context = Context({
-            'source': RELATIVE_PIC_NAME,
-            'invalid_source': 'not%s' % RELATIVE_PIC_NAME,
-            'size': (90, 100),
-            'invalid_size': (90, 'fish'),
-            'strsize': '80x90',
-            'invalid_strsize': ('1notasize2'),
-            'invalid_q': 'notanumber'})
-        source = '{% load thumbnail %}' + source
-        return Template(source).render(context)
-
-    def testTagInvalid(self):
-        # No args, or wrong number of args
-        src = '{% thumbnail %}'
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-        src = '{% thumbnail source %}'
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-        src = '{% thumbnail source 80x80 as variable crop %}'
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-        # Invalid option
-        src = '{% thumbnail source 240x200 invalid %}'
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-        # Old comma separated options format can only have an = for quality
-        src = '{% thumbnail source 80x80 crop=1,quality=1 %}'
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-        # Invalid quality
-        src_invalid = '{% thumbnail source 240x200 quality=invalid_q %}'
-        src_missing = '{% thumbnail source 240x200 quality=missing_q %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.assertEqual(self.render_template(src_invalid), '')
-        self.assertEqual(self.render_template(src_missing), '')
-        # ...and with THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template,
-                          src_invalid)
-        self.assertRaises(TemplateSyntaxError, self.render_template,
-                          src_missing)
-
-        # Invalid source
-        src = '{% thumbnail invalid_source 80x80 %}'
-        src_on_context = '{% thumbnail invalid_source 80x80 as thumb %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.change_settings.change({'DEBUG': False})
-        self.assertEqual(self.render_template(src), '')
-        # ...and with THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-        self.assertRaises(TemplateSyntaxError, self.render_template,
-                          src_on_context)
-
-        # Non-existent source
-        src = '{% thumbnail non_existant_source 80x80 %}'
-        src_on_context = '{% thumbnail non_existant_source 80x80 as thumb %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.change_settings.change({'DEBUG': False})
-        self.assertEqual(self.render_template(src), '')
-        # ...and with THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-        # Invalid size as a tuple:
-        src = '{% thumbnail source invalid_size %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.change_settings.change({'DEBUG': False})
-        self.assertEqual(self.render_template(src), '')
-        # ...and THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-        # Invalid size as a string:
-        src = '{% thumbnail source invalid_strsize %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.change_settings.change({'DEBUG': False})
-        self.assertEqual(self.render_template(src), '')
-        # ...and THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-        # Non-existent size
-        src = '{% thumbnail source non_existant_size %}'
-        # ...with THUMBNAIL_DEBUG = False
-        self.change_settings.change({'DEBUG': False})
-        self.assertEqual(self.render_template(src), '')
-        # ...and THUMBNAIL_DEBUG = True
-        self.change_settings.change({'DEBUG': True})
-        self.assertRaises(TemplateSyntaxError, self.render_template, src)
-
-    def testTag(self):
-        expected_base = RELATIVE_PIC_NAME.replace('.', '_')
-        # Set DEBUG = True to make it easier to trace any failures
-        self.change_settings.change({'DEBUG': True})
-
-        # Basic
-        output = self.render_template('src="'
-            '{% thumbnail source 240x240 %}"')
-        expected = '%s_240x240_q85.jpg' % expected_base
-        expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
-        self.verify_thumbnail((240, 180), expected_filename=expected_fn)
-        expected_url = ''.join((settings.MEDIA_URL, expected))
-        self.assertEqual(output, 'src="%s"' % expected_url)
-
-        # Size from context variable
-        # as a tuple:
-        output = self.render_template('src="'
-            '{% thumbnail source size %}"')
-        expected = '%s_90x100_q85.jpg' % expected_base
-        expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
-        self.verify_thumbnail((90, 67), expected_filename=expected_fn)
-        expected_url = ''.join((settings.MEDIA_URL, expected))
-        self.assertEqual(output, 'src="%s"' % expected_url)
-        # as a string:
-        output = self.render_template('src="'
-            '{% thumbnail source strsize %}"')
-        expected = '%s_80x90_q85.jpg' % expected_base
-        expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
-        self.verify_thumbnail((80, 60), expected_filename=expected_fn)
-        expected_url = ''.join((settings.MEDIA_URL, expected))
-        self.assertEqual(output, 'src="%s"' % expected_url)
-
-        # On context
-        output = self.render_template('height:'
-            '{% thumbnail source 240x240 as thumb %}{{ thumb.height }}')
-        self.assertEqual(output, 'height:180')
-
-        # With options and quality
-        output = self.render_template('src="'
-            '{% thumbnail source 240x240 sharpen crop quality=95 %}"')
-        # Note that the opts are sorted to ensure a consistent filename.
-        expected = '%s_240x240_crop_sharpen_q95.jpg' % expected_base
-        expected_fn = os.path.join(settings.MEDIA_ROOT, expected)
-        self.verify_thumbnail((240, 240), expected_filename=expected_fn)
-        expected_url = ''.join((settings.MEDIA_URL, expected))
-        self.assertEqual(output, 'src="%s"' % expected_url)
-
-        # With option and quality on context (also using its unicode method to
-        # display the url)
-        output = self.render_template(
-            '{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}'
-            'width:{{ thumb.width }}, url:{{ thumb }}')
-        self.assertEqual(output, 'width:240, url:%s' % expected_url)
-
-        # Old comma separated format for options is still supported.
-        output = self.render_template(
-            '{% thumbnail source 240x240 sharpen,crop,quality=95 as thumb %}'
-            'width:{{ thumb.width }}, url:{{ thumb }}')
-        self.assertEqual(output, 'width:240, url:%s' % expected_url)
-
-filesize_tests = r"""
->>> from sorl.thumbnail.templatetags.thumbnail import filesize
-
->>> filesize('abc')
-'abc'
->>> filesize(100, 'invalid')
-100
-
->>> bytes = 20
->>> filesize(bytes)
-'20 B'
->>> filesize(bytes, 'auto1000')
-'20 B'
-
->>> bytes = 1001
->>> filesize(bytes)
-'1001 B'
->>> filesize(bytes, 'auto1000')
-'1 kB'
-
->>> bytes = 10100
->>> filesize(bytes)
-'9.9 KiB'
-
-# Note that the decimal place is only used if < 10
->>> filesize(bytes, 'auto1000')
-'10 kB'
-
->>> bytes = 190000000
->>> filesize(bytes)
-'181 MiB'
->>> filesize(bytes, 'auto1000')
-'190 MB'
-
-# 'auto*long' methods use pluralisation:
->>> filesize(1, 'auto1024long')
-'1 byte'
->>> filesize(1, 'auto1000long')
-'1 byte'
->>> filesize(2, 'auto1024long')
-'2 bytes'
->>> filesize(0, 'auto1000long')
-'0 bytes'
-
-# Test all 'auto*long' output:
->>> for i in range(1,10):
-...     print '%s, %s' % (filesize(1024**i, 'auto1024long'),
-...                       filesize(1000**i, 'auto1000long'))
-1 kibibyte, 1 kilobyte
-1 mebibyte, 1 megabyte
-1 gibibyte, 1 gigabyte
-1 tebibyte, 1 terabyte
-1 pebibyte, 1 petabyte
-1 exbibyte, 1 exabyte
-1 zebibyte, 1 zettabyte
-1 yobibyte, 1 yottabyte
-1024 yobibytes, 1000 yottabytes
-
-# Test all fixed outputs (eg 'kB' or 'MiB')
->>> from sorl.thumbnail.templatetags.thumbnail import filesize_formats,\
-...    filesize_long_formats
->>> for f in filesize_formats:
-...     print '%s (%siB, %sB):' % (filesize_long_formats[f], f.upper(), f)
-...     for i in range(0, 10):
-...         print ' %s, %s' % (filesize(1024**i, '%siB' % f.upper()),
-...                            filesize(1000**i, '%sB' % f))
-kilo (KiB, kB):
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
- 1.09951162778e+12, 1e+12
- 1.12589990684e+15, 1e+15
- 1.15292150461e+18, 1e+18
- 1.18059162072e+21, 1e+21
- 1.20892581961e+24, 1e+24
-mega (MiB, MB):
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
- 1.09951162778e+12, 1e+12
- 1.12589990684e+15, 1e+15
- 1.15292150461e+18, 1e+18
- 1.18059162072e+21, 1e+21
-giga (GiB, GB):
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
- 1.09951162778e+12, 1e+12
- 1.12589990684e+15, 1e+15
- 1.15292150461e+18, 1e+18
-tera (TiB, TB):
- 0.0, 1e-12
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
- 1.09951162778e+12, 1e+12
- 1.12589990684e+15, 1e+15
-peta (PiB, PB):
- 0.0, 1e-15
- 0.0, 1e-12
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
- 1.09951162778e+12, 1e+12
-exa (EiB, EB):
- 0.0, 1e-18
- 0.0, 1e-15
- 0.0, 1e-12
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
- 1073741824.0, 1000000000.0
-zetta (ZiB, ZB):
- 0.0, 1e-21
- 0.0, 1e-18
- 0.0, 1e-15
- 0.0, 1e-12
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
- 1048576.0, 1000000.0
-yotta (YiB, YB):
- 0.0, 1e-24
- 0.0, 1e-21
- 0.0, 1e-18
- 0.0, 1e-15
- 0.0, 1e-12
- 0.0, 1e-09
- 0.0, 1e-06
- 0.0009765625, 0.001
- 1.0, 1.0
- 1024.0, 1000.0
-"""
diff --git a/apps/sorl/thumbnail/tests/utils.py b/apps/sorl/thumbnail/tests/utils.py
deleted file mode 100644 (file)
index 3a20cbb..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-from django.conf import settings
-from sorl.thumbnail.utils import *
-
-try:
-    set
-except NameError:
-    from sets import Set as set     # For Python 2.3
-
-MEDIA_ROOT_LENGTH = len(os.path.normpath(settings.MEDIA_ROOT))
-
-utils_tests = r"""
->>> from sorl.thumbnail.tests.utils import *
->>> from sorl.thumbnail.tests.base import ChangeSettings
->>> from django.conf import settings
-
->>> change_settings = ChangeSettings()
->>> change_settings.change()
-
->>> media_root = settings.MEDIA_ROOT.rstrip('/')
-
-#==============================================================================
-# Set up test images
-#==============================================================================
-
->>> make_image('test-thumbnail-utils/subdir/test_jpg_110x110_q85.jpg')
->>> make_image('test-thumbnail-utils/test_jpg_80x80_q85.jpg')
->>> make_image('test-thumbnail-utils/test_jpg_80x80_q95.jpg')
->>> make_image('test-thumbnail-utils/another_test_jpg_80x80_q85.jpg')
->>> make_image('test-thumbnail-utils/test_with_opts_jpg_80x80_crop_bw_q85.jpg')
->>> make_image('test-thumbnail-basedir/test-thumbnail-utils/test_jpg_100x100_'
-...            'q85.jpg')
->>> make_image('test-thumbnail-utils/prefix-test_jpg_120x120_q85.jpg')
-
-#==============================================================================
-# all_thumbnails()
-#==============================================================================
-
-# Find all thumbs
->>> thumb_dir = os.path.join(settings.MEDIA_ROOT, 'test-thumbnail-utils')
->>> thumbs = all_thumbnails(thumb_dir)
->>> k = thumbs.keys()
->>> k.sort()
->>> [consistent_slash(path) for path in k]
-['another_test.jpg', 'prefix-test.jpg', 'subdir/test.jpg', 'test.jpg',
- 'test_with_opts.jpg']
-
-# Find all thumbs, no recurse
->>> thumbs = all_thumbnails(thumb_dir, recursive=False)
->>> k = thumbs.keys()
->>> k.sort()
->>> k
-['another_test.jpg', 'prefix-test.jpg', 'test.jpg', 'test_with_opts.jpg']
-
-#==============================================================================
-# thumbnails_for_file()
-#==============================================================================
-
->>> output = []
->>> for thumb in thumbs['test.jpg']:
-...     thumb['rel_fn'] = strip_media_root(thumb['filename'])
-...     output.append('%(x)sx%(y)s %(quality)s %(rel_fn)s' % thumb)
->>> output.sort()
->>> output
-['80x80 85 test-thumbnail-utils/test_jpg_80x80_q85.jpg',
- '80x80 95 test-thumbnail-utils/test_jpg_80x80_q95.jpg']
-
-# Thumbnails for file
->>> output = []
->>> for thumb in thumbnails_for_file('test-thumbnail-utils/test.jpg'):
-...    output.append(strip_media_root(thumb['filename']))
->>> output.sort()
->>> output
-['test-thumbnail-utils/test_jpg_80x80_q85.jpg',
- 'test-thumbnail-utils/test_jpg_80x80_q95.jpg']
-
-# Thumbnails for file - shouldn't choke on a non-existent file
->>> thumbnails_for_file('test-thumbnail-utils/non-existant.jpg')
-[]
-
-# Thumbnails for file, with basedir setting
->>> change_settings.change({'BASEDIR': 'test-thumbnail-basedir'})
->>> for thumb in thumbnails_for_file('test-thumbnail-utils/test.jpg'):
-...    print strip_media_root(thumb['filename'])
-test-thumbnail-basedir/test-thumbnail-utils/test_jpg_100x100_q85.jpg
-
-# Thumbnails for file, with subdir setting
->>> change_settings.change({'SUBDIR': 'subdir', 'BASEDIR': ''})
->>> for thumb in thumbnails_for_file('test-thumbnail-utils/test.jpg'):
-...    print strip_media_root(thumb['filename'])
-test-thumbnail-utils/subdir/test_jpg_110x110_q85.jpg
-
-# Thumbnails for file, with prefix setting
->>> change_settings.change({'PREFIX': 'prefix-', 'SUBDIR': ''})
->>> for thumb in thumbnails_for_file('test-thumbnail-utils/test.jpg'):
-...    print strip_media_root(thumb['filename'])
-test-thumbnail-utils/prefix-test_jpg_120x120_q85.jpg
-
-#==============================================================================
-# Clean up images / directories
-#==============================================================================
-
->>> clean_up()
-"""
-
-images_to_delete = set()
-dirs_to_delete = []
-
-
-def make_image(relative_image):
-    absolute_image = os.path.join(settings.MEDIA_ROOT, relative_image)
-    make_dirs(os.path.dirname(relative_image))
-    open(absolute_image, 'w').close()
-    images_to_delete.add(absolute_image)
-
-
-def make_dirs(relative_path):
-    if not relative_path:
-        return
-    absolute_path = os.path.join(settings.MEDIA_ROOT, relative_path)
-    if os.path.isdir(absolute_path):
-        return
-    if absolute_path not in dirs_to_delete:
-        dirs_to_delete.append(absolute_path)
-    make_dirs(os.path.dirname(relative_path))
-    os.mkdir(absolute_path)
-
-
-def clean_up():
-    for image in images_to_delete:
-        os.remove(image)
-    for path in dirs_to_delete:
-        os.rmdir(path)
-
-
-def strip_media_root(path):
-    path = os.path.normpath(path)
-    # chop off the MEDIA_ROOT and strip any leading os.sep
-    path = path[MEDIA_ROOT_LENGTH:].lstrip(os.sep)
-    return consistent_slash(path)
-
-
-def consistent_slash(path):
-    """
-    Ensure we're always testing against the '/' os separator (otherwise tests
-    fail on Windows).
-    """
-    if os.sep != '/':
-        path = path.replace(os.sep, '/')
-    return path
diff --git a/apps/sorl/thumbnail/utils.py b/apps/sorl/thumbnail/utils.py
deleted file mode 100644 (file)
index 18b18b0..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-import math
-import os
-import re
-
-
-re_thumbnail_file = re.compile(r'(?P<source_filename>.+)_(?P<x>\d+)x(?P<y>\d+)'
-                               r'(?:_(?P<options>\w+))?_q(?P<quality>\d+)'
-                               r'(?:.[^.]+)?$')
-re_new_args = re.compile('(?<!quality)=')
-
-
-def all_thumbnails(path, recursive=True, prefix=None, subdir=None):
-    """
-    Return a dictionary referencing all files which match the thumbnail format.
-
-    Each key is a source image filename, relative to path.
-    Each value is a list of dictionaries as explained in `thumbnails_for_file`.
-    """
-    # Fall back to using thumbnail settings. These are local imports so that
-    # Django is not required just to import this utils module.
-    if prefix is None:
-        from sorl.thumbnail.main import get_thumbnail_setting
-        prefix = get_thumbnail_setting('PREFIX')
-    if subdir is None:
-        from sorl.thumbnail.main import get_thumbnail_setting
-        subdir = get_thumbnail_setting('SUBDIR')
-    thumbnail_files = {}
-    if not path.endswith('/'):
-        path = '%s/' % path
-    len_path = len(path)
-    if recursive:
-        all = os.walk(path)
-    else:
-        files = []
-        for file in os.listdir(path):
-            if os.path.isfile(os.path.join(path, file)):
-                files.append(file)
-        all = [(path, [], files)]
-    for dir_, subdirs, files in all:
-        rel_dir = dir_[len_path:]
-        for file in files:
-            thumb = re_thumbnail_file.match(file)
-            if not thumb:
-                continue
-            d = thumb.groupdict()
-            source_filename = d.pop('source_filename')
-            if prefix:
-                source_path, source_filename = os.path.split(source_filename)
-                if not source_filename.startswith(prefix):
-                    continue
-                source_filename = os.path.join(source_path,
-                    source_filename[len(prefix):])
-            d['options'] = d['options'] and d['options'].split('_') or []
-            if subdir and rel_dir.endswith(subdir):
-                rel_dir = rel_dir[:-len(subdir)]
-            # Corner-case bug: if the source filename has no extension but
-            # does contain an underscore, the last underscore gets converted
-            # to a '.'.
-            m = re.match(r'(.*)_(.*)', source_filename)
-            if m:
-                source_filename = '%s.%s' % m.groups()
-            filename = os.path.join(rel_dir, source_filename)
-            thumbnail_file = thumbnail_files.setdefault(filename, [])
-            d['filename'] = os.path.join(dir_, file)
-            thumbnail_file.append(d)
-    return thumbnail_files
-
-
-def thumbnails_for_file(relative_source_path, root=None, basedir=None,
-                        subdir=None, prefix=None):
-    """
-    Return a list of dictionaries, one for each thumbnail belonging to the
-    source image.
-
-    The following list explains each key of the dictionary:
-
-      `filename`  -- absolute thumbnail path
-      `x` and `y` -- the size of the thumbnail
-      `options`   -- list of options for this thumbnail
-      `quality`   -- quality setting for this thumbnail
-    """
-    # Fall back to using thumbnail settings. These are local imports so that
-    # Django is not required just to import this utils module.
-    if root is None:
-        from django.conf import settings
-        root = settings.MEDIA_ROOT
-    if prefix is None:
-        from sorl.thumbnail.main import get_thumbnail_setting
-        prefix = get_thumbnail_setting('PREFIX')
-    if subdir is None:
-        from sorl.thumbnail.main import get_thumbnail_setting
-        subdir = get_thumbnail_setting('SUBDIR')
-    if basedir is None:
-        from sorl.thumbnail.main import get_thumbnail_setting
-        basedir = get_thumbnail_setting('BASEDIR')
-    source_dir, filename = os.path.split(relative_source_path)
-    thumbs_path = os.path.join(root, basedir, source_dir, subdir)
-    if not os.path.isdir(thumbs_path):
-        return []
-    files = all_thumbnails(thumbs_path, recursive=False, prefix=prefix,
-                           subdir='')
-    return files.get(filename, [])
-
-
-def delete_thumbnails(relative_source_path, root=None, basedir=None,
-                      subdir=None, prefix=None):
-    """
-    Delete all thumbnails for a source image.
-    """
-    thumbs = thumbnails_for_file(relative_source_path, root, basedir, subdir,
-                                 prefix)
-    return _delete_using_thumbs_list(thumbs)
-
-
-def _delete_using_thumbs_list(thumbs):
-    deleted = 0
-    for thumb_dict in thumbs:
-        filename = thumb_dict['filename']
-        try:
-            os.remove(filename)
-        except:
-            pass
-        else:
-            deleted += 1
-    return deleted
-
-
-def delete_all_thumbnails(path, recursive=True):
-    """
-    Delete all files within a path which match the thumbnails pattern.
-
-    By default, matching files from all sub-directories are also removed. To
-    only remove from the path directory, set recursive=False.
-    """
-    total = 0
-    for thumbs in all_thumbnails(path, recursive=recursive).values():
-        total += _delete_using_thumbs_list(thumbs)
-    return total
-
-
-def split_args(args):
-    """
-    Split a list of argument strings into a dictionary where each key is an
-    argument name.
-
-    An argument looks like ``crop``, ``crop="some option"`` or ``crop=my_var``.
-    Arguments which provide no value get a value of ``None``.
-    """
-    if not args:
-        return {}
-    # Handle the old comma separated argument format.
-    if len(args) == 1 and not re_new_args.search(args[0]):
-        args = args[0].split(',')
-    # Separate out the key and value for each argument.
-    args_dict = {}
-    for arg in args:
-        split_arg = arg.split('=', 1)
-        value = len(split_arg) > 1 and split_arg[1] or None
-        args_dict[split_arg[0]] = value
-    return args_dict
-
-
-def image_entropy(im):
-    """
-    Calculate the entropy of an image. Used for "smart cropping".
-    """
-    hist = im.histogram()
-    hist_size = float(sum(hist))
-    hist = [h / hist_size for h in hist]
-    return -sum([p * math.log(p, 2) for p in hist if p != 0])
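
Two notes on the helpers above. image_entropy() returns the Shannon entropy of the image histogram; the "smart" crop mode in processors.py repeatedly discards whichever 10-pixel edge slice has the lower entropy. And split_args() accepts both the space-separated and the legacy comma-separated argument forms, which parse to the same mapping, as this hand-checked sketch shows:

    >>> from sorl.thumbnail.utils import split_args
    >>> split_args(['crop', 'quality=95']) == {'crop': None, 'quality': '95'}
    True
    >>> split_args(['crop,quality=95']) == {'crop': None, 'quality': '95'}
    True
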
diff --git a/apps/south/__init__.py b/apps/south/__init__.py
deleted file mode 100644 (file)
index 3e5972e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-"""
-South - Usable migrations for Django apps
-"""
-
-__version__ = "0.4"
-__authors__ = ["Andrew Godwin <andrew@aeracode.org>", "Andy McCurdy <andy@andymccurdy.com>"]
diff --git a/apps/south/db/__init__.py b/apps/south/db/__init__.py
deleted file mode 100644 (file)
index 8e4d773..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-
-# Establish the common DatabaseOperations instance, which we call 'db'.
-# This code is partly lifted from Django Evolution.
-from django.conf import settings
-import sys
-module_name = ['south.db', settings.DATABASE_ENGINE]
-try:
-    module = __import__('.'.join(module_name),{},{},[''])
-except ImportError:
-    sys.stderr.write("There is no South database module for the engine '%s'. Please either choose a supported one, or remove South from INSTALLED_APPS.\n" % settings.DATABASE_ENGINE)
-    sys.exit(1)
-db = module.DatabaseOperations()
\ No newline at end of file
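
The `db` object built here is what hand-written South 0.4 migrations import. A minimal sketch of such a migration, with a hypothetical app and table name (the (column name, Field) pairs match what DatabaseOperations.create_table below expects):

    from south.db import db
    from django.db import models

    class Migration:
        def forwards(self):
            # Hypothetical table; columns are ordinary Django fields.
            db.create_table('myapp_spam', (
                ('id', models.AutoField(primary_key=True)),
                ('weight', models.FloatField()),
            ))

        def backwards(self):
            db.delete_table('myapp_spam')
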
diff --git a/apps/south/db/generic.py b/apps/south/db/generic.py
deleted file mode 100644 (file)
index 4a5b512..0000000
+++ /dev/null
@@ -1,543 +0,0 @@
-
-import datetime
-from django.core.management.color import no_style
-from django.db import connection, transaction, models
-from django.db.backends.util import truncate_name
-from django.db.models.fields import NOT_PROVIDED
-from django.dispatch import dispatcher
-from django.conf import settings
-
-
-def alias(attrname):
-    """
-    Returns a function which calls 'attrname' - for function aliasing.
-    We can't just use foo = bar, as this breaks subclassing.
-    """
-    def func(self, *args, **kwds):
-        return getattr(self, attrname)(*args, **kwds)
-    return func
-
-
-class DatabaseOperations(object):
-
-    """
-    Generic SQL implementation of the DatabaseOperations.
-    Some of this code comes from Django Evolution.
-    """
-
-    # We assume the generic DB can handle DDL transactions. MySQL will change this.
-    has_ddl_transactions = True
-
-    def __init__(self):
-        self.debug = False
-        self.deferred_sql = []
-        self.dry_run = False
-        self.pending_create_signals = []
-
-    def execute(self, sql, params=[]):
-        """
-        Executes the given SQL statement, with optional parameters.
-        If the instance's debug attribute is True, prints out what it executes.
-        """
-        cursor = connection.cursor()
-        if self.debug:
-            print "   = %s" % sql, params
-
-        if self.dry_run:
-            return []
-
-        cursor.execute(sql, params)
-        try:
-            return cursor.fetchall()
-        except:
-            return []
-
-
-    def add_deferred_sql(self, sql):
-        """
-        Adds an SQL statement to the deferred list; it won't be executed until
-        this instance's execute_deferred_sql method is run.
-        """
-        self.deferred_sql.append(sql)
-
-
-    def execute_deferred_sql(self):
-        """
-        Executes all deferred SQL, resetting the deferred_sql list
-        """
-        for sql in self.deferred_sql:
-            self.execute(sql)
-
-        self.deferred_sql = []
-
-
-    def clear_deferred_sql(self):
-        """
-        Resets the deferred_sql list to empty.
-        """
-        self.deferred_sql = []
-    
-    
-    def clear_run_data(self):
-        """
-        Resets variables to how they should be before a run. Used for dry runs.
-        """
-        self.clear_deferred_sql()
-        self.pending_create_signals = []
-
-
-    def create_table(self, table_name, fields):
-        """
-        Creates the table 'table_name'. 'fields' is a tuple of fields,
-        each represented by a 2-part tuple of field name and a
-        django.db.models.fields.Field object.
-        """
-        qn = connection.ops.quote_name
-
-        # allow fields to be a dictionary
-        # removed for now - philosophical reasons (this is almost certainly not what you want)
-        #try:
-        #    fields = fields.items()
-        #except AttributeError:
-        #    pass
-
-        columns = [
-            self.column_sql(table_name, field_name, field)
-            for field_name, field in fields
-        ]
-
-        self.execute('CREATE TABLE %s (%s);' % (qn(table_name), ', '.join([col for col in columns if col])))
-
-    add_table = alias('create_table') # Alias for consistency's sake
-
-
-    def rename_table(self, old_table_name, table_name):
-        """
-        Renames the table 'old_table_name' to 'table_name'.
-        """
-        if old_table_name == table_name:
-            # No Operation
-            return
-        qn = connection.ops.quote_name
-        params = (qn(old_table_name), qn(table_name))
-        self.execute('ALTER TABLE %s RENAME TO %s;' % params)
-
-
-    def delete_table(self, table_name):
-        """
-        Deletes the table 'table_name'.
-        """
-        qn = connection.ops.quote_name
-        params = (qn(table_name), )
-        self.execute('DROP TABLE %s;' % params)
-
-    drop_table = alias('delete_table')
-
-
-    def clear_table(self, table_name):
-        """
-        Deletes all rows from 'table_name'.
-        """
-        qn = connection.ops.quote_name
-        params = (qn(table_name), )
-        self.execute('DELETE FROM %s;' % params)
-
-    add_column_string = 'ALTER TABLE %s ADD COLUMN %s;'
-
-    def add_column(self, table_name, name, field, keep_default=True):
-        """
-        Adds the column 'name' to the table 'table_name'.
-        Uses the 'field' parameter, a django.db.models.fields.Field instance,
-        to generate the necessary SQL.
-
-        @param table_name: The name of the table to add the column to
-        @param name: The name of the column to add
-        @param field: The field to use
-        """
-        qn = connection.ops.quote_name
-        sql = self.column_sql(table_name, name, field)
-        if sql:
-            params = (
-                qn(table_name),
-                sql,
-            )
-            sql = self.add_column_string % params
-            self.execute(sql)
-
-            # Now, drop the default if we need to
-            if not keep_default and field.default:
-                field.default = NOT_PROVIDED
-                self.alter_column(table_name, name, field, explicit_name=False)
-
-    alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s'
-    alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL'
-    alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL'
-    allows_combined_alters = True
-
-    def alter_column(self, table_name, name, field, explicit_name=True):
-        """
-        Alters the given column name so it will match the given field.
-        Note that conversion between the two by the database must be possible.
-        Does not automatically add _id; to have this behaviour, pass
-        explicit_name=False.
-
-        @param table_name: The name of the table to add the column to
-        @param name: The name of the column to alter
-        @param field: The new field definition to use
-        """
-
-        # hook for the field to do any resolution prior to its attributes being queried
-        if hasattr(field, 'south_init'):
-            field.south_init()
-
-        qn = connection.ops.quote_name
-        
-        # Add _id or whatever if we need to
-        if not explicit_name:
-            field.set_attributes_from_name(name)
-            name = field.column
-
-        # First, change the type
-        params = {
-            "column": qn(name),
-            "type": field.db_type(),
-        }
-
-        # SQLs is a list of (SQL, values) pairs.
-        sqls = [(self.alter_string_set_type % params, [])]
-
-        # Next, set any default
-        if not field.null and field.has_default():
-            default = field.get_default()
-            sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (qn(name),), [default]))
-        else:
-            sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (qn(name),), []))
-
-
-        # Next, nullity
-        params = {
-            "column": qn(name),
-            "type": field.db_type(),
-        }
-        if field.null:
-            sqls.append((self.alter_string_set_null % params, []))
-        else:
-            sqls.append((self.alter_string_drop_null % params, []))
-
-
-        # TODO: Unique
-
-        if self.allows_combined_alters:
-            sqls, values = zip(*sqls)
-            self.execute(
-                "ALTER TABLE %s %s;" % (qn(table_name), ", ".join(sqls)),
-                flatten(values),
-            )
-        else:
-            # Some databases (e.g. MySQL) don't allow more than one ALTER in a single statement.
-            for sql, values in sqls:
-                self.execute("ALTER TABLE %s %s;" % (qn(table_name), sql), values)
-
-
-    def column_sql(self, table_name, field_name, field, tablespace=''):
-        """
-        Creates the SQL snippet for a column. Used by add_column and add_table.
-        """
-        qn = connection.ops.quote_name
-
-        field.set_attributes_from_name(field_name)
-
-        # hook for the field to do any resolution prior to its attributes being queried
-        if hasattr(field, 'south_init'):
-            field.south_init()
-
-        sql = field.db_type()
-        if sql:        
-            field_output = [qn(field.column), sql]
-            field_output.append('%sNULL' % (not field.null and 'NOT ' or ''))
-            if field.primary_key:
-                field_output.append('PRIMARY KEY')
-            elif field.unique:
-                # Instead of using UNIQUE, add a unique index with a predictable name
-                self.add_deferred_sql(
-                    self.create_index_sql(
-                        table_name,
-                        [field.column],
-                        unique = True,
-                        db_tablespace = tablespace,
-                    )
-                )
-
-            tablespace = field.db_tablespace or tablespace
-            if tablespace and connection.features.supports_tablespaces and field.unique:
-                # We must specify the index tablespace inline, because we
-                # won't be generating a CREATE INDEX statement for this field.
-                field_output.append(connection.ops.tablespace_sql(tablespace, inline=True))
-
-            sql = ' '.join(field_output)
-            sqlparams = ()
-            # if the field is "NOT NULL" and a default value is provided, create the column with it
-            # this allows the addition of a NOT NULL field to a table with existing rows
-            if not field.null and field.has_default():
-                default = field.get_default()
-                # If the default is a callable, then call it!
-                if callable(default):
-                    default = default()
-                # Now do some very cheap quoting. TODO: Redesign return values to avoid this.
-                if isinstance(default, basestring):
-                    default = "'%s'" % default.replace("'", "''")
-                elif isinstance(default, datetime.date):
-                    default = "'%s'" % default
-                sql += " DEFAULT %s"
-                sqlparams = (default,)
-
-            if field.rel and self.supports_foreign_keys:
-                self.add_deferred_sql(
-                    self.foreign_key_sql(
-                        table_name,
-                        field.column,
-                        field.rel.to._meta.db_table,
-                        field.rel.to._meta.get_field(field.rel.field_name).column
-                    )
-                )
-
-            if field.db_index and not field.unique:
-                self.add_deferred_sql(self.create_index_sql(table_name, [field.column]))
-
-        if hasattr(field, 'post_create_sql'):
-            style = no_style()
-            for stmt in field.post_create_sql(style, table_name):
-                self.add_deferred_sql(stmt)
-
-        if sql:
-            return sql % sqlparams
-        else:
-            return None
-
-
-    supports_foreign_keys = True
-
-    def foreign_key_sql(self, from_table_name, from_column_name, to_table_name, to_column_name):
-        """
-        Generates a full SQL statement to add a foreign key constraint
-        """
-        qn = connection.ops.quote_name
-        constraint_name = '%s_refs_%s_%x' % (from_column_name, to_column_name, abs(hash((from_table_name, to_table_name))))
-        return 'ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % (
-            qn(from_table_name),
-            qn(truncate_name(constraint_name, connection.ops.max_name_length())),
-            qn(from_column_name),
-            qn(to_table_name),
-            qn(to_column_name),
-            connection.ops.deferrable_sql() # Django knows this
-        )
-
-
-    def create_index_name(self, table_name, column_names):
-        """
-        Generate a unique name for the index
-        """
-        index_unique_name = ''
-        if len(column_names) > 1:
-            index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
-
-        return '%s_%s%s' % (table_name, column_names[0], index_unique_name)
-
-
-    def create_index_sql(self, table_name, column_names, unique=False, db_tablespace=''):
-        """
-        Generates a create index statement on 'table_name' for a list of 'column_names'
-        """
-        qn = connection.ops.quote_name
-        if not column_names:
-            print "No column names supplied on which to create an index"
-            return ''
-
-        if db_tablespace and connection.features.supports_tablespaces:
-            tablespace_sql = ' ' + connection.ops.tablespace_sql(db_tablespace)
-        else:
-            tablespace_sql = ''
-
-        index_name = self.create_index_name(table_name, column_names)
-        return 'CREATE %sINDEX %s ON %s (%s)%s;' % (
-            unique and 'UNIQUE ' or '',
-            qn(index_name),
-            qn(table_name),
-            ','.join([qn(field) for field in column_names]),
-            tablespace_sql
-        )
-
-    def create_index(self, table_name, column_names, unique=False, db_tablespace=''):
-        """ Executes a create index statement """
-        sql = self.create_index_sql(table_name, column_names, unique, db_tablespace)
-        self.execute(sql)
-
-
-    drop_index_string = 'DROP INDEX %(index_name)s'
-
-    def delete_index(self, table_name, column_names, db_tablespace=''):
-        """
-        Deletes an index created with create_index.
-        This is possible using only columns due to the deterministic
-        index naming function which relies on column names.
-        """
-        if isinstance(column_names, (str, unicode)):
-            column_names = [column_names]
-        name = self.create_index_name(table_name, column_names)
-        qn = connection.ops.quote_name
-        sql = self.drop_index_string % {"index_name": qn(name), "table_name": qn(table_name)}
-        self.execute(sql)
-
-    drop_index = alias('delete_index')
-
-    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;'
-
-    def delete_column(self, table_name, name):
-        """
-        Deletes the column 'name' from the table 'table_name'.
-        """
-        qn = connection.ops.quote_name
-        params = (qn(table_name), qn(name))
-        self.execute(self.delete_column_string % params, [])
-
-    drop_column = alias('delete_column')
-
-
-    def rename_column(self, table_name, old, new):
-        """
-        Renames the column 'old' from the table 'table_name' to 'new'.
-        """
-        raise NotImplementedError("rename_column has no generic SQL syntax")
-
-
-    def start_transaction(self):
-        """
-        Makes sure the following commands are inside a transaction.
-        Must be followed by a (commit|rollback)_transaction call.
-        """
-        if self.dry_run:
-            return
-        transaction.commit_unless_managed()
-        transaction.enter_transaction_management()
-        transaction.managed(True)
-
-
-    def commit_transaction(self):
-        """
-        Commits the current transaction.
-        Must be preceded by a start_transaction call.
-        """
-        if self.dry_run:
-            return
-        transaction.commit()
-        transaction.leave_transaction_management()
-
-
-    def rollback_transaction(self):
-        """
-        Rolls back the current transaction.
-        Must be preceded by a start_transaction call.
-        """
-        if self.dry_run:
-            return
-        transaction.rollback()
-        transaction.leave_transaction_management()
-
-
-    def send_create_signal(self, app_label, model_names):
-        self.pending_create_signals.append((app_label, model_names))
-
-
-    def send_pending_create_signals(self):
-        for (app_label, model_names) in self.pending_create_signals:
-            self.really_send_create_signal(app_label, model_names)
-        self.pending_create_signals = []
-
-
-    def really_send_create_signal(self, app_label, model_names):
-        """
-        Sends a post_syncdb signal for the model specified.
-
-        If the model is not found (perhaps it's been deleted?),
-        no signal is sent.
-
-        TODO: The behavior of django.contrib.* apps seems flawed in that
-        they don't respect created_models.  Rather, they blindly execute
-        over all models within the app sending the signal.  This is a
-        patch we should push Django to make. For now, this should work.
-        """
-        if self.debug:
-            print " - Sending post_syncdb signal for %s: %s" % (app_label, model_names)
-        app = models.get_app(app_label)
-        if not app:
-            return
-
-        created_models = []
-        for model_name in model_names:
-            model = models.get_model(app_label, model_name)
-            if model:
-                created_models.append(model)
-
-        if created_models:
-            # syncdb defaults -- perhaps take these as options?
-            verbosity = 1
-            interactive = True
-
-            if hasattr(dispatcher, "send"):
-                dispatcher.send(signal=models.signals.post_syncdb, sender=app,
-                                app=app, created_models=created_models,
-                                verbosity=verbosity, interactive=interactive)
-            else:
-                models.signals.post_syncdb.send(sender=app,
-                                                app=app, created_models=created_models,
-                                                verbosity=verbosity, interactive=interactive)
-
-    def mock_model(self, model_name, db_table, db_tablespace='', 
-                   pk_field_name='id', pk_field_type=models.AutoField,
-                   pk_field_args=[], pk_field_kwargs={}):
-        """
-        Generates a MockModel class that provides enough information
-        to be used by a foreign key/many-to-many relationship.
-
-        Migrations should prefer to use these rather than actual models
-        as models could get deleted over time, but these can remain in
-        migration files forever.
-        """
-        class MockOptions(object):
-            def __init__(self):
-                self.db_table = db_table
-                self.db_tablespace = db_tablespace or settings.DEFAULT_TABLESPACE
-                self.object_name = model_name
-                self.module_name = model_name.lower()
-
-                if pk_field_type == models.AutoField:
-                    pk_field_kwargs['primary_key'] = True
-
-                self.pk = pk_field_type(*pk_field_args, **pk_field_kwargs)
-                self.pk.set_attributes_from_name(pk_field_name)
-                self.abstract = False
-
-            def get_field_by_name(self, field_name):
-                # we only care about the pk field
-                return (self.pk, self.model, True, False)
-
-            def get_field(self, name):
-                # we only care about the pk field
-                return self.pk
-
-        class MockModel(object):
-            _meta = None
-
-        # We need to return an actual class object here, not an instance
-        MockModel._meta = MockOptions()
-        MockModel._meta.model = MockModel
-        return MockModel
-
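For reference, a minimal sketch of how a migration might call mock_model (the app, table, and model names here are illustrative, not from this repository):

    from south.db import db
    from django.db import models

    # Stand-in for a hypothetical example.Author model; migrations use mocks
    # so they keep working even after the real model has been deleted.
    Author = db.mock_model(model_name='Author', db_table='example_author',
                           db_tablespace='', pk_field_name='id',
                           pk_field_type=models.AutoField)
    db.add_column('example_book', 'author', models.ForeignKey(Author, null=True))
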
-# Single-level flattening of lists
-def flatten(ls):
-    nl = []
-    for l in ls:
-        nl += l
-    return nl
-
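The class above is the entire DDL surface that South migrations program against; a minimal sketch of direct usage, assuming an illustrative 'example_book' table:

    from south.db import db
    from django.db import models

    db.start_transaction()
    # Widen an existing column, then drop another one.
    db.alter_column('example_book', 'title', models.CharField(max_length=250))
    db.delete_column('example_book', 'subtitle')
    db.commit_transaction()
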
diff --git a/apps/south/db/mysql.py b/apps/south/db/mysql.py
deleted file mode 100644 (file)
index a05c071..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-
-from django.db import connection
-from django.conf import settings
-from south.db import generic
-
-class DatabaseOperations(generic.DatabaseOperations):
-
-    """
-    MySQL implementation of database operations.
-    """
-    
-    alter_string_set_type = ''
-    alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
-    alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
-    drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
-    allows_combined_alters = False
-    has_ddl_transactions = False
-    
-    def execute(self, sql, params=[]):
-        if hasattr(settings, "DATABASE_STORAGE_ENGINE") and \
-           settings.DATABASE_STORAGE_ENGINE:
-            generic.DatabaseOperations.execute(self, "SET storage_engine=%s;" %
-                settings.DATABASE_STORAGE_ENGINE)
-        return generic.DatabaseOperations.execute(self, sql, params)
-    execute.__doc__ = generic.DatabaseOperations.execute.__doc__
-
-    def rename_column(self, table_name, old, new):
-        if old == new or self.dry_run:
-            return []
-        
-        qn = connection.ops.quote_name
-        
-        rows = [x for x in self.execute('DESCRIBE %s' % (qn(table_name),)) if x[0] == old]
-        
-        if not rows:
-            raise ValueError("No column '%s' in '%s'." % (old, table_name))
-        
-        params = (
-            qn(table_name),
-            qn(old),
-            qn(new),
-            rows[0][1],
-            rows[0][2] == "YES" and "NULL" or "NOT NULL",
-            rows[0][3] == "PRI" and "PRIMARY KEY" or "",
-            rows[0][4] and "DEFAULT " or "",
-            rows[0][4] and "%s" or "",
-            rows[0][5] or "",
-        )
-        
-        sql = 'ALTER TABLE %s CHANGE COLUMN %s %s %s %s %s %s %s %s;' % params
-        
-        if rows[0][4]:
-            self.execute(sql, (rows[0][4],))
-        else:
-            self.execute(sql)
-    
-    
-    def rename_table(self, old_table_name, table_name):
-        """
-        Renames the table 'old_table_name' to 'table_name'.
-        """
-        if old_table_name == table_name:
-            # No Operation
-            return
-        qn = connection.ops.quote_name
-        params = (qn(old_table_name), qn(table_name))
-        self.execute('RENAME TABLE %s TO %s;' % params)
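Because MySQL has no bare RENAME COLUMN, the rename_column above reconstructs the full column definition from DESCRIBE output; a sketch of the call and the rough shape of the SQL it ends up issuing (names and types illustrative):

    from south.db import db
    # Emits something along the lines of:
    #   ALTER TABLE `example_book` CHANGE COLUMN `title` `name` varchar(250) NOT NULL;
    db.rename_column('example_book', 'title', 'name')
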
diff --git a/apps/south/db/postgresql_psycopg2.py b/apps/south/db/postgresql_psycopg2.py
deleted file mode 100644 (file)
index 839b4b1..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-
-from django.db import connection
-from south.db import generic
-
-class DatabaseOperations(generic.DatabaseOperations):
-
-    """
-    PsycoPG2 implementation of database operations.
-    """
-
-    def rename_column(self, table_name, old, new):
-        if old == new:
-            return []
-        qn = connection.ops.quote_name
-        params = (qn(table_name), qn(old), qn(new))
-        self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % params)
-    
-    def rename_table(self, old_table_name, table_name):
-        "will rename the table and an associated ID sequence and primary key index"
-        # First, rename the table
-        generic.DatabaseOperations.rename_table(self, old_table_name, table_name)
-        # Then, try renaming the ID sequence
-        # (if you're using other AutoFields... your problem, unfortunately)
-        self.commit_transaction()
-        self.start_transaction()
-        try:
-            generic.DatabaseOperations.rename_table(self, old_table_name+"_id_seq", table_name+"_id_seq")
-        except:
-            if self.debug:
-                print "   ~ No such sequence (ignoring error)"
-            self.rollback_transaction()
-        else:
-            self.commit_transaction()
-        self.start_transaction()
-
-        # Rename primary key index, will not rename other indices on
-        # the table that are used by django (e.g. foreign keys). Until
-        # figure out how, you need to do this yourself.
-        try:
-            generic.DatabaseOperations.rename_table(self, old_table_name+"_pkey", table_name+ "_pkey")
-        except:
-            if self.debug:
-                print "   ~ No such primary key (ignoring error)"
-            self.rollback_transaction()
-        else:
-            self.commit_transaction()
-        self.start_transaction()
-
-
-    def rename_index(self, old_index_name, index_name):
-        "Rename an index individually"
-        generic.DatabaseOperations.rename_table(self, old_index_name, index_name)
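A sketch of what the PostgreSQL rename_table above attempts, assuming Django's default sequence and primary-key names:

    from south.db import db
    # Runs ALTER TABLE ... RENAME TO ..., then tries to rename
    # 'example_book_id_seq' and 'example_book_pkey' to match,
    # rolling back either rename if the object doesn't exist.
    db.rename_table('example_book', 'example_tome')
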
diff --git a/apps/south/db/sql_server/pyodbc.py b/apps/south/db/sql_server/pyodbc.py
deleted file mode 100644 (file)
index 58c5166..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-from django.db import connection
-from django.db.models.fields import *
-from south.db import generic
-
-class DatabaseOperations(generic.DatabaseOperations):
-    """
-    django-pyodbc (sql_server.pyodbc) implementation of database operations.
-    """
-    
-    add_column_string = 'ALTER TABLE %s ADD %s;'
-    alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
-    allows_combined_alters = False
-    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;'
-
-    def create_table(self, table_name, fields):
-        # Tweak stuff as needed
-        for name,f in fields:
-            if isinstance(f, BooleanField):
-                if f.default == True:
-                    f.default = 1
-                if f.default == False:
-                    f.default = 0
-
-        # Run
-        generic.DatabaseOperations.create_table(self, table_name, fields)
diff --git a/apps/south/db/sqlite3.py b/apps/south/db/sqlite3.py
deleted file mode 100644 (file)
index 1fac1b8..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-
-from django.db import connection
-from south.db import generic
-
-class DatabaseOperations(generic.DatabaseOperations):
-
-    """
-    SQLite3 implementation of database operations.
-    """
-
-    # SQLite ignores foreign key constraints. I wish I could.
-    supports_foreign_keys = False
-    
-    # You can't add UNIQUE columns with an ALTER TABLE.
-    def add_column(self, table_name, name, field, *args, **kwds):
-        # Run ALTER TABLE with no unique column
-        unique, field._unique, field.db_index = field.unique, False, False
-        generic.DatabaseOperations.add_column(self, table_name, name, field, *args, **kwds)
-        # If it _was_ unique, make an index on it.
-        if unique:
-            self.create_index(table_name, [name], unique=True)
-    
-    # SQLite doesn't have ALTER COLUMN
-    def alter_column(self, table_name, name, field, explicit_name=True):
-        """
-        Not supported under SQLite.
-        """
-        raise NotImplementedError("SQLite does not support altering columns.")
-    
-    # Nor DROP COLUMN
-    def delete_column(self, table_name, name, field):
-        """
-        Not supported under SQLite.
-        """
-        raise NotImplementedError("SQLite does not support deleting columns.")
-    
-    # Nor RENAME COLUMN
-    def rename_column(self, table_name, old, new):
-        """
-        Not supported under SQLite.
-        """
-        raise NotImplementedError("SQLite does not support renaming columns.")
\ No newline at end of file
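A sketch of the SQLite unique-column workaround above: the field is added without its UNIQUE flag, which ALTER TABLE can handle, and the uniqueness comes back as a separate index:

    from south.db import db
    from django.db import models

    # Runs a plain ADD COLUMN, then CREATE UNIQUE INDEX on the new column.
    db.add_column('example_book', 'slug',
                  models.CharField(max_length=50, unique=True, null=True))
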
diff --git a/apps/south/docs/CHANGELOG b/apps/south/docs/CHANGELOG
deleted file mode 100644 (file)
index fa106f9..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-0.3
-===
-
-The yay-dependencies release.
-
-New features:
-
- - Dependency support for migrations
- - Fields are now used for declaring columns and tables
-
-
-0.2
-===
-
-The oh-i'm-sorry-mysql-users release.
-
-New features:
-
- - MySQL support up to the same level as PostgreSQL
- - New --all option to ./manage.py startmigration, which creates a migration
-   for every model in the given app. For project starts.
- - Project status upgraded to 'beta'. Next up, a colour-coded
-   release level system.
-
-Fixed bugs:
-
- - A few typos in various column methods
- - ManyToMany tables weren't created by startmigration migrations.
-
-Known bugs:
-
- - None
-
-
-0.1
-===
-
-Initial release.
diff --git a/apps/south/docs/CONTRIBUTING b/apps/south/docs/CONTRIBUTING
deleted file mode 100644 (file)
index 56dd525..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-
-Contributing to South
----------------------
-
-Contributions to South are very welcome.
-
-
-You can find more info on our site at http://south.aeracode.org/wiki/Contributing
diff --git a/apps/south/docs/LICENSE b/apps/south/docs/LICENSE
deleted file mode 100644 (file)
index 1914f85..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
-
-   1. You must give any other recipients of the Work or Derivative Works a copy of this License; and
-
-   2. You must cause any modified files to carry prominent notices stating that You changed the files; and
-
-   3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
-
-   4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
diff --git a/apps/south/docs/README b/apps/south/docs/README
deleted file mode 100644 (file)
index 99d3f20..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-This is South, a Django application to provide migrations in a sane way.
-
-By sane, we mean that the status of every migration is tracked individually,
-rather than just the number of the top migration reached; this means South
-can detect when you have an unapplied migration that's sitting in the middle
-of a whole load of applied ones, and will let you apply it straight off,
-or let you roll back to it, and apply from there forward.
-
-Documentation on South is currently available on our project site;
-you can find it at http://south.aeracode.org/wiki/Documentation
diff --git a/apps/south/management/commands/migrate.py b/apps/south/management/commands/migrate.py
deleted file mode 100644 (file)
index 1cc2a29..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-from django.core.management.base import BaseCommand
-from django.core.management.color import no_style
-from django.conf import settings
-from django.db import models
-from optparse import make_option
-from south import migration
-import sys
-
-class Command(BaseCommand):
-    option_list = BaseCommand.option_list + (
-        make_option('--skip', action='store_true', dest='skip', default=False,
-            help='Will skip over out-of-order missing migrations'),
-        make_option('--merge', action='store_true', dest='merge', default=False,
-            help='Will run out-of-order missing migrations as they are - no rollbacks.'),
-        make_option('--only', action='store_true', dest='only', default=False,
-            help='Only runs or rolls back the migration specified, and none around it.'),
-        make_option('--fake', action='store_true', dest='fake', default=False,
-            help="Pretends to do the migrations, but doesn't actually execute them."),
-
-        make_option('--db-dry-run', action='store_true', dest='db_dry_run', default=False,
-            help="Doesn't execute the SQL generated by the db methods, and doesn't store a record that the migration(s) occurred. Useful to test migrations before applying them."),
-    )
-    if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
-        option_list += (
-            make_option('--verbosity', action='store', dest='verbosity', default='1',
-            type='choice', choices=['0', '1', '2'],
-            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
-        )
-    help = "Runs migrations for all apps."
-
-    def handle(self, app=None, target=None, skip=False, merge=False, only=False, backwards=False, fake=False, db_dry_run=False, **options):
-
-        # Work out what the resolve mode is
-        resolve_mode = merge and "merge" or (skip and "skip" or None)
-        # Turn on db debugging
-        from south.db import db
-        db.debug = True
-        
-        # NOTE: THIS IS DUPLICATED FROM django.core.management.commands.syncdb
-        # This code imports any module named 'management' in INSTALLED_APPS.
-        # The 'management' module is the preferred way of listening to post_syncdb
-        # signals, and since we're sending those out with create_table migrations,
-        # we need apps to behave correctly.
-        for app_name in settings.INSTALLED_APPS:
-            try:
-                __import__(app_name + '.management', {}, {}, [''])
-            except ImportError, exc:
-                msg = exc.args[0]
-                if not msg.startswith('No module named') or 'management' not in msg:
-                    raise
-        # END DJANGO DUPE CODE
-        
-        # Migrate each app
-        if app:
-            apps = [migration.get_app(app)]
-        else:
-            apps = migration.get_migrated_apps()
-        silent = options.get('verbosity', 0) == 0
-        for app in apps:
-            migration.migrate_app(
-                app,
-                resolve_mode = resolve_mode,
-                target_name = target,
-                fake = fake,
-                db_dry_run = db_dry_run,
-                silent = silent,
-                load_inital_data = True,
-            )
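The same options can be driven programmatically; a sketch (the app label is illustrative):

    from django.core import management
    # Resolve and apply migrations for one app without touching the
    # database or recording anything in the migration history.
    management.call_command('migrate', 'example', db_dry_run=True)
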
diff --git a/apps/south/management/commands/startmigration.py b/apps/south/management/commands/startmigration.py
deleted file mode 100644 (file)
index 1a8da99..0000000
+++ /dev/null
@@ -1,491 +0,0 @@
-from django.core.management.base import BaseCommand
-from django.core.management.color import no_style
-from django.db import models
-from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
-from django.contrib.contenttypes.generic import GenericRelation
-from django.db.models.fields import FieldDoesNotExist
-from optparse import make_option
-from south import migration
-import sys
-import os
-import re
-import string
-import random
-import inspect
-import parser
-
-class Command(BaseCommand):
-    option_list = BaseCommand.option_list + (
-        make_option('--model', action='append', dest='model_list', type='string',
-            help='Generate a Create Table migration for the specified model.  Add multiple models to this migration with subsequent --model parameters.'),
-        make_option('--add-field', action='append', dest='field_list', type='string',
-            help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
-        make_option('--initial', action='store_true', dest='initial', default=False,
-            help='Generate the initial schema for the app.'),
-    )
-    help = "Creates a new template migration for the given app"
-    
-    def handle(self, app=None, name="", model_list=None, field_list=None, initial=False, **options):
-        
-        # If model_list is None, then it's an empty list
-        model_list = model_list or []
-        
-        # If field_list is None, then it's an empty list
-        field_list = field_list or []
-        
-        # make sure --initial isn't combined with --model or --add-field
-        if initial and (model_list or field_list):
-            print "You cannot use --initial and other options together"
-            return
-            
-        # specify the default name 'initial' if a name wasn't specified and we're
-        # doing a migration for an entire app
-        if not name and initial:
-            name = 'initial'
-            
-        # if not name, there's an error
-        if not name:
-            print "You must name this migration"
-            return
-        
-        if not app:
-            print "Please provide an app in which to create the migration."
-            return
-            
-        # See if the app exists
-        app_models_module = models.get_app(app)
-        if not app_models_module:
-            print "App '%s' doesn't seem to exist, isn't in INSTALLED_APPS, or has no models." % app
-            return
-            
-        # Determine what models should be included in this migration.
-        models_to_migrate = []
-        if initial:
-            models_to_migrate = models.get_models(app_models_module)
-            if not models_to_migrate:
-                print "No models found in app '%s'" % (app)
-                return
-        else:
-            for model_name in model_list:
-                model = models.get_model(app, model_name)
-                if not model:
-                    print "Couldn't find model '%s' in app '%s'" % (model_name, app)
-                    return
-                    
-                models_to_migrate.append(model)
-        
-        # See what fields need to be included
-        fields_to_add = []
-        for field_spec in field_list:
-            model_name, field_name = field_spec.split(".", 1)
-            model = models.get_model(app, model_name)
-            if not model:
-                print "Couldn't find model '%s' in app '%s'" % (model_name, app)
-                return
-            try:
-                field = model._meta.get_field(field_name)
-            except FieldDoesNotExist:
-                print "Model '%s' doesn't have a field '%s'" % (model_name, field_name)
-                return
-            fields_to_add.append((model, field_name, field))
-        
-        # Make the migrations directory if it's not there
-        app_module_path = app_models_module.__name__.split('.')[0:-1]
-        try:
-            app_module = __import__('.'.join(app_module_path), {}, {}, [''])
-        except ImportError:
-            print "Couldn't find path to App '%s'." % app
-            return
-            
-        migrations_dir = os.path.join(
-            os.path.dirname(app_module.__file__),
-            "migrations",
-        )
-        # Make sure there's a migrations directory and __init__.py
-        if not os.path.isdir(migrations_dir):
-            print "Creating migrations directory at '%s'..." % migrations_dir
-            os.mkdir(migrations_dir)
-        init_path = os.path.join(migrations_dir, "__init__.py")
-        if not os.path.isfile(init_path):
-            # Touch the init py file
-            print "Creating __init__.py in '%s'..." % migrations_dir
-            open(init_path, "w").close()
-        # See what filename is next in line. We assume they use numbers.
-        migrations = migration.get_migration_names(migration.get_app(app))
-        highest_number = 0
-        for migration_name in migrations:
-            try:
-                number = int(migration_name.split("_")[0])
-                highest_number = max(highest_number, number)
-            except ValueError:
-                pass
-        # Make the new filename
-        new_filename = "%04i%s_%s.py" % (
-            highest_number + 1,
-            "".join([random.choice(string.letters.lower()) for i in range(0)]), # Possible random stuff insertion
-            name,
-        )
-        # If there's a model, make the migration skeleton, else leave it bare
-        forwards, backwards = '', ''
-        if fields_to_add:
-            # First, do the added fields
-            for model, field_name, field in fields_to_add:
-                field_definition = generate_field_definition(model, field)
-                
-                if isinstance(field, models.ManyToManyField):
-                    # Make a mock model for each side
-                    mock_model = "\n".join([
-                        create_mock_model(model, "        "), 
-                        create_mock_model(field.rel.to, "        ")
-                    ])
-                    # And a field defn, that's actually a table creation
-                    forwards += '''
-        # Mock Model
-%s
-        # Adding ManyToManyField '%s.%s'
-        db.create_table('%s', (
-            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
-            ('%s', models.ForeignKey(%s, null=False)),
-            ('%s', models.ForeignKey(%s, null=False))
-        )) ''' % (
-                mock_model,
-                model._meta.object_name,
-                field.name,
-                field.m2m_db_table(),
-                field.m2m_column_name()[:-3], # strip off the '_id' at the end
-                model._meta.object_name,
-                field.m2m_reverse_name()[:-3], # strip off the '_id' at the end
-                field.rel.to._meta.object_name
-                )
-                    backwards += '''
-        # Dropping ManyToManyField '%s.%s'
-        db.drop_table('%s')''' % (
-                        model._meta.object_name,
-                        field.name,
-                        field.m2m_db_table()
-                    )
-                    continue
-                elif field.rel: # ForeignKey, etc.
-                    mock_model = create_mock_model(field.rel.to, "        ")
-                    field_definition = related_field_definition(field, field_definition)
-                else:
-                    mock_model = None
-                
-                # If we can't get it (inspect madness?) then insert placeholder
-                if not field_definition:
-                    print "Warning: Could not generate field definition for %s.%s, manual editing of migration required." % \
-                                (model._meta.object_name, field.name)
-                    field_definition = '<<< REPLACE THIS WITH FIELD DEFINITION FOR %s.%s >>>' % (model._meta.object_name, field.name)
-                
-                if mock_model:
-                    forwards += '''
-        # Mock model
-%s
-        ''' % (mock_model)
-                
-                forwards += '''
-        # Adding field '%s.%s'
-        db.add_column(%r, %r, %s)
-        ''' % (
-            model._meta.object_name,
-            field.name,
-            model._meta.db_table,
-            field.name,
-            field_definition,
-        )
-                backwards += '''
-        # Deleting field '%s.%s'
-        db.delete_column(%r, %r)
-        ''' % (
-            model._meta.object_name,
-            field.name,
-            model._meta.db_table,
-            field.column,
-        )
-        
-        if models_to_migrate:
-            # Now, do the added models
-            for model in models_to_migrate:
-                table_name = model._meta.db_table
-                mock_models = []
-                fields = []
-                for f in model._meta.local_fields:
-                    
-                    # Look up the field definition to see how this was created
-                    field_definition = generate_field_definition(model, f)
-                    
-                    # If it's a OneToOneField, and ends in _ptr, just use it
-                    if isinstance(f, models.OneToOneField) and f.name.endswith("_ptr"):
-                        mock_models.append(create_mock_model(f.rel.to, "        "))
-                        field_definition = "models.OneToOneField(%s)" % f.rel.to.__name__
-                    
-                    # It's probably normal then
-                    elif field_definition:
-                        
-                        if isinstance(f, models.ForeignKey):
-                            mock_models.append(create_mock_model(f.rel.to, "        "))
-                            field_definition = related_field_definition(f, field_definition)
-                    
-                    # Oh noes, no defn found
-                    else:
-                        print "Warning: Could not generate field definition for %s.%s, manual editing of migration required." % \
-                                (model._meta.object_name, f.name)
-                        print f, type(f)
-                                
-                        field_definition = '<<< REPLACE THIS WITH FIELD DEFINITION FOR %s.%s >>>' % (model._meta.object_name, f.name)
-                                                
-                    fields.append((f.name, field_definition))
-                    
-                if mock_models:
-                    forwards += '''
-        
-        # Mock Models
-%s
-        ''' % "\n".join(mock_models)
-        
-                forwards += '''
-        # Model '%s'
-        db.create_table(%r, (
-            %s
-        ))''' % (
-                    model._meta.object_name,
-                    table_name,
-                    "\n            ".join(["('%s', %s)," % (f[0], f[1]) for f in fields]),
-                )
-
-                backwards = ('''db.delete_table('%s')
-        ''' % table_name) + backwards
-        
-                # Now go through local M2Ms and add extra stuff for them
-                for m in model._meta.local_many_to_many:
-                    # ignore generic relations
-                    if isinstance(m, GenericRelation):
-                        continue
-
-                    # if the 'through' option is specified, the table will
-                    # be created through the normal model creation above.
-                    if m.rel.through:
-                        continue
-                        
-                    mock_models = [create_mock_model(model, "        "), create_mock_model(m.rel.to, "        ")]
-                    
-                    forwards += '''
-        # Mock Models
-%s
-        
-        # M2M field '%s.%s'
-        db.create_table('%s', (
-            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
-            ('%s', models.ForeignKey(%s, null=False)),
-            ('%s', models.ForeignKey(%s, null=False))
-        )) ''' % (
-                        "\n".join(mock_models),
-                        model._meta.object_name,
-                        m.name,
-                        m.m2m_db_table(),
-                        m.m2m_column_name()[:-3], # strip off the '_id' at the end
-                        model._meta.object_name,
-                        m.m2m_reverse_name()[:-3], # strip off the '_id' at the end
-                        m.rel.to._meta.object_name
-                )
-                
-                    backwards = '''db.delete_table('%s')
-        ''' % m.m2m_db_table() + backwards
-                
-                if model._meta.unique_together:
-                    ut = model._meta.unique_together
-                    if not isinstance(ut[0], (list, tuple)):
-                        ut = (ut,)
-                        
-                    for unique in ut:
-                        columns = ["'%s'" % model._meta.get_field(f).column for f in unique]
-                        
-                        forwards += '''
-        db.create_index('%s', [%s], unique=True, db_tablespace='%s')
-        ''' %   (
-                        table_name,
-                        ','.join(columns),
-                        model._meta.db_tablespace
-                )
-                
-                
-            forwards += '''
-        
-        db.send_create_signal('%s', ['%s'])''' % (
-                app, 
-                "','".join(model._meta.object_name for model in models_to_migrate)
-                )
-        
-        # Try sniffing the encoding using PEP 0263's method
-        encoding = None
-        first_two_lines = inspect.getsourcelines(app_models_module)[0][:2]
-        for line in first_two_lines:
-            if re.search("coding[:=]\s*([-\w.]+)", line):
-                encoding = line
-        
-        if (not forwards) and (not backwards):
-            forwards = '"Write your forwards migration here"'
-            backwards = '"Write your backwards migration here"'
-        fp = open(os.path.join(migrations_dir, new_filename), "w")
-        fp.write("""%s
-from south.db import db
-from django.db import models
-from %s.models import *
-
-class Migration:
-    
-    def forwards(self):
-        %s
-    
-    def backwards(self):
-        %s
-""" % (encoding or "", '.'.join(app_module_path), forwards, backwards))
-        fp.close()
-        print "Created %s." % new_filename
-
-
-def generate_field_definition(model, field):
-    """
-    Inspects the source code of 'model' to find the code used to generate 'field'
-    """
-    def test_field(field_definition):
-        try:
-            parser.suite(field_definition)
-            return True
-        except SyntaxError:
-            return False
-            
-    def strip_comments(field_definition):
-        # remove any comments at the end of the field definition string.
-        field_definition = field_definition.strip()
-        if '#' not in field_definition:
-            return field_definition
-            
-        index = field_definition.index('#')
-        while index:
-            stripped_definition = field_definition[:index].strip()
-            # if the stripped definition is parsable, then we've removed
-            # the correct comment.
-            if test_field(stripped_definition):
-                return stripped_definition
-            
-            try:    
-                index = field_definition.index('#', index+1)
-            except ValueError:
-                break
-            
-        return field_definition
-        
-    # give field subclasses a chance to do anything tricky
-    # with the field definition
-    if hasattr(field, 'south_field_definition'):
-        return field.south_field_definition()
-    
-    field_pieces = []
-    found_field = False
-    source = inspect.getsourcelines(model)
-    if not source:
-        raise Exception("Could not find source to model: '%s'" % (model.__name__))
-    
-    # look for a line starting with the field name
-    start_field_re = re.compile(r'\s*%s\s*=\s*(.*)' % field.name)
-    for line in source[0]:
-        # if the field was found during a previous iteration, 
-        # we're here because the field spans across multiple lines;
-        # append the current line and try again
-        if found_field:
-            field_pieces.append(line.strip())
-            if test_field(' '.join(field_pieces)):
-                return strip_comments(' '.join(field_pieces))
-            continue
-        
-        match = start_field_re.match(line)
-        if match:
-            found_field = True
-            field_pieces.append(match.groups()[0].strip())
-            if test_field(' '.join(field_pieces)):
-                return strip_comments(' '.join(field_pieces))
-    
-    # the 'id' field never gets defined, so return what django does by default
-    # django.db.models.options::_prepare
-    if field.name == 'id' and field.__class__ == models.AutoField:
-        return "models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)"
-    
-    # search this class's parents
-    for base in model.__bases__:
-        # we don't want to scan the django base model
-        if base == models.Model:
-            continue
-            
-        field_definition = generate_field_definition(base, field)
-        if field_definition:
-            return field_definition
-            
-    return None
-    
-def replace_model_string(field_definition, search_string, model_name):
-    # wrap 'search_string' in both ' and " chars when searching
-    quotes = ["'", '"']
-    for quote in quotes:
-        test = "%s%s%s" % (quote, search_string, quote)
-        if test in field_definition:
-            return field_definition.replace(test, model_name)
-            
-    return None
-        
-def related_field_definition(field, field_definition):
-    # if the field definition contains any of the following strings,
-    # replace them with the model definition:
-    #   applabel.modelname
-    #   modelname
-    #   django.db.models.fields.related.RECURSIVE_RELATIONSHIP_CONSTANT
-    strings = [
-        '%s.%s' % (field.rel.to._meta.app_label, field.rel.to._meta.object_name),
-        '%s' % field.rel.to._meta.object_name,
-        RECURSIVE_RELATIONSHIP_CONSTANT
-    ]
-    
-    for test in strings:
-        fd = replace_model_string(field_definition, test, field.rel.to._meta.object_name)
-        if fd:
-            return fd
-    
-    return field_definition
-
-def create_mock_model(model, indent="        "):
-    # produce a string representing the python syntax necessary for creating
-    # a mock model using the supplied real model
-    if not model._meta.pk.__class__.__module__.startswith('django.db.models.fields'):
-        # we can fix this with some clever imports, but it doesn't seem necessary to
-        # spend time on just yet
-        print "Can't generate a mock model for %s because it's primary key isn't a default django field; it's type %s." % (model, model._meta.pk.__class__)
-        sys.exit()
-    
-    pk_field_args = []
-    pk_field_kwargs = {}
-    other_mocks = []
-    # If it's a OneToOneField or ForeignKey, take its first arg
-    if model._meta.pk.__class__.__name__ in ["OneToOneField", "ForeignKey"]:
-        if model._meta.pk.rel.to == model:
-            pk_field_args += ["'self'"]
-        else:
-            pk_field_args += [model._meta.pk.rel.to._meta.object_name]
-            other_mocks += [model._meta.pk.rel.to]
-    
-    # Perhaps it has a max_length set?
-    if model._meta.pk.max_length:
-        pk_field_kwargs["max_length"] = model._meta.pk.max_length
-    
-    return "%s%s%s = db.mock_model(model_name='%s', db_table='%s', db_tablespace='%s', pk_field_name='%s', pk_field_type=models.%s, pk_field_args=[%s], pk_field_kwargs=%r)" % \
-        (
-        "\n".join([create_mock_model(m, indent) for m in other_mocks]+[""]),
-        indent,
-        model._meta.object_name,
-        model._meta.object_name,
-        model._meta.db_table,
-        model._meta.db_tablespace,
-        model._meta.pk.name,
-        model._meta.pk.__class__.__name__,
-        ", ".join(pk_field_args),
-        pk_field_kwargs,
-        )
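Put together, the command writes a file shaped like the template above; a sketch of its output for a single new model (app and model names illustrative):

    from south.db import db
    from django.db import models
    from example.models import *

    class Migration:

        def forwards(self):
            # Model 'Book'
            db.create_table('example_book', (
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
                ('title', models.CharField(max_length=120)),
            ))
            db.send_create_signal('example', ['Book'])

        def backwards(self):
            db.delete_table('example_book')
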
diff --git a/apps/south/management/commands/syncdb.py b/apps/south/management/commands/syncdb.py
deleted file mode 100644 (file)
index 7b160c2..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-from django.core.management.base import NoArgsCommand, BaseCommand 
-from django.core.management.color import no_style
-from django.utils.datastructures import SortedDict
-from optparse import make_option
-from south import migration
-from django.core.management.commands import syncdb
-from django.conf import settings
-from django.db import models
-from django.db.models.loading import cache
-from django.core import management
-import sys
-
-def get_app_name(app):
-    return '.'.join( app.__name__.split('.')[0:-1] )
-
-class Command(NoArgsCommand):
-    option_list = NoArgsCommand.option_list + (
-        make_option('--noinput', action='store_false', dest='interactive', default=True,
-            help='Tells Django to NOT prompt the user for input of any kind.'),
-        make_option('--migrate', action='store_true', dest='migrate', default=False,
-            help='Tells South to also perform migrations after the sync. This is the default during testing and for other internal calls.'),
-    )
-    if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
-        option_list += (
-            make_option('--verbosity', action='store', dest='verbosity', default='1',
-            type='choice', choices=['0', '1', '2'],
-            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
-        )
-    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created, except those which use migrations."
-
-    def handle_noargs(self, **options):
-        # Work out what uses migrations and so doesn't need syncing
-        apps_needing_sync = []
-        apps_migrated = []
-        for app in models.get_apps():
-            app_name = get_app_name(app)
-            migrations = migration.get_app(app)
-            if migrations is None:
-                apps_needing_sync.append(app_name)
-            else:
-                # This is a migrated app, leave it
-                apps_migrated.append(app_name)
-        verbosity = int(options.get('verbosity', 0))
-        # Run syncdb on only the ones needed
-        if verbosity > 0:
-            print "Syncing..."
-        old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
-        old_app_store, cache.app_store = cache.app_store, SortedDict([
-            (k, v) for (k, v) in cache.app_store.items()
-            if get_app_name(k) in apps_needing_sync
-        ])
-        syncdb.Command().execute(**options)
-        settings.INSTALLED_APPS = old_installed
-        cache.app_store = old_app_store
-        # Migrate if needed
-        if options.get('migrate', True):
-            if verbosity > 0:
-                print "Migrating..."
-            management.call_command('migrate', **options)
-        # Be obvious about what we did
-        if verbosity > 0:
-            print "\nSynced:\n > %s" % "\n > ".join(apps_needing_sync)
-        
-        if options.get('migrate', True):
-            if verbosity > 0:
-                print "\nMigrated:\n - %s" % "\n - ".join(apps_migrated)
-        else:
-            if verbosity > 0:
-                print "\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated)
-                print "(use ./manage.py migrate to migrate these)"
diff --git a/apps/south/management/commands/test.py b/apps/south/management/commands/test.py
deleted file mode 100644 (file)
index eef8f31..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-from django.core import management
-from django.core.management.commands import test
-from django.core.management.commands import syncdb
-
-class Command(test.Command):
-    
-    def handle(self, *args, **kwargs):
-        # point at the core syncdb command when creating tests
-        # tests should always be up to date with the most recent model structure
-        management.get_commands()
-        management._commands['syncdb'] = 'django.core'
-        super(Command, self).handle(*args, **kwargs)
\ No newline at end of file
diff --git a/apps/south/migration.py b/apps/south/migration.py
deleted file mode 100644 (file)
index 6452442..0000000
+++ /dev/null
@@ -1,528 +0,0 @@
-
-import datetime
-import os
-import sys
-import traceback
-from django.conf import settings
-from django.db import models
-from django.core.exceptions import ImproperlyConfigured
-from django.core.management import call_command
-from models import MigrationHistory
-from south.db import db
-
-
-def get_app(app):
-    """
-    Returns the migrations module for the given app model name/module, or None
-    if it does not use migrations.
-    """
-    if isinstance(app, (str, unicode)):
-        # If it's a string, use the models module
-        app = models.get_app(app)
-    mod = __import__(app.__name__[:-7], {}, {}, ['migrations'])
-    if hasattr(mod, 'migrations'):
-        return getattr(mod, 'migrations')
-
-
-def get_migrated_apps():
-    """
-    Returns all apps with migrations.
-    """
-    for mapp in models.get_apps():
-        app = get_app(mapp)
-        if app:
-            yield app
-
-
-def get_app_name(app):
-    """
-    Returns the _internal_ app name for the given app module.
-    e.g. for <module django.contrib.auth.models> it will return 'auth'
-    """
-    return app.__name__.split('.')[-2]
-
-
-def get_app_fullname(app):
-    """
-    Returns the full python name of an app - e.g. django.contrib.auth
-    """
-    return app.__name__[:-11]
-
-
-def short_from_long(app_name):
-    return app_name.split(".")[-1]
-
-
-def get_migration_names(app):
-    """
-    Returns a list of migration file names for the given app.
-    """
-    return sorted([
-        filename[:-3]
-        for filename in os.listdir(os.path.dirname(app.__file__))
-        if filename.endswith(".py") and filename != "__init__.py" and not filename.startswith(".")
-    ])
-
-
-def get_migration_classes(app):
-    """
-    Returns a list of migration classes (one for each migration) for the app.
-    """
-    for name in get_migration_names(app):
-        yield get_migration(app, name)
-
-
-def get_migration(app, name):
-    """
-    Returns the migration class implied by 'name'.
-    """
-    try:
-        module = __import__(app.__name__ + "." + name, '', '', ['Migration'])
-        return module.Migration
-    except ImportError:
-        print " ! Migration %s:%s probably doesn't exist." % (get_app_name(app), name)
-        print " - Traceback:"
-        raise
-
-
-def all_migrations():
-    return dict([
-        (app, dict([(name, get_migration(app, name)) for name in get_migration_names(app)]))
-        for app in get_migrated_apps()
-    ])
-
-
-def dependency_tree():
-    tree = all_migrations()
-    
-    # Annotate tree with 'backwards edges'
-    for app, classes in tree.items():
-        for name, cls in classes.items():
-            cls.needs = []
-            if not hasattr(cls, "needed_by"):
-                cls.needed_by = []
-            if hasattr(cls, "depends_on"):
-                for dapp, dname in cls.depends_on:
-                    dapp = get_app(dapp)
-                    if dapp not in tree:
-                        print "Migration %s in app %s depends on unmigrated app %s." % (
-                            name,
-                            get_app_name(app),
-                            dapp,
-                        )
-                        sys.exit(1)
-                    if dname not in tree[dapp]:
-                        print "Migration %s in app %s depends on nonexistent migration %s in app %s." % (
-                            name,
-                            get_app_name(app),
-                            dname,
-                            get_app_name(dapp),
-                        )
-                        sys.exit(1)
-                    cls.needs.append((dapp, dname))
-                    if not hasattr(tree[dapp][dname], "needed_by"):
-                        tree[dapp][dname].needed_by = []
-                    tree[dapp][dname].needed_by.append((app, name))
-    
-    # Sanity check whole tree
-    for app, classes in tree.items():
-        for name, cls in classes.items():
-            cls.dependencies = dependencies(tree, app, name)
-    
-    return tree
-
-
-def nice_trace(trace):
-    return " -> ".join([str((get_app_name(a), n)) for a, n in trace])
-
-
-def dependencies(tree, app, name, trace=[]):
-    # Copy trace to stop pass-by-ref problems
-    trace = trace[:]
-    # Sanity check
-    for papp, pname in trace:
-        if app == papp:
-            if pname == name:
-                print "Found circular dependency: %s" % nice_trace(trace + [(app,name)])
-                sys.exit(1)
-            else:
-                # See if they depend in the same app the wrong way
-                migrations = get_migration_names(app)
-                if migrations.index(name) > migrations.index(pname):
-                    print "Found a lower migration (%s) depending on a higher migration (%s) in the same app (%s)." % (pname, name, get_app_name(app))
-                    print "Path: %s" % nice_trace(trace + [(app,name)])
-                    sys.exit(1)
-    # Get the dependencies of a migration
-    deps = []
-    migration = tree[app][name]
-    for dapp, dname in migration.needs:
-        deps.extend(
-            dependencies(tree, dapp, dname, trace+[(app,name)])
-        )
-    return deps
-
-
-def remove_duplicates(l):
-    m = []
-    for x in l:
-        if x not in m:
-            m.append(x)
-    return m
-
-
-def needed_before_forwards(tree, app, name, sameapp=True):
-    """
-    Returns a list of migrations that must be applied before (app, name),
-    in the order they should be applied.
-    Used to make sure a migration can be applied (and to help apply up to it).
-    """
-    app_migrations = get_migration_names(app)
-    needed = []
-    if sameapp:
-        for aname in app_migrations[:app_migrations.index(name)]:
-            needed += needed_before_forwards(tree, app, aname, False)
-            needed += [(app, aname)]
-    for dapp, dname in tree[app][name].needs:
-        needed += needed_before_forwards(tree, dapp, dname)
-        needed += [(dapp, dname)]
-    return remove_duplicates(needed)
-
-
-def needed_before_backwards(tree, app, name, sameapp=True):
-    """
-    Returns a list of migrations that must be unapplied before (app, name) is,
-    in the order they should be unapplied.
-    Used to make sure a migration can be unapplied (and to help unapply up to it).
-    """
-    app_migrations = get_migration_names(app)
-    needed = []
-    if sameapp:
-        for aname in reversed(app_migrations[app_migrations.index(name)+1:]):
-            needed += needed_before_backwards(tree, app, aname, False)
-            needed += [(app, aname)]
-    for dapp, dname in tree[app][name].needed_by:
-        needed += needed_before_backwards(tree, dapp, dname)
-        needed += [(dapp, dname)]
-    return remove_duplicates(needed)
-
-
-def run_migrations(toprint, torun, recorder, app, migrations, fake=False, db_dry_run=False, silent=False):
-    """
-    Runs the specified migrations forwards, in order.
-    """
-    for migration in migrations:
-        app_name = get_app_name(app)
-        if not silent:
-            print toprint % (app_name, migration)
-        klass = get_migration(app, migration)
-
-        if fake:
-            if not silent:
-                print "   (faked)"
-        else:
-            
-            # If the database doesn't support running DDL inside a transaction
-            # *cough*MySQL*cough* then do a dry run first.
-            if not db.has_ddl_transactions:
-                db.dry_run = True
-                db.debug, old_debug = False, db.debug
-                try:
-                    getattr(klass(), torun)()
-                except:
-                    traceback.print_exc()
-                    print " ! Error found during dry run of migration! Aborting."
-                    return False
-                db.debug = old_debug
-                db.clear_run_data()
-            
-            db.dry_run = bool(db_dry_run)
-            
-            if db.has_ddl_transactions:
-                db.start_transaction()
-            try:
-                getattr(klass(), torun)()
-                db.execute_deferred_sql()
-            except:
-                if db.has_ddl_transactions:
-                    db.rollback_transaction()
-                    raise
-                else:
-                    traceback.print_exc()
-                    print " ! Error found during real run of migration! Aborting."
-                    print
-                    print " ! Since you have a database that does not support running"
-                    print " ! schema-altering statements in transactions, we have had to"
-                    print " ! leave it in an interim state between migrations."
-                    if torun == "forwards":
-                        print
-                        print " ! You *might* be able to recover with:"
-                        db.debug = db.dry_run = True
-                        klass().backwards()
-                    print
-                    print " ! The South developers regret this has happened, and would"
-                    print " ! like to gently persuade you to consider a slightly"
-                    print " ! easier-to-deal-with DBMS."
-                    return False
-            else:
-                if db.has_ddl_transactions:
-                    db.commit_transaction()
-
-        if not db_dry_run:
-            # Record us as having done this
-            recorder(app_name, migration)
-
-
-def run_forwards(app, migrations, fake=False, db_dry_run=False, silent=False):
-    """
-    Runs the specified migrations forwards, in order.
-    """
-    
-    def record(app_name, migration):
-        # Record us as having done this
-        record = MigrationHistory.for_migration(app_name, migration)
-        record.applied = datetime.datetime.utcnow()
-        record.save()
-    
-    return run_migrations(
-        toprint = " > %s: %s",
-        torun = "forwards",
-        recorder = record,
-        app = app,
-        migrations = migrations,
-        fake = fake,
-        db_dry_run = db_dry_run,
-        silent = silent,
-    )
-
-
-def run_backwards(app, migrations, ignore=[], fake=False, db_dry_run=False, silent=False):
-    """
-    Runs the specified migrations backwards, in order, skipping those
-    migrations in 'ignore'.
-    """
-    
-    def record(app_name, migration):
-        # Record us as having not done this
-        record = MigrationHistory.for_migration(app_name, migration)
-        record.delete()
-    
-    return run_migrations(
-        toprint = " < %s: %s",
-        torun = "backwards",
-        recorder = record,
-        app = app,
-        migrations = [x for x in migrations if x not in ignore],
-        fake = fake,
-        db_dry_run = db_dry_run,
-        silent = silent,
-    )
-
-
-def right_side_of(x, y):
-    return left_side_of(reversed(x), reversed(y))
-
-
-def left_side_of(x, y):
-    return list(y)[:len(x)] == list(x)
-
-
-def forwards_problems(tree, forwards, done, silent=False):
-    problems = []
-    for app, name in forwards:
-        if (app, name) not in done:
-            for dapp, dname in needed_before_backwards(tree, app, name):
-                if (dapp, dname) in done:
-                    if not silent:
-                        print " ! Migration (%s, %s) should not have been applied before (%s, %s) but was." % (get_app_name(dapp), dname, get_app_name(app), name)
-                    problems.append(((app, name), (dapp, dname)))
-    return problems
-
-
-
-def backwards_problems(tree, backwards, done, silent=False):
-    problems = []
-    for app, name in backwards:
-        if (app, name) in done:
-            for dapp, dname in needed_before_forwards(tree, app, name):
-                if (dapp, dname) not in done:
-                    if not silent:
-                        print " ! Migration (%s, %s) should have been applied before (%s, %s) but wasn't." % (get_app_name(dapp), dname, get_app_name(app), name)
-                    problems.append(((app, name), (dapp, dname)))
-    return problems
-
-
-def migrate_app(app, target_name=None, resolve_mode=None, fake=False, db_dry_run=False, yes=False, silent=False, load_inital_data=False):
-    
-    app_name = get_app_name(app)
-    
-    db.debug = not silent
-    
-    # If any of the app names in the DB contain a ".", they're from South 0.2 or below, so migrate them
-    longuns = MigrationHistory.objects.filter(app_name__contains=".")
-    if longuns:
-        for mh in longuns:
-            mh.app_name = short_from_long(mh.app_name)
-            mh.save()
-        if not silent:
-            print "- Updated your South 0.2 database."
-    
-    # Find out what delightful migrations we have
-    tree = dependency_tree()
-    migrations = get_migration_names(app)
-    
-    # If there aren't any, quit quizzically
-    if not migrations:
-        if not silent:
-            print "? You have no migrations for the '%s' app. You might want some." % app_name
-        return
-    
-    if target_name not in migrations and target_name not in ["zero", None]:
-        matches = [x for x in migrations if x.startswith(target_name)]
-        if len(matches) == 1:
-            target = migrations.index(matches[0]) + 1
-            if not silent:
-                print " - Soft matched migration %s to %s." % (
-                    target_name,
-                    matches[0]
-                )
-            target_name = matches[0]
-        elif len(matches) > 1:
-            if not silent:
-                print " - Prefix %s matches more than one migration:" % target_name
-                print "     " + "\n     ".join(matches)
-            return
-        else:
-            if not silent:
-                print " ! '%s' is not a migration." % target_name
-            return
-    
-    # Check there's no strange ones in the database
-    ghost_migrations = []
-    for m in MigrationHistory.objects.filter(applied__isnull = False):
-        try:
-            if get_app(m.app_name) not in tree or m.migration not in tree[get_app(m.app_name)]:
-                ghost_migrations.append(m)
-        except ImproperlyConfigured:
-            pass
-            
-        
-    if ghost_migrations:
-        if not silent:
-            print " ! These migrations are in the database but not on disk:"
-            print "   - " + "\n   - ".join(["%s: %s" % (x.app_name, x.migration) for x in ghost_migrations])
-            print " ! I'm not trusting myself; fix this yourself by fiddling"
-            print " ! with the south_migrationhistory table."
-        return
-    
-    # Say what we're doing
-    if not silent:
-        print "Running migrations for %s:" % app_name
-    
-    # Get the forwards and reverse dependencies for this target
-    if target_name == None:
-        target_name = migrations[-1]
-    if target_name == "zero":
-        forwards = []
-        backwards = needed_before_backwards(tree, app, migrations[0]) + [(app, migrations[0])]
-    else:
-        forwards = needed_before_forwards(tree, app, target_name) + [(app, target_name)]
-        # When migrating backwards we want to remove up to and including the
-        # next migration up in this app (not the next one in the global order,
-        # which may belong to another app)
-        try:
-            migration_before_here = migrations[migrations.index(target_name)+1]
-            backwards = needed_before_backwards(tree, app, migration_before_here) + [(app, migration_before_here)]
-        except IndexError:
-            backwards = []
-    
-    # Get the list of currently applied migrations from the db
-    current_migrations = []
-    for m in MigrationHistory.objects.filter(applied__isnull = False):
-        try:
-            current_migrations.append((get_app(m.app_name), m.migration))
-        except ImproperlyConfigured:
-            pass
-    
-    direction = None
-    bad = False
-    
-    # Work out the direction
-    applied_for_this_app = list(MigrationHistory.objects.filter(app_name=app_name, applied__isnull=False).order_by("migration"))
-    if target_name == "zero":
-        direction = -1
-    elif not applied_for_this_app:
-        direction = 1
-    elif migrations.index(target_name) > migrations.index(applied_for_this_app[-1].migration):
-        direction = 1
-    elif migrations.index(target_name) < migrations.index(applied_for_this_app[-1].migration):
-        direction = -1
-    else:
-        direction = None
-    
-    # Is the whole forward branch applied?
-    missing = [step for step in forwards if step not in current_migrations]
-    # If they're all applied, we only know it's not backwards
-    if not missing:
-        direction = None
-    # If the remaining migrations are strictly a right segment of the forwards
-    # trace, we just need to go forwards to our target (and check for badness)
-    else:
-        problems = forwards_problems(tree, forwards, current_migrations, silent=silent)
-        if problems:
-            bad = True
-        direction = 1
-    
-    # What about the whole backward trace then?
-    if not bad:
-        missing = [step for step in backwards if step not in current_migrations]
-        # If they're all missing, stick with the forwards decision
-        if missing == backwards:
-            pass
-        # If what's missing is a strict left segment of backwards (i.e.
-        # all the higher migrations) then we need to go backwards
-        else:
-            problems = backwards_problems(tree, backwards, current_migrations, silent=silent)
-            if problems:
-                bad = True
-            direction = -1
-    
-    if bad and resolve_mode not in ['merge']:
-        if not silent:
-            print " ! Inconsistent migration history"
-            print " ! The following options are available:"
-            print "    --merge: will just attempt the migration ignoring any potential dependency conflicts."
-        sys.exit(1)
-    
-    if direction == 1:
-        if not silent:
-            print " - Migrating forwards to %s." % target_name
-        try:
-            for mapp, mname in forwards:
-                if (mapp, mname) not in current_migrations:
-                    result = run_forwards(mapp, [mname], fake=fake, db_dry_run=db_dry_run, silent=silent)
-                    if result is False: # The migrations errored, but nicely.
-                        return
-        finally:
-            # Call any pending post_syncdb signals
-            db.send_pending_create_signals()
-        # Now load initial data, only if we're really doing things and ended up at current
-        if not fake and not db_dry_run and load_inital_data and target_name == migrations[-1]:
-            print " - Loading initial data for %s." % app_name
-            # Override Django's get_apps call temporarily to only load from the
-            # current app
-            old_get_apps, models.get_apps = (
-                models.get_apps,
-                lambda: [models.get_app(get_app_name(app))],
-            )
-            # Load the initial fixture
-            call_command('loaddata', 'initial_data', verbosity=1)
-            # Un-override
-            models.get_apps = old_get_apps
-    elif direction == -1:
-        if not silent:
-            print " - Migrating backwards to just after %s." % target_name
-        for mapp, mname in backwards:
-            if (mapp, mname) in current_migrations:
-                run_backwards(mapp, [mname], fake=fake, db_dry_run=db_dry_run, silent=silent)
-    else:
-        if not silent:
-            print "- Nothing to migrate."
diff --git a/apps/south/models.py b/apps/south/models.py
deleted file mode 100644 (file)
index e95c79a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-from django.db import models
-
-class MigrationHistory(models.Model):
-    app_name = models.CharField(max_length=255)
-    migration = models.CharField(max_length=255)
-    applied = models.DateTimeField(blank=True, null=True)
-
-    @classmethod
-    def for_migration(cls, app_name, migration):
-        try:
-            return cls.objects.get(
-                app_name = app_name,
-                migration = migration,
-            )
-        except cls.DoesNotExist:
-            return cls(
-                app_name = app_name,
-                migration = migration,
-            )
\ No newline at end of file
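
for_migration() is a get-or-create helper; migration.py above records and forgets applied migrations with it along these lines:

    record = MigrationHistory.for_migration(app_name, migration)
    record.applied = datetime.datetime.utcnow()
    record.save()    # run_forwards marks the migration as applied
    # ... while run_backwards simply deletes the row again:
    # record.delete()
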
diff --git a/apps/south/setup.py b/apps/south/setup.py
deleted file mode 100755 (executable)
index 9e09583..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/python
-
-from setuptools import setup, find_packages
-
-setup(
-    name='South',
-    version='0.4',
-    description='South: Migrations for Django',
-    long_description='South is an intelligent database migrations library for the Django web framework. It is database-independent and DVCS-friendly, as well as a whole host of other features.',
-    author='Andrew Godwin & Andy McCurdy',
-    author_email='south@aeracode.org',
-    url='http://south.aeracode.org/',
-    download_url='http://south.aeracode.org/wiki/Download',
-    classifiers=[
-        "Development Status :: 5 - Production/Stable",
-        "Framework :: Django",
-        "Intended Audience :: Developers",
-        "Intended Audience :: System Administrators",
-        "Intended Audience :: System Administrators",
-        "License :: OSI Approved :: Apache Software License",
-        "Operating System :: OS Independent",
-        "Topic :: Software Development"
-    ],
-    packages=["south", "south.db", "south.management", "south.management.commands", "south.tests", "south.tests.fakeapp", "south.tests.fakeapp.migrations"],
-    package_dir = {"south" : ""},
-)
diff --git a/apps/south/tests/__init__.py b/apps/south/tests/__init__.py
deleted file mode 100644 (file)
index d8953fe..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-
-from django.conf import settings
-
-try:
-    skiptest = settings.SKIP_SOUTH_TESTS
-except:
-    skiptest = False
-
-if not skiptest:
-    from south.tests.db import *
-    from south.tests.logic import *
\ No newline at end of file
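
The try/except above makes the whole suite optional; setting this flag in the Django settings module skips it:

    SKIP_SOUTH_TESTS = True
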
diff --git a/apps/south/tests/db.py b/apps/south/tests/db.py
deleted file mode 100644 (file)
index b7bb145..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-import unittest
-
-from south.db import db
-from django.db import connection, models
-
-# Create a list of error classes from the various database libraries
-errors = []
-try:
-    from psycopg2 import ProgrammingError
-    errors.append(ProgrammingError)
-except ImportError:
-    pass
-errors = tuple(errors)
-
-class TestOperations(unittest.TestCase):
-
-    """
-    Tests if the various DB abstraction calls work.
-    Can only test a limited amount due to DB differences.
-    """
-
-    def setUp(self):
-        db.debug = False
-        db.clear_deferred_sql()
-
-    def test_create(self):
-        """
-        Test creation and deletion of tables.
-        """
-        cursor = connection.cursor()
-        # It needs to take at least 2 args
-        self.assertRaises(TypeError, db.create_table)
-        self.assertRaises(TypeError, db.create_table, "test1")
-        # Empty tables (i.e. no columns) are not fine, so make at least 1
-        db.create_table("test1", [('email_confirmed', models.BooleanField(default=False))])
-        db.start_transaction()
-        # And should exist
-        cursor.execute("SELECT * FROM test1")
-        # Make sure we can't do the same query on an empty table
-        try:
-            cursor.execute("SELECT * FROM nottheretest1")
-            self.fail("Non-existent table could be selected!")
-        except:
-            pass
-        # Clear the dirty transaction
-        db.rollback_transaction()
-        db.start_transaction()
-        # Remove the table
-        db.drop_table("test1")
-        # Make sure it went
-        try:
-            cursor.execute("SELECT * FROM test1")
-            self.fail("Just-deleted table could be selected!")
-        except:
-            pass
-        # Clear the dirty transaction
-        db.rollback_transaction()
-        db.start_transaction()
-        # Try deleting a nonexistent one
-        try:
-            db.delete_table("nottheretest1")
-            self.fail("Non-existent table could be deleted!")
-        except:
-            pass
-        db.rollback_transaction()
-    
-    def test_foreign_keys(self):
-        """
-        Tests foreign key creation, especially uppercase (see #61)
-        """
-        Test = db.mock_model(model_name='Test', db_table='test5a',
-                             db_tablespace='', pk_field_name='ID',
-                             pk_field_type=models.AutoField, pk_field_args=[])
-        cursor = connection.cursor()
-        db.start_transaction()
-        db.create_table("test5a", [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])
-        db.create_table("test5b", [
-            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
-            ('UNIQUE', models.ForeignKey(Test)),
-        ])
-        db.execute_deferred_sql()
-        db.rollback_transaction()
-    
-    def test_rename(self):
-        """
-        Test column renaming
-        """
-        cursor = connection.cursor()
-        db.create_table("test2", [('spam', models.BooleanField(default=False))])
-        db.start_transaction()
-        # Make sure we can select the column
-        cursor.execute("SELECT spam FROM test2")
-        # Rename it
-        db.rename_column("test2", "spam", "eggs")
-        cursor.execute("SELECT eggs FROM test2")
-        try:
-            cursor.execute("SELECT spam FROM test2")
-            self.fail("Just-renamed column could be selected!")
-        except:
-            pass
-        db.rollback_transaction()
-        db.delete_table("test2")
-    
-    def test_dry_rename(self):
-        """
-        Test column renaming while --dry-run is turned on (should do nothing)
-        See ticket #65
-        """
-        cursor = connection.cursor()
-        db.create_table("test2", [('spam', models.BooleanField(default=False))])
-        db.start_transaction()
-        # Make sure we can select the column
-        cursor.execute("SELECT spam FROM test2")
-        # Rename it
-        db.dry_run = True
-        db.rename_column("test2", "spam", "eggs")
-        db.dry_run = False
-        cursor.execute("SELECT spam FROM test2")
-        try:
-            cursor.execute("SELECT eggs FROM test2")
-            self.fail("Dry-renamed new column could be selected!")
-        except:
-            pass
-        db.rollback_transaction()
-        db.delete_table("test2")
-    
-    def test_table_rename(self):
-        """
-        Test table renaming
-        """
-        cursor = connection.cursor()
-        db.create_table("testtr", [('spam', models.BooleanField(default=False))])
-        db.start_transaction()
-        # Make sure we can select the column
-        cursor.execute("SELECT spam FROM testtr")
-        # Rename it
-        db.rename_table("testtr", "testtr2")
-        cursor.execute("SELECT spam FROM testtr2")
-        try:
-            cursor.execute("SELECT spam FROM testtr")
-            self.fail("Just-renamed column could be selected!")
-        except:
-            pass
-        db.rollback_transaction()
-        db.delete_table("testtr2")
-    
-    def test_index(self):
-        """
-        Test the index operations
-        """
-        db.create_table("test3", [
-            ('SELECT', models.BooleanField(default=False)),
-            ('eggs', models.IntegerField(unique=True)),
-        ])
-        db.execute_deferred_sql()
-        db.start_transaction()
-        # Add an index on that column
-        db.create_index("test3", ["SELECT"])
-        # Add another index on two columns
-        db.create_index("test3", ["SELECT", "eggs"])
-        # Delete them both
-        db.delete_index("test3", ["SELECT"])
-        db.delete_index("test3", ["SELECT", "eggs"])
-        # Delete the unique index
-        db.delete_index("test3", ["eggs"])
-        db.rollback_transaction()
-        db.delete_table("test3")
-    
-    def test_alter(self):
-        """
-        Test altering columns/tables
-        """
-        db.create_table("test4", [
-            ('spam', models.BooleanField(default=False)),
-            ('eggs', models.IntegerField()),
-        ])
-        db.start_transaction()
-        # Add a column
-        db.add_column("test4", "add1", models.IntegerField(default=3), keep_default=False)
-        # Add a FK with keep_default=False (#69)
-        User = db.mock_model(model_name='User', db_table='auth_user', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField, pk_field_args=[], pk_field_kwargs={})
-        db.add_column("test4", "user", models.ForeignKey(User), keep_default=False)
-        
-        db.rollback_transaction()
-        db.delete_table("test4")
\ No newline at end of file
diff --git a/apps/south/tests/fakeapp/migrations/0001_spam.py b/apps/south/tests/fakeapp/migrations/0001_spam.py
deleted file mode 100644 (file)
index d814548..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-from south.db import db
-from django.db import models
-
-class Migration:
-    
-    def forwards(self):
-        
-        # Model 'Spam'
-        db.create_table("southtest_spam", (
-            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
-            ('weight', models.FloatField()),
-            ('expires', models.DateTimeField()),
-            ('name', models.CharField(max_length=255))
-        ))
-    
-    def backwards(self):
-        
-        db.delete_table("southtest_spam")
-
diff --git a/apps/south/tests/fakeapp/migrations/0002_eggs.py b/apps/south/tests/fakeapp/migrations/0002_eggs.py
deleted file mode 100644 (file)
index 3ec8399..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-from south.db import db
-from django.db import models
-
-class Migration:
-    
-    def forwards(self):
-        
-        Spam = db.mock_model(model_name='Spam', db_table='southtest_spam', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)
-        
-        db.create_table("southtest_eggs", (
-            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
-            ('size', models.FloatField()),
-            ('quantity', models.IntegerField()),
-            ('spam', models.ForeignKey(Spam)),
-        ))
-    
-    def backwards(self):
-        
-        db.delete_table("southtest_eggs")
-
diff --git a/apps/south/tests/fakeapp/migrations/0003_alter_spam.py b/apps/south/tests/fakeapp/migrations/0003_alter_spam.py
deleted file mode 100644 (file)
index 3a9aea4..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-from south.db import db
-from django.db import models
-
-class Migration:
-    
-    def forwards(self):
-        
-        db.alter_column("southtest_spam", 'name', models.CharField(max_length=255, null=True))
-    
-    def backwards(self):
-        
-        db.alter_column("southtest_spam", 'name', models.CharField(max_length=255))
diff --git a/apps/south/tests/logic.py b/apps/south/tests/logic.py
deleted file mode 100644 (file)
index 862c52d..0000000
+++ /dev/null
@@ -1,292 +0,0 @@
-import unittest
-import datetime
-import sys
-import os
-
-from south import migration
-
-# Add the tests directory so fakeapp is on sys.path
-test_root = os.path.dirname(__file__)
-sys.path.append(test_root)
-
-
-class TestMigrationLogic(unittest.TestCase):
-
-    """
-    Tests if the various logic functions in migration actually work.
-    """
-
-    def create_fake_app(self, name):
-        
-        class Fake:
-            pass
-        
-        fake = Fake()
-        fake.__name__ = name
-        return fake
-
-
-    def create_test_app(self):
-        
-        class Fake:
-            pass
-        
-        fake = Fake()
-        fake.__name__ = "fakeapp.migrations"
-        fake.__file__ = os.path.join(test_root, "fakeapp", "migrations", "__init__.py")
-        return fake
-    
-    
-    def monkeypatch(self):
-        """Swaps out various Django calls for fake ones for our own nefarious purposes."""
-        
-        def new_get_apps():
-            return ['fakeapp']
-        
-        from django.db import models
-        from django.conf import settings
-        models.get_apps_old, models.get_apps = models.get_apps, new_get_apps
-        settings.INSTALLED_APPS, settings.OLD_INSTALLED_APPS = (
-            ["fakeapp"],
-            settings.INSTALLED_APPS,
-        )
-        self.redo_app_cache()
-    setUp = monkeypatch
-    
-    
-    def unmonkeypatch(self):
-        """Undoes what monkeypatch did."""
-        
-        from django.db import models
-        from django.conf import settings
-        models.get_apps = models.get_apps_old
-        settings.INSTALLED_APPS = settings.OLD_INSTALLED_APPS
-        self.redo_app_cache()
-    tearDown = unmonkeypatch
-    
-    
-    def redo_app_cache(self):
-        from django.db.models.loading import AppCache
-        a = AppCache()
-        a.loaded = False
-        a._populate()
-    
-
-    def test_get_app_name(self):
-        self.assertEqual(
-            "southtest",
-            migration.get_app_name(self.create_fake_app("southtest.migrations")),
-        )
-        self.assertEqual(
-            "baz",
-            migration.get_app_name(self.create_fake_app("foo.bar.baz.migrations")),
-        )
-    
-    
-    def test_get_migrated_apps(self):
-        
-        P1 = __import__("fakeapp.migrations", {}, {}, [''])
-        
-        self.assertEqual(
-            [P1],
-            list(migration.get_migrated_apps()),
-        )
-    
-    
-    def test_get_app(self):
-        
-        P1 = __import__("fakeapp.migrations", {}, {}, [''])
-        
-        self.assertEqual(P1, migration.get_app("fakeapp"))
-        self.assertEqual(P1, migration.get_app(self.create_fake_app("fakeapp.models")))
-    
-    
-    def test_get_app_fullname(self):
-        self.assertEqual(
-            "southtest",
-            migration.get_app_fullname(self.create_fake_app("southtest.migrations")),
-        )
-        self.assertEqual(
-            "foo.bar.baz",
-            migration.get_app_fullname(self.create_fake_app("foo.bar.baz.migrations")),
-        )
-    
-    
-    def test_get_migration_names(self):
-        
-        app = self.create_test_app()
-        
-        self.assertEqual(
-            ["0001_spam", "0002_eggs", "0003_alter_spam"],
-            migration.get_migration_names(app),
-        )
-    
-    
-    def test_get_migration_classes(self):
-        
-        app = self.create_test_app()
-        
-        # Can't use vanilla import, modules beginning with numbers aren't in grammar
-        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
-        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
-        M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
-        
-        self.assertEqual(
-            [M1, M2, M3],
-            list(migration.get_migration_classes(app)),
-        )
-    
-    
-    def test_get_migration(self):
-        
-        app = self.create_test_app()
-        
-        # Can't use vanilla import, modules beginning with numbers aren't in grammar
-        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
-        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
-        
-        self.assertEqual(M1, migration.get_migration(app, "0001_spam"))
-        self.assertEqual(M2, migration.get_migration(app, "0002_eggs"))
-        
-        self.assertRaises((ImportError, ValueError), migration.get_migration, app, "0001_jam")
-    
-    
-    def test_all_migrations(self):
-        
-        app = migration.get_app("fakeapp")
-        
-        self.assertEqual(
-            {app: {
-                "0001_spam": migration.get_migration(app, "0001_spam"),
-                "0002_eggs": migration.get_migration(app, "0002_eggs"),
-                "0003_alter_spam": migration.get_migration(app, "0003_alter_spam"),
-            }},
-            migration.all_migrations(),
-        )
-    
-    
-    def assertListEqual(self, list1, list2):
-        list1 = list(list1)
-        list2 = list(list2)
-        list1.sort()
-        list2.sort()
-        return self.assertEqual(list1, list2)
-    
-    
-    def test_apply_migrations(self):
-        
-        app = migration.get_app("fakeapp")
-        
-        # We should start with no migrations
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
-        
-        # Apply them normally
-        migration.migrate_app(app, target_name=None, resolve_mode=None, fake=False, silent=True)
-        
-        # We should finish with all migrations
-        self.assertListEqual(
-            (
-                (u"fakeapp", u"0001_spam"),
-                (u"fakeapp", u"0002_eggs"),
-                (u"fakeapp", u"0003_alter_spam"),
-            ),
-            migration.MigrationHistory.objects.values_list("app_name", "migration"),
-        )
-        
-        # Now roll them backwards
-        migration.migrate_app(app, target_name="zero", resolve_mode=None, fake=False, silent=True)
-        
-        # Finish with none
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
-    
-    
-    def test_migration_merge_forwards(self):
-        
-        app = migration.get_app("fakeapp")
-        
-        # We should start with no migrations
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
-        
-        # Insert one in the wrong order
-        migration.MigrationHistory.objects.create(
-            app_name = "fakeapp",
-            migration = "0002_eggs",
-            applied = datetime.datetime.now(),
-        )
-        
-        # Did it go in?
-        self.assertListEqual(
-            (
-                (u"fakeapp", u"0002_eggs"),
-            ),
-            migration.MigrationHistory.objects.values_list("app_name", "migration"),
-        )
-        
-        # Apply them normally
-        try:
-            migration.migrate_app(app, target_name=None, resolve_mode=None, fake=False, silent=True)
-        except SystemExit:
-            pass
-        
-        # Nothing should have changed (no merge mode!)
-        self.assertListEqual(
-            (
-                (u"fakeapp", u"0002_eggs"),
-            ),
-            migration.MigrationHistory.objects.values_list("app_name", "migration"),
-        )
-        
-        # Apply with merge
-        migration.migrate_app(app, target_name=None, resolve_mode="merge", fake=False, silent=True)
-        
-        # We should finish with all migrations
-        self.assertListEqual(
-            (
-                (u"fakeapp", u"0001_spam"),
-                (u"fakeapp", u"0002_eggs"),
-                (u"fakeapp", u"0003_alter_spam"),
-            ),
-            migration.MigrationHistory.objects.values_list("app_name", "migration"),
-        )
-        
-        # Now roll them backwards
-        migration.migrate_app(app, target_name="0002", resolve_mode=None, fake=False, silent=True)
-        migration.migrate_app(app, target_name="0001", resolve_mode=None, fake=True, silent=True)
-        migration.migrate_app(app, target_name="zero", resolve_mode=None, fake=False, silent=True)
-        
-        # Finish with none
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
-    
-    def test_alter_column_null(self):
-        def null_ok():
-            from django.db import connection, transaction
-            # the DBAPI introspection module fails on postgres NULLs.
-            cursor = connection.cursor()
-            try:
-                cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, 10.1, now(), NULL);")
-            except:
-                transaction.rollback()
-                return False
-            else:
-                cursor.execute("DELETE FROM southtest_spam")
-                transaction.commit()
-                return True
-        
-        app = migration.get_app("fakeapp")
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
-        
-        # by default name is NOT NULL
-        migration.migrate_app(app, target_name="0002", resolve_mode=None, fake=False, silent=True)
-        self.failIf(null_ok())
-        
-        # after 0003, it should be NULL
-        migration.migrate_app(app, target_name="0003", resolve_mode=None, fake=False, silent=True)
-        self.assert_(null_ok())
-
-        # make sure it is NOT NULL again
-        migration.migrate_app(app, target_name="0002", resolve_mode=None, fake=False, silent=True)
-        self.failIf(null_ok(), 'name is still nullable after migrating back to 0002')
-        
-        # finish with no migrations, otherwise other tests fail...
-        migration.migrate_app(app, target_name="zero", resolve_mode=None, fake=False, silent=True)
-        self.assertEqual(list(migration.MigrationHistory.objects.all()), [])
\ No newline at end of file
diff --git a/lib/feedparser.py b/lib/feedparser.py
deleted file mode 100644 (file)
index bb802df..0000000
+++ /dev/null
@@ -1,2858 +0,0 @@
-#!/usr/bin/env python
-"""Universal feed parser
-
-Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
-
-Visit http://feedparser.org/ for the latest version
-Visit http://feedparser.org/docs/ for the latest documentation
-
-Required: Python 2.1 or later
-Recommended: Python 2.3 or later
-Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
-"""
-
-__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
-__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE."""
-__author__ = "Mark Pilgrim <http://diveintomark.org/>"
-__contributors__ = ["Jason Diamond <http://injektilo.org/>",
-                    "John Beimler <http://john.beimler.org/>",
-                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
-                    "Aaron Swartz <http://aaronsw.com/>",
-                    "Kevin Marks <http://epeus.blogspot.com/>"]
-_debug = 0
-
-# HTTP "User-Agent" header to send to servers when downloading feeds.
-# If you are embedding feedparser in a larger application, you should
-# change this to your application name and URL.
-USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
-
-# HTTP "Accept" header to send to servers when downloading feeds.  If you don't
-# want to send an Accept header, set this to None.
-ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
-
-# List of preferred XML parsers, by SAX driver name.  These will be tried first,
-# but if they're not installed, Python will keep searching through its own list
-# of pre-installed parsers until it finds one that supports everything we need.
-PREFERRED_XML_PARSERS = ["drv_libxml2"]
-
-# If you want feedparser to automatically run HTML markup through HTML Tidy, set
-# this to 1.  Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
-# or utidylib <http://utidylib.berlios.de/>.
-TIDY_MARKUP = 0
-
-# List of Python interfaces for HTML Tidy, in order of preference.  Only useful
-# if TIDY_MARKUP = 1
-PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
-
-# ---------- required modules (should come with any Python distribution) ----------
-import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
-try:
-    from cStringIO import StringIO as _StringIO
-except:
-    from StringIO import StringIO as _StringIO
-
-# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
-
-# gzip is included with most Python distributions, but may not be available if you compiled your own
-try:
-    import gzip
-except:
-    gzip = None
-try:
-    import zlib
-except:
-    zlib = None
-
-# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
-# been tested with the built-in SAX parser, PyXML, and libxml2.  On platforms where the
-# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
-# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
-try:
-    import xml.sax
-    xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
-    from xml.sax.saxutils import escape as _xmlescape
-    _XML_AVAILABLE = 1
-except:
-    _XML_AVAILABLE = 0
-    def _xmlescape(data):
-        data = data.replace('&', '&amp;')
-        data = data.replace('>', '&gt;')
-        data = data.replace('<', '&lt;')
-        return data
-
-# base64 support for Atom feeds that contain embedded binary data
-try:
-    import base64, binascii
-except:
-    base64 = binascii = None
-
-# cjkcodecs and iconv_codec provide support for more character encodings.
-# Both are available from http://cjkpython.i18n.org/
-try:
-    import cjkcodecs.aliases
-except:
-    pass
-try:
-    import iconv_codec
-except:
-    pass
-
-# chardet library auto-detects character encodings
-# Download from http://chardet.feedparser.org/
-try:
-    import chardet
-    if _debug:
-        import chardet.constants
-        chardet.constants._debug = 1
-except:
-    chardet = None
-
-# ---------- don't touch these ----------
-class ThingsNobodyCaresAboutButMe(Exception): pass
-class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
-class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
-class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
-class UndeclaredNamespace(Exception): pass
-
-sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
-sgmllib.special = re.compile('<!')
-sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
-
-SUPPORTED_VERSIONS = {'': 'unknown',
-                      'rss090': 'RSS 0.90',
-                      'rss091n': 'RSS 0.91 (Netscape)',
-                      'rss091u': 'RSS 0.91 (Userland)',
-                      'rss092': 'RSS 0.92',
-                      'rss093': 'RSS 0.93',
-                      'rss094': 'RSS 0.94',
-                      'rss20': 'RSS 2.0',
-                      'rss10': 'RSS 1.0',
-                      'rss': 'RSS (unknown version)',
-                      'atom01': 'Atom 0.1',
-                      'atom02': 'Atom 0.2',
-                      'atom03': 'Atom 0.3',
-                      'atom10': 'Atom 1.0',
-                      'atom': 'Atom (unknown version)',
-                      'cdf': 'CDF',
-                      'hotrss': 'Hot RSS'
-                      }
-
-try:
-    UserDict = dict
-except NameError:
-    # Python 2.1 does not have dict
-    from UserDict import UserDict
-    def dict(aList):
-        rc = {}
-        for k, v in aList:
-            rc[k] = v
-        return rc
-
-class FeedParserDict(UserDict):
-    keymap = {'channel': 'feed',
-              'items': 'entries',
-              'guid': 'id',
-              'date': 'updated',
-              'date_parsed': 'updated_parsed',
-              'description': ['subtitle', 'summary'],
-              'url': ['href'],
-              'modified': 'updated',
-              'modified_parsed': 'updated_parsed',
-              'issued': 'published',
-              'issued_parsed': 'published_parsed',
-              'copyright': 'rights',
-              'copyright_detail': 'rights_detail',
-              'tagline': 'subtitle',
-              'tagline_detail': 'subtitle_detail'}
-    def __getitem__(self, key):
-        if key == 'category':
-            return UserDict.__getitem__(self, 'tags')[0]['term']
-        if key == 'categories':
-            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
-        realkey = self.keymap.get(key, key)
-        if type(realkey) == types.ListType:
-            for k in realkey:
-                if UserDict.has_key(self, k):
-                    return UserDict.__getitem__(self, k)
-        if UserDict.has_key(self, key):
-            return UserDict.__getitem__(self, key)
-        return UserDict.__getitem__(self, realkey)
-
-    def __setitem__(self, key, value):
-        for k in self.keymap.keys():
-            if key == k:
-                key = self.keymap[k]
-                if type(key) == types.ListType:
-                    key = key[0]
-        return UserDict.__setitem__(self, key, value)
-
-    def get(self, key, default=None):
-        if self.has_key(key):
-            return self[key]
-        else:
-            return default
-
-    def setdefault(self, key, value):
-        if not self.has_key(key):
-            self[key] = value
-        return self[key]
-        
-    def has_key(self, key):
-        try:
-            return hasattr(self, key) or UserDict.has_key(self, key)
-        except AttributeError:
-            return False
-        
-    def __getattr__(self, key):
-        try:
-            return self.__dict__[key]
-        except KeyError:
-            pass
-        try:
-            assert not key.startswith('_')
-            return self.__getitem__(key)
-        except:
-            raise AttributeError, "object has no attribute '%s'" % key
-
-    def __setattr__(self, key, value):
-        if key.startswith('_') or key == 'data':
-            self.__dict__[key] = value
-        else:
-            return self.__setitem__(key, value)
-
-    def __contains__(self, key):
-        return self.has_key(key)
-
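
The keymap above keeps the old and new feed vocabularies interchangeable: keys are translated on assignment and lookup, and __getattr__ adds attribute-style access on top. A minimal sketch of the resulting behaviour (Python 2, like the rest of the module):

    d = FeedParserDict()
    d['channel'] = {'title': 'Example feed'}
    print d['feed']['title']   # 'channel' is remapped to 'feed' on assignment
    print d.feed['title']      # attribute access goes through __getattr__
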
-def zopeCompatibilityHack():
-    global FeedParserDict
-    del FeedParserDict
-    def FeedParserDict(aDict=None):
-        rc = {}
-        if aDict:
-            rc.update(aDict)
-        return rc
-
-_ebcdic_to_ascii_map = None
-def _ebcdic_to_ascii(s):
-    global _ebcdic_to_ascii_map
-    if not _ebcdic_to_ascii_map:
-        emap = (
-            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
-            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
-            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
-            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
-            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
-            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
-            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
-            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
-            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
-            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
-            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
-            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
-            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
-            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
-            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
-            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
-            )
-        import string
-        _ebcdic_to_ascii_map = string.maketrans( \
-            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
-    return s.translate(_ebcdic_to_ascii_map)
-
-_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
-def _urljoin(base, uri):
-    uri = _urifixer.sub(r'\1\3', uri)
-    return urlparse.urljoin(base, uri)
-
-class _FeedParserMixin:
-    namespaces = {'': '',
-                  'http://backend.userland.com/rss': '',
-                  'http://blogs.law.harvard.edu/tech/rss': '',
-                  'http://purl.org/rss/1.0/': '',
-                  'http://my.netscape.com/rdf/simple/0.9/': '',
-                  'http://example.com/newformat#': '',
-                  'http://example.com/necho': '',
-                  'http://purl.org/echo/': '',
-                  'uri/of/echo/namespace#': '',
-                  'http://purl.org/pie/': '',
-                  'http://purl.org/atom/ns#': '',
-                  'http://www.w3.org/2005/Atom': '',
-                  'http://purl.org/rss/1.0/modules/rss091#': '',
-                  
-                  'http://webns.net/mvcb/':                               'admin',
-                  'http://purl.org/rss/1.0/modules/aggregation/':         'ag',
-                  'http://purl.org/rss/1.0/modules/annotate/':            'annotate',
-                  'http://media.tangent.org/rss/1.0/':                    'audio',
-                  'http://backend.userland.com/blogChannelModule':        'blogChannel',
-                  'http://web.resource.org/cc/':                          'cc',
-                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
-                  'http://purl.org/rss/1.0/modules/company':              'co',
-                  'http://purl.org/rss/1.0/modules/content/':             'content',
-                  'http://my.theinfo.org/changed/1.0/rss/':               'cp',
-                  'http://purl.org/dc/elements/1.1/':                     'dc',
-                  'http://purl.org/dc/terms/':                            'dcterms',
-                  'http://purl.org/rss/1.0/modules/email/':               'email',
-                  'http://purl.org/rss/1.0/modules/event/':               'ev',
-                  'http://rssnamespace.org/feedburner/ext/1.0':           'feedburner',
-                  'http://freshmeat.net/rss/fm/':                         'fm',
-                  'http://xmlns.com/foaf/0.1/':                           'foaf',
-                  'http://www.w3.org/2003/01/geo/wgs84_pos#':             'geo',
-                  'http://postneo.com/icbm/':                             'icbm',
-                  'http://purl.org/rss/1.0/modules/image/':               'image',
-                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd':           'itunes',
-                  'http://example.com/DTDs/PodCast-1.0.dtd':              'itunes',
-                  'http://purl.org/rss/1.0/modules/link/':                'l',
-                  'http://search.yahoo.com/mrss':                         'media',
-                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
-                  'http://prismstandard.org/namespaces/1.2/basic/':       'prism',
-                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#':          'rdf',
-                  'http://www.w3.org/2000/01/rdf-schema#':                'rdfs',
-                  'http://purl.org/rss/1.0/modules/reference/':           'ref',
-                  'http://purl.org/rss/1.0/modules/richequiv/':           'reqv',
-                  'http://purl.org/rss/1.0/modules/search/':              'search',
-                  'http://purl.org/rss/1.0/modules/slash/':               'slash',
-                  'http://schemas.xmlsoap.org/soap/envelope/':            'soap',
-                  'http://purl.org/rss/1.0/modules/servicestatus/':       'ss',
-                  'http://hacks.benhammersley.com/rss/streaming/':        'str',
-                  'http://purl.org/rss/1.0/modules/subscription/':        'sub',
-                  'http://purl.org/rss/1.0/modules/syndication/':         'sy',
-                  'http://purl.org/rss/1.0/modules/taxonomy/':            'taxo',
-                  'http://purl.org/rss/1.0/modules/threading/':           'thr',
-                  'http://purl.org/rss/1.0/modules/textinput/':           'ti',
-                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
-                  'http://wellformedweb.org/commentAPI/':                 'wfw',
-                  'http://purl.org/rss/1.0/modules/wiki/':                'wiki',
-                  'http://www.w3.org/1999/xhtml':                         'xhtml',
-                  'http://www.w3.org/XML/1998/namespace':                 'xml',
-                  'http://schemas.pocketsoap.com/rss/myDescModule/':      'szf'
-}
-    _matchnamespaces = {}
-
-    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
-    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
-    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
-    html_types = ['text/html', 'application/xhtml+xml']
-    
-    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
-        if _debug: sys.stderr.write('initializing FeedParser\n')
-        if not self._matchnamespaces:
-            for k, v in self.namespaces.items():
-                self._matchnamespaces[k.lower()] = v
-        self.feeddata = FeedParserDict() # feed-level data
-        self.encoding = encoding # character encoding
-        self.entries = [] # list of entry-level data
-        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
-        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
-
-        # the following are used internally to track state;
-        # this is really out of control and should be refactored
-        self.infeed = 0
-        self.inentry = 0
-        self.incontent = 0
-        self.intextinput = 0
-        self.inimage = 0
-        self.inauthor = 0
-        self.incontributor = 0
-        self.inpublisher = 0
-        self.insource = 0
-        self.sourcedata = FeedParserDict()
-        self.contentparams = FeedParserDict()
-        self._summaryKey = None
-        self.namespacemap = {}
-        self.elementstack = []
-        self.basestack = []
-        self.langstack = []
-        self.baseuri = baseuri or ''
-        self.lang = baselang or None
-        if baselang:
-            self.feeddata['language'] = baselang
-
-    def unknown_starttag(self, tag, attrs):
-        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
-        # normalize attrs
-        attrs = [(k.lower(), v) for k, v in attrs]
-        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
-        
-        # track xml:base and xml:lang
-        attrsD = dict(attrs)
-        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
-        self.baseuri = _urljoin(self.baseuri, baseuri)
-        lang = attrsD.get('xml:lang', attrsD.get('lang'))
-        if lang == '':
-            # xml:lang could be explicitly set to '', we need to capture that
-            lang = None
-        elif lang is None:
-            # if no xml:lang is specified, use parent lang
-            lang = self.lang
-        if lang:
-            if tag in ('feed', 'rss', 'rdf:RDF'):
-                self.feeddata['language'] = lang
-        self.lang = lang
-        self.basestack.append(self.baseuri)
-        self.langstack.append(lang)
-        
-        # track namespaces
-        for prefix, uri in attrs:
-            if prefix.startswith('xmlns:'):
-                self.trackNamespace(prefix[6:], uri)
-            elif prefix == 'xmlns':
-                self.trackNamespace(None, uri)
-
-        # track inline content
-        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
-            # element declared itself as escaped markup, but it isn't really
-            self.contentparams['type'] = 'application/xhtml+xml'
-        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
-            # Note: probably shouldn't simply recreate localname here, but
-            # our namespace handling isn't actually 100% correct in cases where
-            # the feed redefines the default namespace (which is actually
-            # the usual case for inline content, thanks Sam), so here we
-            # cheat and just reconstruct the element based on localname
-            # because that compensates for the bugs in our namespace handling.
-            # This will horribly munge inline content with non-empty qnames,
-            # but nobody actually does that, so I'm not fixing it.
-            tag = tag.split(':')[-1]
-            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
-
-        # match namespaces
-        if tag.find(':') <> -1:
-            prefix, suffix = tag.split(':', 1)
-        else:
-            prefix, suffix = '', tag
-        prefix = self.namespacemap.get(prefix, prefix)
-        if prefix:
-            prefix = prefix + '_'
-
-        # special hack for better tracking of empty textinput/image elements in ill-formed feeds
-        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
-            self.intextinput = 0
-        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
-            self.inimage = 0
-        
-        # call special handler (if defined) or default handler
-        methodname = '_start_' + prefix + suffix
-        try:
-            method = getattr(self, methodname)
-            return method(attrsD)
-        except AttributeError:
-            return self.push(prefix + suffix, 1)
-
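-    # Dispatch note for unknown_starttag() above (descriptive comment, added
-    # for clarity): after namespace mapping, each element is routed to a
-    # method named '_start_' + prefix + suffix when one exists, so a
-    # <dc:creator> start tag ends up calling self._start_dc_creator(attrsD);
-    # any element without a dedicated handler falls back to push().
-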
-    def unknown_endtag(self, tag):
-        if _debug: sys.stderr.write('end %s\n' % tag)
-        # match namespaces
-        if tag.find(':') <> -1:
-            prefix, suffix = tag.split(':', 1)
-        else:
-            prefix, suffix = '', tag
-        prefix = self.namespacemap.get(prefix, prefix)
-        if prefix:
-            prefix = prefix + '_'
-
-        # call special handler (if defined) or default handler
-        methodname = '_end_' + prefix + suffix
-        try:
-            method = getattr(self, methodname)
-            method()
-        except AttributeError:
-            self.pop(prefix + suffix)
-
-        # track inline content
-        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
-            # element declared itself as escaped markup, but it isn't really
-            self.contentparams['type'] = 'application/xhtml+xml'
-        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
-            tag = tag.split(':')[-1]
-            self.handle_data('</%s>' % tag, escape=0)
-
-        # track xml:base and xml:lang going out of scope
-        if self.basestack:
-            self.basestack.pop()
-            if self.basestack and self.basestack[-1]:
-                self.baseuri = self.basestack[-1]
-        if self.langstack:
-            self.langstack.pop()
-            if self.langstack: # and (self.langstack[-1] is not None):
-                self.lang = self.langstack[-1]
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        if not self.elementstack: return
-        ref = ref.lower()
-        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
-            text = '&#%s;' % ref
-        else:
-            if ref[0] == 'x':
-                c = int(ref[1:], 16)
-            else:
-                c = int(ref)
-            text = unichr(c).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        if not self.elementstack: return
-        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
-        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
-            text = '&%s;' % ref
-        else:
-            # entity resolution graciously donated by Aaron Swartz
-            def name2cp(k):
-                import htmlentitydefs
-                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
-                    return htmlentitydefs.name2codepoint[k]
-                k = htmlentitydefs.entitydefs[k]
-                if k.startswith('&#') and k.endswith(';'):
-                    return int(k[2:-1]) # not in latin-1
-                return ord(k)
-            try: name2cp(ref)
-            except KeyError: text = '&%s;' % ref
-            else: text = unichr(name2cp(ref)).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
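-    # Resolution sketch for handle_entityref() above (illustrative, assumed
-    # value): an entity outside the XML builtins is looked up in
-    # htmlentitydefs and re-encoded, e.g. 'copy' resolves to codepoint 169
-    # and is appended as the UTF-8 bytes '\xc2\xa9'.
-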
-    def handle_data(self, text, escape=1):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        if not self.elementstack: return
-        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
-            text = _xmlescape(text)
-        self.elementstack[-1][2].append(text)
-
-    def handle_comment(self, text):
-        # called for each comment, e.g. <!-- insert message here -->
-        pass
-
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def parse_declaration(self, i):
-        # override internal declaration handler to handle CDATA blocks
-        if _debug: sys.stderr.write('entering parse_declaration\n')
-        if self.rawdata[i:i+9] == '<![CDATA[':
-            k = self.rawdata.find(']]>', i)
-            if k == -1: k = len(self.rawdata)
-            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
-            return k+3
-        else:
-            k = self.rawdata.find('>', i)
-            return k+1
-
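-    # CDATA sketch for parse_declaration() above (illustrative, assumed
-    # input): with rawdata '<![CDATA[a < b]]>...' at position i, the body is
-    # XML-escaped and emitted as character data ('a &lt; b'), and parsing
-    # resumes just past the closing ']]>'.
-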
-    def mapContentType(self, contentType):
-        contentType = contentType.lower()
-        if contentType == 'text':
-            contentType = 'text/plain'
-        elif contentType == 'html':
-            contentType = 'text/html'
-        elif contentType == 'xhtml':
-            contentType = 'application/xhtml+xml'
-        return contentType
-    
-    def trackNamespace(self, prefix, uri):
-        loweruri = uri.lower()
-        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
-            self.version = 'rss090'
-        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
-            self.version = 'rss10'
-        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
-            self.version = 'atom10'
-        if loweruri.find('backend.userland.com/rss') <> -1:
-            # match any backend.userland.com namespace
-            uri = 'http://backend.userland.com/rss'
-            loweruri = uri
-        if self._matchnamespaces.has_key(loweruri):
-            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
-            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
-        else:
-            self.namespacesInUse[prefix or ''] = uri
-
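-    # Version-sniffing sketch for trackNamespace() above (illustrative,
-    # assumed input): the first matching default namespace fixes the feed
-    # version before any elements are handled, e.g.
-    # >>> p = _FeedParserMixin()
-    # >>> p.trackNamespace(None, 'http://purl.org/rss/1.0/')
-    # >>> p.version
-    # 'rss10'
-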
-    def resolveURI(self, uri):
-        return _urljoin(self.baseuri or '', uri)
-    
-    def decodeEntities(self, element, data):
-        return data
-
-    def push(self, element, expectingText):
-        self.elementstack.append([element, expectingText, []])
-
-    def pop(self, element, stripWhitespace=1):
-        if not self.elementstack: return
-        if self.elementstack[-1][0] != element: return
-        
-        element, expectingText, pieces = self.elementstack.pop()
-        output = ''.join(pieces)
-        if stripWhitespace:
-            output = output.strip()
-        if not expectingText: return output
-
-        # decode base64 content
-        if base64 and self.contentparams.get('base64', 0):
-            try:
-                output = base64.decodestring(output)
-            except binascii.Error:
-                pass
-            except binascii.Incomplete:
-                pass
-                
-        # resolve relative URIs
-        if (element in self.can_be_relative_uri) and output:
-            output = self.resolveURI(output)
-        
-        # decode entities within embedded markup
-        if not self.contentparams.get('base64', 0):
-            output = self.decodeEntities(element, output)
-
-        # remove temporary cruft from contentparams
-        try:
-            del self.contentparams['mode']
-        except KeyError:
-            pass
-        try:
-            del self.contentparams['base64']
-        except KeyError:
-            pass
-
-        # resolve relative URIs within embedded markup
-        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
-            if element in self.can_contain_relative_uris:
-                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
-        
-        # sanitize embedded markup
-        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
-            if element in self.can_contain_dangerous_markup:
-                output = _sanitizeHTML(output, self.encoding)
-
-        if self.encoding and type(output) != type(u''):
-            try:
-                output = unicode(output, self.encoding)
-            except:
-                pass
-
-        # categories/tags/keywords/whatever are handled in _end_category
-        if element == 'category':
-            return output
-        
-        # store output in appropriate place(s)
-        if self.inentry and not self.insource:
-            if element == 'content':
-                self.entries[-1].setdefault(element, [])
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                self.entries[-1][element].append(contentparams)
-            elif element == 'link':
-                self.entries[-1][element] = output
-                if output:
-                    self.entries[-1]['links'][-1]['href'] = output
-            else:
-                if element == 'description':
-                    element = 'summary'
-                self.entries[-1][element] = output
-                if self.incontent:
-                    contentparams = copy.deepcopy(self.contentparams)
-                    contentparams['value'] = output
-                    self.entries[-1][element + '_detail'] = contentparams
-        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
-            context = self._getContext()
-            if element == 'description':
-                element = 'subtitle'
-            context[element] = output
-            if element == 'link':
-                context['links'][-1]['href'] = output
-            elif self.incontent:
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                context[element + '_detail'] = contentparams
-        return output
-
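-    # Pipeline summary for pop() above (descriptive comment, added for
-    # clarity): collected text passes through, in order, optional base64
-    # decoding, relative-URI resolution for elements listed in
-    # can_be_relative_uri, entity decoding, then (for HTML-typed content)
-    # embedded relative-URI resolution and HTML sanitizing, and finally a
-    # best-effort decode to unicode before being stored on the current
-    # entry, source, or feed context.
-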
-    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
-        self.incontent += 1
-        self.contentparams = FeedParserDict({
-            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
-            'language': self.lang,
-            'base': self.baseuri})
-        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
-        self.push(tag, expectingText)
-
-    def popContent(self, tag):
-        value = self.pop(tag)
-        self.incontent -= 1
-        self.contentparams.clear()
-        return value
-        
-    def _mapToStandardPrefix(self, name):
-        colonpos = name.find(':')
-        if colonpos <> -1:
-            prefix = name[:colonpos]
-            suffix = name[colonpos+1:]
-            prefix = self.namespacemap.get(prefix, prefix)
-            name = prefix + ':' + suffix
-        return name
-        
-    def _getAttribute(self, attrsD, name):
-        return attrsD.get(self._mapToStandardPrefix(name))
-
-    def _isBase64(self, attrsD, contentparams):
-        if attrsD.get('mode', '') == 'base64':
-            return 1
-        if self.contentparams['type'].startswith('text/'):
-            return 0
-        if self.contentparams['type'].endswith('+xml'):
-            return 0
-        if self.contentparams['type'].endswith('/xml'):
-            return 0
-        return 1
-
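-    # Decision sketch for _isBase64() above (derived from the checks):
-    #   mode="base64"                         -> treat as base64
-    #   type text/*, */xml, or *+xml          -> plain text, not base64
-    #   any other type (e.g. an image type)   -> treat as base64
-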
-    def _itsAnHrefDamnIt(self, attrsD):
-        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
-        if href:
-            try:
-                del attrsD['url']
-            except KeyError:
-                pass
-            try:
-                del attrsD['uri']
-            except KeyError:
-                pass
-            attrsD['href'] = href
-        return attrsD
-    
-    def _save(self, key, value):
-        context = self._getContext()
-        context.setdefault(key, value)
-
-    def _start_rss(self, attrsD):
-        versionmap = {'0.91': 'rss091u',
-                      '0.92': 'rss092',
-                      '0.93': 'rss093',
-                      '0.94': 'rss094'}
-        if not self.version:
-            attr_version = attrsD.get('version', '')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            elif attr_version.startswith('2.'):
-                self.version = 'rss20'
-            else:
-                self.version = 'rss'
-    
-    def _start_dlhottitles(self, attrsD):
-        self.version = 'hotrss'
-
-    def _start_channel(self, attrsD):
-        self.infeed = 1
-        self._cdf_common(attrsD)
-    _start_feedinfo = _start_channel
-
-    def _cdf_common(self, attrsD):
-        if attrsD.has_key('lastmod'):
-            self._start_modified({})
-            self.elementstack[-1][-1] = attrsD['lastmod']
-            self._end_modified()
-        if attrsD.has_key('href'):
-            self._start_link({})
-            self.elementstack[-1][-1] = attrsD['href']
-            self._end_link()
-    
-    def _start_feed(self, attrsD):
-        self.infeed = 1
-        versionmap = {'0.1': 'atom01',
-                      '0.2': 'atom02',
-                      '0.3': 'atom03'}
-        if not self.version:
-            attr_version = attrsD.get('version')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            else:
-                self.version = 'atom'
-
-    def _end_channel(self):
-        self.infeed = 0
-    _end_feed = _end_channel
-    
-    def _start_image(self, attrsD):
-        self.inimage = 1
-        self.push('image', 0)
-        context = self._getContext()
-        context.setdefault('image', FeedParserDict())
-            
-    def _end_image(self):
-        self.pop('image')
-        self.inimage = 0
-
-    def _start_textinput(self, attrsD):
-        self.intextinput = 1
-        self.push('textinput', 0)
-        context = self._getContext()
-        context.setdefault('textinput', FeedParserDict())
-    _start_textInput = _start_textinput
-    
-    def _end_textinput(self):
-        self.pop('textinput')
-        self.intextinput = 0
-    _end_textInput = _end_textinput
-
-    def _start_author(self, attrsD):
-        self.inauthor = 1
-        self.push('author', 1)
-    _start_managingeditor = _start_author
-    _start_dc_author = _start_author
-    _start_dc_creator = _start_author
-    _start_itunes_author = _start_author
-
-    def _end_author(self):
-        self.pop('author')
-        self.inauthor = 0
-        self._sync_author_detail()
-    _end_managingeditor = _end_author
-    _end_dc_author = _end_author
-    _end_dc_creator = _end_author
-    _end_itunes_author = _end_author
-
-    def _start_itunes_owner(self, attrsD):
-        self.inpublisher = 1
-        self.push('publisher', 0)
-
-    def _end_itunes_owner(self):
-        self.pop('publisher')
-        self.inpublisher = 0
-        self._sync_author_detail('publisher')
-
-    def _start_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('contributor', 0)
-
-    def _end_contributor(self):
-        self.pop('contributor')
-        self.incontributor = 0
-
-    def _start_dc_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('name', 0)
-
-    def _end_dc_contributor(self):
-        self._end_name()
-        self.incontributor = 0
-
-    def _start_name(self, attrsD):
-        self.push('name', 0)
-    _start_itunes_name = _start_name
-
-    def _end_name(self):
-        value = self.pop('name')
-        if self.inpublisher:
-            self._save_author('name', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('name', value)
-        elif self.incontributor:
-            self._save_contributor('name', value)
-        elif self.intextinput:
-            context = self._getContext()
-            context['textinput']['name'] = value
-    _end_itunes_name = _end_name
-
-    def _start_width(self, attrsD):
-        self.push('width', 0)
-
-    def _end_width(self):
-        value = self.pop('width')
-        try:
-            value = int(value)
-        except:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['image']['width'] = value
-
-    def _start_height(self, attrsD):
-        self.push('height', 0)
-
-    def _end_height(self):
-        value = self.pop('height')
-        try:
-            value = int(value)
-        except:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['image']['height'] = value
-
-    def _start_url(self, attrsD):
-        self.push('href', 1)
-    _start_homepage = _start_url
-    _start_uri = _start_url
-
-    def _end_url(self):
-        value = self.pop('href')
-        if self.inauthor:
-            self._save_author('href', value)
-        elif self.incontributor:
-            self._save_contributor('href', value)
-        elif self.inimage:
-            context = self._getContext()
-            context['image']['href'] = value
-        elif self.intextinput:
-            context = self._getContext()
-            context['textinput']['link'] = value
-    _end_homepage = _end_url
-    _end_uri = _end_url
-
-    def _start_email(self, attrsD):
-        self.push('email', 0)
-    _start_itunes_email = _start_email
-
-    def _end_email(self):
-        value = self.pop('email')
-        if self.inpublisher:
-            self._save_author('email', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('email', value)
-        elif self.incontributor:
-            self._save_contributor('email', value)
-    _end_itunes_email = _end_email
-
-    def _getContext(self):
-        if self.insource:
-            context = self.sourcedata
-        elif self.inentry:
-            context = self.entries[-1]
-        else:
-            context = self.feeddata
-        return context
-
-    def _save_author(self, key, value, prefix='author'):
-        context = self._getContext()
-        context.setdefault(prefix + '_detail', FeedParserDict())
-        context[prefix + '_detail'][key] = value
-        self._sync_author_detail()
-
-    def _save_contributor(self, key, value):
-        context = self._getContext()
-        context.setdefault('contributors', [FeedParserDict()])
-        context['contributors'][-1][key] = value
-
-    def _sync_author_detail(self, key='author'):
-        context = self._getContext()
-        detail = context.get('%s_detail' % key)
-        if detail:
-            name = detail.get('name')
-            email = detail.get('email')
-            if name and email:
-                context[key] = '%s (%s)' % (name, email)
-            elif name:
-                context[key] = name
-            elif email:
-                context[key] = email
-        else:
-            author = context.get(key)
-            if not author: return
-            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
-            if not emailmatch: return
-            email = emailmatch.group(0)
-            # probably a better way to do the following, but it passes all the tests
-            author = author.replace(email, '')
-            author = author.replace('()', '')
-            author = author.strip()
-            if author and (author[0] == '('):
-                author = author[1:]
-            if author and (author[-1] == ')'):
-                author = author[:-1]
-            author = author.strip()
-            context.setdefault('%s_detail' % key, FeedParserDict())
-            context['%s_detail' % key]['name'] = author
-            context['%s_detail' % key]['email'] = email
-
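-    # Round-trip sketch for _sync_author_detail() above (illustrative,
-    # assumed value): given only context['author'] = 'Jane Doe
-    # (jane@example.com)', the regex extracts the address and the remainder,
-    # stripped of parentheses, becomes the name, yielding
-    # author_detail = {'name': 'Jane Doe', 'email': 'jane@example.com'}.
-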
-    def _start_subtitle(self, attrsD):
-        self.pushContent('subtitle', attrsD, 'text/plain', 1)
-    _start_tagline = _start_subtitle
-    _start_itunes_subtitle = _start_subtitle
-
-    def _end_subtitle(self):
-        self.popContent('subtitle')
-    _end_tagline = _end_subtitle
-    _end_itunes_subtitle = _end_subtitle
-            
-    def _start_rights(self, attrsD):
-        self.pushContent('rights', attrsD, 'text/plain', 1)
-    _start_dc_rights = _start_rights
-    _start_copyright = _start_rights
-
-    def _end_rights(self):
-        self.popContent('rights')
-    _end_dc_rights = _end_rights
-    _end_copyright = _end_rights
-
-    def _start_item(self, attrsD):
-        self.entries.append(FeedParserDict())
-        self.push('item', 0)
-        self.inentry = 1
-        self.guidislink = 0
-        id = self._getAttribute(attrsD, 'rdf:about')
-        if id:
-            context = self._getContext()
-            context['id'] = id
-        self._cdf_common(attrsD)
-    _start_entry = _start_item
-    _start_product = _start_item
-
-    def _end_item(self):
-        self.pop('item')
-        self.inentry = 0
-    _end_entry = _end_item
-
-    def _start_dc_language(self, attrsD):
-        self.push('language', 1)
-    _start_language = _start_dc_language
-
-    def _end_dc_language(self):
-        self.lang = self.pop('language')
-    _end_language = _end_dc_language
-
-    def _start_dc_publisher(self, attrsD):
-        self.push('publisher', 1)
-    _start_webmaster = _start_dc_publisher
-
-    def _end_dc_publisher(self):
-        self.pop('publisher')
-        self._sync_author_detail('publisher')
-    _end_webmaster = _end_dc_publisher
-
-    def _start_published(self, attrsD):
-        self.push('published', 1)
-    _start_dcterms_issued = _start_published
-    _start_issued = _start_published
-
-    def _end_published(self):
-        value = self.pop('published')
-        self._save('published_parsed', _parse_date(value))
-    _end_dcterms_issued = _end_published
-    _end_issued = _end_published
-
-    def _start_updated(self, attrsD):
-        self.push('updated', 1)
-    _start_modified = _start_updated
-    _start_dcterms_modified = _start_updated
-    _start_pubdate = _start_updated
-    _start_dc_date = _start_updated
-
-    def _end_updated(self):
-        value = self.pop('updated')
-        parsed_value = _parse_date(value)
-        self._save('updated_parsed', parsed_value)
-    _end_modified = _end_updated
-    _end_dcterms_modified = _end_updated
-    _end_pubdate = _end_updated
-    _end_dc_date = _end_updated
-
-    def _start_created(self, attrsD):
-        self.push('created', 1)
-    _start_dcterms_created = _start_created
-
-    def _end_created(self):
-        value = self.pop('created')
-        self._save('created_parsed', _parse_date(value))
-    _end_dcterms_created = _end_created
-
-    def _start_expirationdate(self, attrsD):
-        self.push('expired', 1)
-
-    def _end_expirationdate(self):
-        self._save('expired_parsed', _parse_date(self.pop('expired')))
-
-    def _start_cc_license(self, attrsD):
-        self.push('license', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('license')
-        
-    def _start_creativecommons_license(self, attrsD):
-        self.push('license', 1)
-
-    def _end_creativecommons_license(self):
-        self.pop('license')
-
-    def _addTag(self, term, scheme, label):
-        context = self._getContext()
-        tags = context.setdefault('tags', [])
-        if (not term) and (not scheme) and (not label): return
-        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
-        if value not in tags:
-            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
-
-    def _start_category(self, attrsD):
-        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
-        term = attrsD.get('term')
-        scheme = attrsD.get('scheme', attrsD.get('domain'))
-        label = attrsD.get('label')
-        self._addTag(term, scheme, label)
-        self.push('category', 1)
-    _start_dc_subject = _start_category
-    _start_keywords = _start_category
-        
-    def _end_itunes_keywords(self):
-        for term in self.pop('itunes_keywords').split():
-            self._addTag(term, 'http://www.itunes.com/', None)
-        
-    def _start_itunes_category(self, attrsD):
-        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
-        self.push('category', 1)
-        
-    def _end_category(self):
-        value = self.pop('category')
-        if not value: return
-        context = self._getContext()
-        tags = context['tags']
-        if value and len(tags) and not tags[-1]['term']:
-            tags[-1]['term'] = value
-        else:
-            self._addTag(value, None, None)
-    _end_dc_subject = _end_category
-    _end_keywords = _end_category
-    _end_itunes_category = _end_category
-
-    def _start_cloud(self, attrsD):
-        self._getContext()['cloud'] = FeedParserDict(attrsD)
-        
-    def _start_link(self, attrsD):
-        attrsD.setdefault('rel', 'alternate')
-        attrsD.setdefault('type', 'text/html')
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        if attrsD.has_key('href'):
-            attrsD['href'] = self.resolveURI(attrsD['href'])
-        expectingText = self.infeed or self.inentry or self.insource
-        context = self._getContext()
-        context.setdefault('links', [])
-        context['links'].append(FeedParserDict(attrsD))
-        if attrsD['rel'] == 'enclosure':
-            self._start_enclosure(attrsD)
-        if attrsD.has_key('href'):
-            expectingText = 0
-            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
-                context['link'] = attrsD['href']
-        else:
-            self.push('link', expectingText)
-    _start_producturl = _start_link
-
-    def _end_link(self):
-        value = self.pop('link')
-        context = self._getContext()
-        if self.intextinput:
-            context['textinput']['link'] = value
-        if self.inimage:
-            context['image']['link'] = value
-    _end_producturl = _end_link
-
-    def _start_guid(self, attrsD):
-        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
-        self.push('id', 1)
-
-    def _end_guid(self):
-        value = self.pop('id')
-        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
-        if self.guidislink:
-            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
-            # and only if the item doesn't already have a link element
-            self._save('link', value)
-
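-    # Behaviour sketch for _start_guid/_end_guid above (illustrative,
-    # assumed input): <guid isPermaLink="true">http://e/1</guid> stores the
-    # value under 'id' and, when the item has no <link> of its own, also
-    # under 'link', with 'guidislink' recording that fallback.
-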
-    def _start_title(self, attrsD):
-        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
-    _start_dc_title = _start_title
-    _start_media_title = _start_title
-
-    def _end_title(self):
-        value = self.popContent('title')
-        context = self._getContext()
-        if self.intextinput:
-            context['textinput']['title'] = value
-        elif self.inimage:
-            context['image']['title'] = value
-    _end_dc_title = _end_title
-    _end_media_title = _end_title
-
-    def _start_description(self, attrsD):
-        context = self._getContext()
-        if context.has_key('summary'):
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
-
-    def _start_abstract(self, attrsD):
-        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
-
-    def _end_description(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            value = self.popContent('description')
-            context = self._getContext()
-            if self.intextinput:
-                context['textinput']['description'] = value
-            elif self.inimage:
-                context['image']['description'] = value
-        self._summaryKey = None
-    _end_abstract = _end_description
-
-    def _start_info(self, attrsD):
-        self.pushContent('info', attrsD, 'text/plain', 1)
-    _start_feedburner_browserfriendly = _start_info
-
-    def _end_info(self):
-        self.popContent('info')
-    _end_feedburner_browserfriendly = _end_info
-
-    def _start_generator(self, attrsD):
-        if attrsD:
-            attrsD = self._itsAnHrefDamnIt(attrsD)
-            if attrsD.has_key('href'):
-                attrsD['href'] = self.resolveURI(attrsD['href'])
-        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
-        self.push('generator', 1)
-
-    def _end_generator(self):
-        value = self.pop('generator')
-        context = self._getContext()
-        if context.has_key('generator_detail'):
-            context['generator_detail']['name'] = value
-            
-    def _start_admin_generatoragent(self, attrsD):
-        self.push('generator', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('generator')
-        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
-
-    def _start_admin_errorreportsto(self, attrsD):
-        self.push('errorreportsto', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('errorreportsto')
-        
-    def _start_summary(self, attrsD):
-        context = self._getContext()
-        if context.has_key('summary'):
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self._summaryKey = 'summary'
-            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
-    _start_itunes_summary = _start_summary
-
-    def _end_summary(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            self.popContent(self._summaryKey or 'summary')
-        self._summaryKey = None
-    _end_itunes_summary = _end_summary
-        
-    def _start_enclosure(self, attrsD):
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
-        href = attrsD.get('href')
-        if href:
-            context = self._getContext()
-            if not context.get('id'):
-                context['id'] = href
-            
-    def _start_source(self, attrsD):
-        self.insource = 1
-
-    def _end_source(self):
-        self.insource = 0
-        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
-        self.sourcedata.clear()
-
-    def _start_content(self, attrsD):
-        self.pushContent('content', attrsD, 'text/plain', 1)
-        src = attrsD.get('src')
-        if src:
-            self.contentparams['src'] = src
-        self.push('content', 1)
-
-    def _start_prodlink(self, attrsD):
-        self.pushContent('content', attrsD, 'text/html', 1)
-
-    def _start_body(self, attrsD):
-        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
-    _start_xhtml_body = _start_body
-
-    def _start_content_encoded(self, attrsD):
-        self.pushContent('content', attrsD, 'text/html', 1)
-    _start_fullitem = _start_content_encoded
-
-    def _end_content(self):
-        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
-        value = self.popContent('content')
-        if copyToDescription:
-            self._save('description', value)
-    _end_body = _end_content
-    _end_xhtml_body = _end_content
-    _end_content_encoded = _end_content
-    _end_fullitem = _end_content
-    _end_prodlink = _end_content
-
-    def _start_itunes_image(self, attrsD):
-        self.push('itunes_image', 0)
-        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
-    _start_itunes_link = _start_itunes_image
-        
-    def _end_itunes_block(self):
-        value = self.pop('itunes_block', 0)
-        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
-
-    def _end_itunes_explicit(self):
-        value = self.pop('itunes_explicit', 0)
-        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
-
-if _XML_AVAILABLE:
-    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
-        def __init__(self, baseuri, baselang, encoding):
-            if _debug: sys.stderr.write('trying StrictFeedParser\n')
-            xml.sax.handler.ContentHandler.__init__(self)
-            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-            self.bozo = 0
-            self.exc = None
-        
-        def startPrefixMapping(self, prefix, uri):
-            self.trackNamespace(prefix, uri)
-        
-        def startElementNS(self, name, qname, attrs):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if lowernamespace.find('backend.userland.com/rss') <> -1:
-                # match any backend.userland.com namespace
-                namespace = 'http://backend.userland.com/rss'
-                lowernamespace = namespace
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = None
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
-                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
-            if prefix:
-                localname = prefix + ':' + localname
-            localname = str(localname).lower()
-            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
-
-            # qname implementation is horribly broken in Python 2.1 (it
-            # doesn't report any), and slightly broken in Python 2.2 (it
-            # doesn't report the xml: namespace). So we match up namespaces
-            # with a known list first, and then possibly override them with
-            # the qnames the SAX parser gives us (if indeed it gives us any
-            # at all).  Thanks to MatejC for helping me test this and
-            # tirelessly telling me that it didn't work yet.
-            attrsD = {}
-            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
-                lowernamespace = (namespace or '').lower()
-                prefix = self._matchnamespaces.get(lowernamespace, '')
-                if prefix:
-                    attrlocalname = prefix + ':' + attrlocalname
-                attrsD[str(attrlocalname).lower()] = attrvalue
-            for qname in attrs.getQNames():
-                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
-            self.unknown_starttag(localname, attrsD.items())
-
-        def characters(self, text):
-            self.handle_data(text)
-
-        def endElementNS(self, name, qname):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = ''
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if prefix:
-                localname = prefix + ':' + localname
-            localname = str(localname).lower()
-            self.unknown_endtag(localname)
-
-        def error(self, exc):
-            self.bozo = 1
-            self.exc = exc
-            
-        def fatalError(self, exc):
-            self.error(exc)
-            raise exc
-
-class _BaseHTMLProcessor(sgmllib.SGMLParser):
-    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
-      'img', 'input', 'isindex', 'link', 'meta', 'param']
-    
-    def __init__(self, encoding):
-        self.encoding = encoding
-        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
-        sgmllib.SGMLParser.__init__(self)
-        
-    def reset(self):
-        self.pieces = []
-        sgmllib.SGMLParser.reset(self)
-
-    def _shorttag_replace(self, match):
-        tag = match.group(1)
-        if tag in self.elements_no_end_tag:
-            return '<' + tag + ' />'
-        else:
-            return '<' + tag + '></' + tag + '>'
-        
-    def feed(self, data):
-        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
-        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
-        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data) 
-        data = data.replace('&#39;', "'")
-        data = data.replace('&#34;', '"')
-        if self.encoding and type(data) == type(u''):
-            data = data.encode(self.encoding)
-        sgmllib.SGMLParser.feed(self, data)
-
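-    # Normalization sketch for feed() above (illustrative, assumed inputs):
-    # XML-style empty tags are rewritten before SGMLParser sees them, so
-    # '<br/>' becomes '<br />' (a void element stays self-closed) while
-    # '<span/>' becomes '<span></span>'.
-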
-    def normalize_attrs(self, attrs):
-        # utility method to be called by descendants
-        attrs = [(k.lower(), v) for k, v in attrs]
-        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
-        return attrs
-
-    def unknown_starttag(self, tag, attrs):
-        # called for each start tag
-        # attrs is a list of (attr, value) tuples
-        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
-        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
-        uattrs = []
-        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
-        for key, value in attrs:
-            if type(value) != type(u''):
-                value = unicode(value, self.encoding)
-            uattrs.append((unicode(key, self.encoding), value))
-        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
-        if tag in self.elements_no_end_tag:
-            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
-        else:
-            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
-
-    def unknown_endtag(self, tag):
-        # called for each end tag, e.g. for </pre>, tag will be 'pre'
-        # Reconstruct the original end tag.
-        if tag not in self.elements_no_end_tag:
-            self.pieces.append("</%(tag)s>" % locals())
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        # Reconstruct the original character reference.
-        self.pieces.append('&#%(ref)s;' % locals())
-        
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        # Reconstruct the original entity reference.
-        self.pieces.append('&%(ref)s;' % locals())
-
-    def handle_data(self, text):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        # Store the original text verbatim.
-        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
-        self.pieces.append(text)
-        
-    def handle_comment(self, text):
-        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
-        # Reconstruct the original comment.
-        self.pieces.append('<!--%(text)s-->' % locals())
-        
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        # Reconstruct original processing instruction.
-        self.pieces.append('<?%(text)s>' % locals())
-
-    def handle_decl(self, text):
-        # called for the DOCTYPE, if present, e.g.
-        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-        #     "http://www.w3.org/TR/html4/loose.dtd">
-        # Reconstruct original DOCTYPE
-        self.pieces.append('<!%(text)s>' % locals())
-        
-    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
-    def _scan_name(self, i, declstartpos):
-        rawdata = self.rawdata
-        n = len(rawdata)
-        if i == n:
-            return None, -1
-        m = self._new_declname_match(rawdata, i)
-        if m:
-            s = m.group()
-            name = s.strip()
-            if (i + len(s)) == n:
-                return None, -1  # end of buffer
-            return name.lower(), m.end()
-        else:
-            self.handle_data(rawdata)
-#            self.updatepos(declstartpos, i)
-            return None, -1
-
-    def output(self):
-        '''Return processed HTML as a single string'''
-        return ''.join([str(p) for p in self.pieces])
-
-class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
-    def __init__(self, baseuri, baselang, encoding):
-        sgmllib.SGMLParser.__init__(self)
-        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-
-    def decodeEntities(self, element, data):
-        data = data.replace('&#60;', '&lt;')
-        data = data.replace('&#x3c;', '&lt;')
-        data = data.replace('&#62;', '&gt;')
-        data = data.replace('&#x3e;', '&gt;')
-        data = data.replace('&#38;', '&amp;')
-        data = data.replace('&#x26;', '&amp;')
-        data = data.replace('&#34;', '&quot;')
-        data = data.replace('&#x22;', '&quot;')
-        data = data.replace('&#39;', '&apos;')
-        data = data.replace('&#x27;', '&apos;')
-        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
-            data = data.replace('&lt;', '<')
-            data = data.replace('&gt;', '>')
-            data = data.replace('&amp;', '&')
-            data = data.replace('&quot;', '"')
-            data = data.replace('&apos;', "'")
-        return data
-        
-class _RelativeURIResolver(_BaseHTMLProcessor):
-    relative_uris = [('a', 'href'),
-                     ('applet', 'codebase'),
-                     ('area', 'href'),
-                     ('blockquote', 'cite'),
-                     ('body', 'background'),
-                     ('del', 'cite'),
-                     ('form', 'action'),
-                     ('frame', 'longdesc'),
-                     ('frame', 'src'),
-                     ('iframe', 'longdesc'),
-                     ('iframe', 'src'),
-                     ('head', 'profile'),
-                     ('img', 'longdesc'),
-                     ('img', 'src'),
-                     ('img', 'usemap'),
-                     ('input', 'src'),
-                     ('input', 'usemap'),
-                     ('ins', 'cite'),
-                     ('link', 'href'),
-                     ('object', 'classid'),
-                     ('object', 'codebase'),
-                     ('object', 'data'),
-                     ('object', 'usemap'),
-                     ('q', 'cite'),
-                     ('script', 'src')]
-
-    def __init__(self, baseuri, encoding):
-        _BaseHTMLProcessor.__init__(self, encoding)
-        self.baseuri = baseuri
-
-    def resolveURI(self, uri):
-        return _urljoin(self.baseuri, uri)
-    
-    def unknown_starttag(self, tag, attrs):
-        attrs = self.normalize_attrs(attrs)
-        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
-        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
-        
-def _resolveRelativeURIs(htmlSource, baseURI, encoding):
-    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
-    p = _RelativeURIResolver(baseURI, encoding)
-    p.feed(htmlSource)
-    return p.output()
-
-class _HTMLSanitizer(_BaseHTMLProcessor):
-    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
-      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
-      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
-      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
-      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
-      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
-      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
-      'thead', 'tr', 'tt', 'u', 'ul', 'var']
-
-    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
-      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
-      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
-      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
-      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
-      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
-      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
-      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
-      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
-      'usemap', 'valign', 'value', 'vspace', 'width']
-
-    unacceptable_elements_with_end_tag = ['script', 'applet']
-
-    def reset(self):
-        _BaseHTMLProcessor.reset(self)
-        self.unacceptablestack = 0
-        
-    def unknown_starttag(self, tag, attrs):
-        if not tag in self.acceptable_elements:
-            if tag in self.unacceptable_elements_with_end_tag:
-                self.unacceptablestack += 1
-            return
-        attrs = self.normalize_attrs(attrs)
-        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
-        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
-        
-    def unknown_endtag(self, tag):
-        if not tag in self.acceptable_elements:
-            if tag in self.unacceptable_elements_with_end_tag:
-                self.unacceptablestack -= 1
-            return
-        _BaseHTMLProcessor.unknown_endtag(self, tag)
-
-    def handle_pi(self, text):
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def handle_data(self, text):
-        if not self.unacceptablestack:
-            _BaseHTMLProcessor.handle_data(self, text)
-
-def _sanitizeHTML(htmlSource, encoding):
-    p = _HTMLSanitizer(encoding)
-    p.feed(htmlSource)
-    data = p.output()
-    if TIDY_MARKUP:
-        # loop through list of preferred Tidy interfaces looking for one that's installed,
-        # then set up a common _tidy function to wrap the interface-specific API.
-        _tidy = None
-        for tidy_interface in PREFERRED_TIDY_INTERFACES:
-            try:
-                if tidy_interface == "uTidy":
-                    from tidy import parseString as _utidy
-                    def _tidy(data, **kwargs):
-                        return str(_utidy(data, **kwargs))
-                    break
-                elif tidy_interface == "mxTidy":
-                    from mx.Tidy import Tidy as _mxtidy
-                    def _tidy(data, **kwargs):
-                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
-                        return data
-                    break
-            except:
-                pass
-        if _tidy:
-            utf8 = type(data) == type(u'')
-            if utf8:
-                data = data.encode('utf-8')
-            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
-            if utf8:
-                data = unicode(data, 'utf-8')
-            if data.count('<body'):
-                data = data.split('<body', 1)[1]
-                if data.count('>'):
-                    data = data.split('>', 1)[1]
-            if data.count('</body'):
-                data = data.split('</body', 1)[0]
-    data = data.strip().replace('\r\n', '\n')
-    return data
-
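
The sanitizer above is strictly whitelist-based: unknown elements are dropped, attributes outside acceptable_attributes are stripped, and character data inside script/applet is suppressed through the unacceptablestack counter. A rough sketch of the effect, again using the module-private helper only for illustration:

    dirty = '<b onclick="evil()">hi</b><script>alert(1)</script>'
    _sanitizeHTML(dirty, 'utf-8')
    # -> '<b>hi</b>'  (onclick stripped; script element and its body dropped)
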
-class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
-    def http_error_default(self, req, fp, code, msg, headers):
-        if ((code / 100) == 3) and (code != 304):
-            return self.http_error_302(req, fp, code, msg, headers)
-        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
-        infourl.status = code
-        return infourl
-
-    def http_error_302(self, req, fp, code, msg, headers):
-        if headers.dict.has_key('location'):
-            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
-        else:
-            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
-        if not hasattr(infourl, 'status'):
-            infourl.status = code
-        return infourl
-
-    def http_error_301(self, req, fp, code, msg, headers):
-        if headers.dict.has_key('location'):
-            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
-        else:
-            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
-        if not hasattr(infourl, 'status'):
-            infourl.status = code
-        return infourl
-
-    http_error_300 = http_error_302
-    http_error_303 = http_error_302
-    http_error_307 = http_error_302
-        
-    def http_error_401(self, req, fp, code, msg, headers):
-        # Check if
-        # - server requires digest auth, AND
-        # - we tried (unsuccessfully) with basic auth, AND
-        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
-        # If all conditions hold, parse authentication information
-        # out of the Authorization header we sent the first time
-        # (for the username and password) and the WWW-Authenticate
-        # header the server sent back (for the realm) and retry
-        # the request with the appropriate digest auth headers instead.
-        # This evil genius hack has been brought to you by Aaron Swartz.
-        host = urlparse.urlparse(req.get_full_url())[1]
-        try:
-            assert sys.version.split()[0] >= '2.3.3'
-            assert base64 != None
-            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
-            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
-            self.add_password(realm, host, user, passw)
-            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
-            self.reset_retry_count()
-            return retry
-        except:
-            return self.http_error_default(req, fp, code, msg, headers)
-
-def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
-    """URL, filename, or string --> stream
-
-    This function lets you define parsers that take any input source
-    (URL, pathname to local or network file, or actual data as a string)
-    and deal with it in a uniform manner.  Returned object is guaranteed
-    to have all the basic stdio read methods (read, readline, readlines).
-    Just .close() the object when you're done with it.
-
-    If the etag argument is supplied, it will be used as the value of an
-    If-None-Match request header.
-
-    If the modified argument is supplied, it must be a tuple of 9 integers
-    as returned by gmtime() in the standard Python time module. This MUST
-    be in GMT (Greenwich Mean Time). The formatted date/time will be used
-    as the value of an If-Modified-Since request header.
-
-    If the agent argument is supplied, it will be used as the value of a
-    User-Agent request header.
-
-    If the referrer argument is supplied, it will be used as the value of a
-    Referer[sic] request header.
-
-    If handlers is supplied, it is a list of handlers used to build a
-    urllib2 opener.
-    """
-
-    if hasattr(url_file_stream_or_string, 'read'):
-        return url_file_stream_or_string
-
-    if url_file_stream_or_string == '-':
-        return sys.stdin
-
-    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
-        if not agent:
-            agent = USER_AGENT
-        # test for inline user:password for basic auth
-        auth = None
-        if base64:
-            urltype, rest = urllib.splittype(url_file_stream_or_string)
-            realhost, rest = urllib.splithost(rest)
-            if realhost:
-                user_passwd, realhost = urllib.splituser(realhost)
-                if user_passwd:
-                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
-                    auth = base64.encodestring(user_passwd).strip()
-        # try to open with urllib2 (to use optional headers)
-        request = urllib2.Request(url_file_stream_or_string)
-        request.add_header('User-Agent', agent)
-        if etag:
-            request.add_header('If-None-Match', etag)
-        if modified:
-            # format into an RFC 1123-compliant timestamp. We can't use
-            # time.strftime() since the %a and %b directives can be affected
-            # by the current locale, but RFC 2616 states that dates must be
-            # in English.
-            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
-        if referrer:
-            request.add_header('Referer', referrer)
-        if gzip and zlib:
-            request.add_header('Accept-encoding', 'gzip, deflate')
-        elif gzip:
-            request.add_header('Accept-encoding', 'gzip')
-        elif zlib:
-            request.add_header('Accept-encoding', 'deflate')
-        else:
-            request.add_header('Accept-encoding', '')
-        if auth:
-            request.add_header('Authorization', 'Basic %s' % auth)
-        if ACCEPT_HEADER:
-            request.add_header('Accept', ACCEPT_HEADER)
-        request.add_header('A-IM', 'feed') # RFC 3229 support
-        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
-        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
-        try:
-            return opener.open(request)
-        finally:
-            opener.close() # JohnD
-    
-    # try to open with native open function (if url_file_stream_or_string is a filename)
-    try:
-        return open(url_file_stream_or_string)
-    except:
-        pass
-
-    # treat url_file_stream_or_string as string
-    return _StringIO(str(url_file_stream_or_string))
-
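
In practice the etag/modified machinery implemented by _open_resource is driven through the public parse() entry point. A sketch of a conditional fetch, with the URL as a placeholder:

    import feedparser

    d = feedparser.parse('http://example.org/feed.xml')
    # ...later, revalidate using the stored ETag and Last-Modified values
    d2 = feedparser.parse('http://example.org/feed.xml',
                          etag=d.get('etag'),
                          modified=d.get('modified'))  # 9-tuple, GMT
    if d2.get('status') == 304:
        pass  # unchanged on the server; keep the previously parsed result
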
-_date_handlers = []
-def registerDateHandler(func):
-    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
-    _date_handlers.insert(0, func)
-    
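
registerDateHandler is part of the public interface: a handler receives the raw date string and returns a 9-tuple in GMT, or None to let the next handler try. A minimal sketch for a hypothetical compact format:

    import time
    import feedparser

    def parse_compact_date(date_string):
        # hypothetical 'YYYYMMDDHHMMSS' format, assumed to already be GMT
        try:
            return time.strptime(date_string, '%Y%m%d%H%M%S')
        except ValueError:
            return None  # decline; other registered handlers will be tried

    feedparser.registerDateHandler(parse_compact_date)
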
-# ISO-8601 date parsing routines written by Fazal Majid.
-# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
-# parser is beyond the scope of feedparser and would be a worthwhile addition
-# to the Python library.
-# A single regular expression cannot parse ISO 8601 date formats into groups
-# as the standard is highly irregular (for instance is 030104 2003-01-04 or
-# 0301-04-01), so we use templates instead.
-# Please note the order in templates is significant because we need a
-# greedy match.
-_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
-                'YY-?MM-?DD', 'YY-?OOO', 'YYYY', 
-                '-YY-?MM', '-OOO', '-YY',
-                '--MM-?DD', '--MM',
-                '---DD',
-                'CC', '']
-_iso8601_re = [
-    tmpl.replace(
-    'YYYY', r'(?P<year>\d{4})').replace(
-    'YY', r'(?P<year>\d\d)').replace(
-    'MM', r'(?P<month>[01]\d)').replace(
-    'DD', r'(?P<day>[0123]\d)').replace(
-    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
-    'CC', r'(?P<century>\d\d$)')
-    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
-    + r'(:(?P<second>\d{2}))?'
-    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
-    for tmpl in _iso8601_tmpl]
-del tmpl
-_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
-del regex
-def _parse_date_iso8601(dateString):
-    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
-    m = None
-    for _iso8601_match in _iso8601_matches:
-        m = _iso8601_match(dateString)
-        if m: break
-    if not m: return
-    if m.span() == (0, 0): return
-    params = m.groupdict()
-    ordinal = params.get('ordinal', 0)
-    if ordinal:
-        ordinal = int(ordinal)
-    else:
-        ordinal = 0
-    year = params.get('year', '--')
-    if not year or year == '--':
-        year = time.gmtime()[0]
-    elif len(year) == 2:
-        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
-        year = 100 * int(time.gmtime()[0] / 100) + int(year)
-    else:
-        year = int(year)
-    month = params.get('month', '-')
-    if not month or month == '-':
-        # ordinals are NOT normalized by mktime, we simulate them
-        # by setting month=1, day=ordinal
-        if ordinal:
-            month = 1
-        else:
-            month = time.gmtime()[1]
-    month = int(month)
-    day = params.get('day', 0)
-    if not day:
-        # see above
-        if ordinal:
-            day = ordinal
-        elif params.get('century', 0) or \
-                 params.get('year', 0) or params.get('month', 0):
-            day = 1
-        else:
-            day = time.gmtime()[2]
-    else:
-        day = int(day)
-    # special case of the century - is the first year of the 21st century
-    # 2000 or 2001 ? The debate goes on...
-    if 'century' in params.keys():
-        year = (int(params['century']) - 1) * 100 + 1
-    # in ISO 8601 most fields are optional
-    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
-        if not params.get(field, None):
-            params[field] = 0
-    hour = int(params.get('hour', 0))
-    minute = int(params.get('minute', 0))
-    second = int(params.get('second', 0))
-    # weekday is normalized by mktime(), we can ignore it
-    weekday = 0
-    # daylight savings is complex, but not needed for feedparser's purposes
-    # as time zones, if specified, include mention of whether it is active
-    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
-    # most implementations have DST bugs
-    daylight_savings_flag = 0
-    tm = [year, month, day, hour, minute, second, weekday,
-          ordinal, daylight_savings_flag]
-    # ISO 8601 time zone adjustments
-    tz = params.get('tz')
-    if tz and tz != 'Z':
-        if tz[0] == '-':
-            tm[3] += int(params.get('tzhour', 0))
-            tm[4] += int(params.get('tzmin', 0))
-        elif tz[0] == '+':
-            tm[3] -= int(params.get('tzhour', 0))
-            tm[4] -= int(params.get('tzmin', 0))
-        else:
-            return None
-    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
-    # which is guaranteed to normalize d/m/y/h/m/s.
-    # Many implementations have bugs, but we'll pretend they don't.
-    return time.localtime(time.mktime(tm))
-registerDateHandler(_parse_date_iso8601)
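
To make the template trick concrete: after the .replace() chain, the date part of 'YYYY-?MM-?DD' is roughly the pattern below, so both the extended and the basic ISO spelling match:

    import re
    date_part = r'(?P<year>\d{4})-?(?P<month>[01]\d)-?(?P<day>[0123]\d)'
    re.match(date_part, '2004-01-05').groupdict()
    # -> {'year': '2004', 'month': '01', 'day': '05'}
    re.match(date_part, '20040105').groupdict()
    # -> same result; the template ordering ensures the longest form wins
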
-    
-# 8-bit date handling routines written by ytrewq1.
-_korean_year  = u'\ub144' # b3e2 in euc-kr
-_korean_month = u'\uc6d4' # bff9 in euc-kr
-_korean_day   = u'\uc77c' # c0cf in euc-kr
-_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
-_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
-
-_korean_onblog_date_re = \
-    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
-               (_korean_year, _korean_month, _korean_day))
-_korean_nate_date_re = \
-    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
-               (_korean_am, _korean_pm))
-def _parse_date_onblog(dateString):
-    '''Parse a string according to the OnBlog 8-bit date format'''
-    m = _korean_onblog_date_re.match(dateString)
-    if not m: return
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
-                 'zonediff': '+09:00'}
-    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_onblog)
-
-def _parse_date_nate(dateString):
-    '''Parse a string according to the Nate 8-bit date format'''
-    m = _korean_nate_date_re.match(dateString)
-    if not m: return
-    hour = int(m.group(5))
-    ampm = m.group(4)
-    if (ampm == _korean_pm):
-        hour += 12
-    hour = str(hour)
-    if len(hour) == 1:
-        hour = '0' + hour
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
-                 'zonediff': '+09:00'}
-    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_nate)
-
-_mssql_date_re = \
-    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
-def _parse_date_mssql(dateString):
-    '''Parse a string according to the MS SQL date format'''
-    m = _mssql_date_re.match(dateString)
-    if not m: return
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
-                 'zonediff': '+09:00'}
-    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_mssql)
-
-# Unicode strings for Greek date strings
-_greek_months = \
-  { \
-   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
-   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
-   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
-   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
-   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
-   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
-   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
-   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
-   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
-   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
-   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
-   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
-   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
-   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
-   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
-   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
-   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
-   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
-   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
-  }
-
-_greek_wdays = \
-  { \
-   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
-   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
-   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
-   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
-   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
-   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
-   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7   
-  }
-
-_greek_date_format_re = \
-    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
-
-def _parse_date_greek(dateString):
-    '''Parse a string according to a Greek 8-bit date format.'''
-    m = _greek_date_format_re.match(dateString)
-    if not m: return
-    try:
-        wday = _greek_wdays[m.group(1)]
-        month = _greek_months[m.group(3)]
-    except:
-        return
-    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
-                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
-                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
-                  'zonediff': m.group(8)}
-    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
-    return _parse_date_rfc822(rfc822date)
-registerDateHandler(_parse_date_greek)
-
-# Unicode strings for Hungarian date strings
-_hungarian_months = \
-  { \
-    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
-    u'febru\u00e1r':  u'02',  # e1 in iso-8859-2
-    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
-    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
-    u'm\u00e1jus':    u'05',  # e1 in iso-8859-2
-    u'j\u00fanius':   u'06',  # fa in iso-8859-2
-    u'j\u00falius':   u'07',  # fa in iso-8859-2
-    u'augusztus':     u'08',
-    u'szeptember':    u'09',
-    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
-    u'november':      u'11',
-    u'december':      u'12',
-  }
-
-_hungarian_date_format_re = \
-  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
-
-def _parse_date_hungarian(dateString):
-    '''Parse a string according to a Hungarian 8-bit date format.'''
-    m = _hungarian_date_format_re.match(dateString)
-    if not m: return
-    try:
-        month = _hungarian_months[m.group(2)]
-        day = m.group(3)
-        if len(day) == 1:
-            day = '0' + day
-        hour = m.group(4)
-        if len(hour) == 1:
-            hour = '0' + hour
-    except:
-        return
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': month, 'day': day,\
-                 'hour': hour, 'minute': m.group(5),\
-                 'zonediff': m.group(6)}
-    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_hungarian)
-
-# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
-# Drake and licensed under the Python license.  Removed all range checking
-# for month, day, hour, minute, and second, since mktime will normalize
-# these later
-def _parse_date_w3dtf(dateString):
-    def __extract_date(m):
-        year = int(m.group('year'))
-        if year < 100:
-            year = 100 * int(time.gmtime()[0] / 100) + int(year)
-        if year < 1000:
-            return 0, 0, 0
-        julian = m.group('julian')
-        if julian:
-            julian = int(julian)
-            month = julian / 30 + 1
-            day = julian % 30 + 1
-            jday = None
-            while jday != julian:
-                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
-                jday = time.gmtime(t)[-2]
-                diff = abs(jday - julian)
-                if jday > julian:
-                    if diff < day:
-                        day = day - diff
-                    else:
-                        month = month - 1
-                        day = 31
-                elif jday < julian:
-                    if day + diff < 28:
-                        day = day + diff
-                    else:
-                        month = month + 1
-            return year, month, day
-        month = m.group('month')
-        day = 1
-        if month is None:
-            month = 1
-        else:
-            month = int(month)
-            day = m.group('day')
-            if day:
-                day = int(day)
-            else:
-                day = 1
-        return year, month, day
-
-    def __extract_time(m):
-        if not m:
-            return 0, 0, 0
-        hours = m.group('hours')
-        if not hours:
-            return 0, 0, 0
-        hours = int(hours)
-        minutes = int(m.group('minutes'))
-        seconds = m.group('seconds')
-        if seconds:
-            seconds = int(seconds)
-        else:
-            seconds = 0
-        return hours, minutes, seconds
-
-    def __extract_tzd(m):
-        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
-        if not m:
-            return 0
-        tzd = m.group('tzd')
-        if not tzd:
-            return 0
-        if tzd == 'Z':
-            return 0
-        hours = int(m.group('tzdhours'))
-        minutes = m.group('tzdminutes')
-        if minutes:
-            minutes = int(minutes)
-        else:
-            minutes = 0
-        offset = (hours*60 + minutes) * 60
-        if tzd[0] == '+':
-            return -offset
-        return offset
-
-    __date_re = ('(?P<year>\d\d\d\d)'
-                 '(?:(?P<dsep>-|)'
-                 '(?:(?P<julian>\d\d\d)'
-                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
-    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
-    __tzd_rx = re.compile(__tzd_re)
-    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
-                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
-                 + __tzd_re)
-    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
-    __datetime_rx = re.compile(__datetime_re)
-    m = __datetime_rx.match(dateString)
-    if (m is None) or (m.group() != dateString): return
-    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
-    if gmt[0] == 0: return
-    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
-registerDateHandler(_parse_date_w3dtf)
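
For illustration, all of the following shapes satisfy the pattern above and come back as a 9-tuple in GMT (the helper itself is module-private):

    _parse_date_w3dtf('2003-12-31T10:14:55Z')       # full form, UTC
    _parse_date_w3dtf('2003-12-31T10:14:55+02:00')  # zone offset folded in
    _parse_date_w3dtf('2003-12-31')                 # date only -> midnight
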
-
-def _parse_date_rfc822(dateString):
-    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
-    data = dateString.split()
-    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
-        del data[0]
-    if len(data) == 4:
-        s = data[3]
-        i = s.find('+')
-        if i > 0:
-            data[3:] = [s[:i], s[i+1:]]
-        else:
-            data.append('')
-        dateString = " ".join(data)
-    if len(data) < 5:
-        dateString += ' 00:00:00 GMT'
-    tm = rfc822.parsedate_tz(dateString)
-    if tm:
-        return time.gmtime(rfc822.mktime_tz(tm))
-# rfc822.py defines several time zones, but we define some extra ones.
-# 'ET' is equivalent to 'EST', etc.
-_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
-rfc822._timezones.update(_additional_timezones)
-registerDateHandler(_parse_date_rfc822)    
-
-def _parse_date(dateString):
-    '''Parses a variety of date formats into a 9-tuple in GMT'''
-    for handler in _date_handlers:
-        try:
-            date9tuple = handler(dateString)
-            if not date9tuple: continue
-            if len(date9tuple) != 9:
-                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
-                raise ValueError
-            map(int, date9tuple)
-            return date9tuple
-        except Exception, e:
-            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
-            pass
-    return None
-
-def _getCharacterEncoding(http_headers, xml_data):
-    '''Get the character encoding of the XML document
-
-    http_headers is a dictionary
-    xml_data is a raw string (not Unicode)
-    
-    This is so much trickier than it sounds, it's not even funny.
-    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
-    is application/xml, application/*+xml,
-    application/xml-external-parsed-entity, or application/xml-dtd,
-    the encoding given in the charset parameter of the HTTP Content-Type
-    takes precedence over the encoding given in the XML prefix within the
-    document, and defaults to 'utf-8' if neither are specified.  But, if
-    the HTTP Content-Type is text/xml, text/*+xml, or
-    text/xml-external-parsed-entity, the encoding given in the XML prefix
-    within the document is ALWAYS IGNORED and only the encoding given in
-    the charset parameter of the HTTP Content-Type header should be
-    respected, and it defaults to 'us-ascii' if not specified.
-
-    Furthermore, discussion on the atom-syntax mailing list with the
-    author of RFC 3023 leads me to the conclusion that any document
-    served with a Content-Type of text/* and no charset parameter
-    must be treated as us-ascii.  (We now do this.)  And also that it
-    must always be flagged as non-well-formed.  (We now do this too.)
-    
-    If Content-Type is unspecified (input was local file or non-HTTP source)
-    or unrecognized (server just got it totally wrong), then go by the
-    encoding given in the XML prefix of the document and default to
-    'iso-8859-1' as per the HTTP specification (RFC 2616).
-    
-    Then, assuming we didn't find a character encoding in the HTTP headers
-    (and the HTTP Content-type allowed us to look in the body), we need
-    to sniff the first few bytes of the XML data and try to determine
-    whether the encoding is ASCII-compatible.  Section F of the XML
-    specification shows the way here:
-    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
-
-    If the sniffed encoding is not ASCII-compatible, we need to make it
-    ASCII compatible so that we can sniff further into the XML declaration
-    to find the encoding attribute, which will tell us the true encoding.
-
-    Of course, none of this guarantees that we will be able to parse the
-    feed in the declared character encoding (assuming it was declared
-    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
-    you should definitely install them if you can.
-    http://cjkpython.i18n.org/
-    '''
-
-    def _parseHTTPContentType(content_type):
-        '''takes HTTP Content-Type header and returns (content type, charset)
-
-        If no charset is specified, returns (content type, '')
-        If no content type is specified, returns ('', '')
-        Both return parameters are guaranteed to be lowercase strings
-        '''
-        content_type = content_type or ''
-        content_type, params = cgi.parse_header(content_type)
-        return content_type, params.get('charset', '').replace("'", '')
-
-    sniffed_xml_encoding = ''
-    xml_encoding = ''
-    true_encoding = ''
-    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
-    # Must sniff for non-ASCII-compatible character encodings before
-    # searching for XML declaration.  This heuristic is defined in
-    # section F of the XML specification:
-    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
-    try:
-        if xml_data[:4] == '\x4c\x6f\xa7\x94':
-            # EBCDIC
-            xml_data = _ebcdic_to_ascii(xml_data)
-        elif xml_data[:4] == '\x00\x3c\x00\x3f':
-            # UTF-16BE
-            sniffed_xml_encoding = 'utf-16be'
-            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
-        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
-            # UTF-16BE with BOM
-            sniffed_xml_encoding = 'utf-16be'
-            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
-        elif xml_data[:4] == '\x3c\x00\x3f\x00':
-            # UTF-16LE
-            sniffed_xml_encoding = 'utf-16le'
-            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
-        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
-            # UTF-16LE with BOM
-            sniffed_xml_encoding = 'utf-16le'
-            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
-        elif xml_data[:4] == '\x00\x00\x00\x3c':
-            # UTF-32BE
-            sniffed_xml_encoding = 'utf-32be'
-            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
-        elif xml_data[:4] == '\x3c\x00\x00\x00':
-            # UTF-32LE
-            sniffed_xml_encoding = 'utf-32le'
-            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
-        elif xml_data[:4] == '\x00\x00\xfe\xff':
-            # UTF-32BE with BOM
-            sniffed_xml_encoding = 'utf-32be'
-            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
-        elif xml_data[:4] == '\xff\xfe\x00\x00':
-            # UTF-32LE with BOM
-            sniffed_xml_encoding = 'utf-32le'
-            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
-        elif xml_data[:3] == '\xef\xbb\xbf':
-            # UTF-8 with BOM
-            sniffed_xml_encoding = 'utf-8'
-            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
-        else:
-            # ASCII-compatible
-            pass
-        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
-    except:
-        xml_encoding_match = None
-    if xml_encoding_match:
-        xml_encoding = xml_encoding_match.groups()[0].lower()
-        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
-            xml_encoding = sniffed_xml_encoding
-    acceptable_content_type = 0
-    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
-    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
-    if (http_content_type in application_content_types) or \
-       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
-        acceptable_content_type = 1
-        true_encoding = http_encoding or xml_encoding or 'utf-8'
-    elif (http_content_type in text_content_types) or \
-         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
-        acceptable_content_type = 1
-        true_encoding = http_encoding or 'us-ascii'
-    elif http_content_type.startswith('text/'):
-        true_encoding = http_encoding or 'us-ascii'
-    elif http_headers and (not http_headers.has_key('content-type')):
-        true_encoding = xml_encoding or 'iso-8859-1'
-    else:
-        true_encoding = xml_encoding or 'utf-8'
-    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
-    
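
A concrete instance of the precedence rules from the docstring, shown with the module-private helper purely for illustration: for an application/* XML type the HTTP charset beats the in-document declaration, while a bare text/xml ignores the document and falls back to us-ascii.

    headers = {'content-type': 'application/xml; charset=iso-8859-2'}
    body = '<?xml version="1.0" encoding="utf-8"?><feed/>'
    _getCharacterEncoding(headers, body)[0]   # -> 'iso-8859-2'

    headers = {'content-type': 'text/xml'}
    _getCharacterEncoding(headers, body)[0]   # -> 'us-ascii'
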
-def _toUTF8(data, encoding):
-    '''Changes an XML data stream on the fly to specify a new encoding
-
-    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
-    encoding is a string recognized by encodings.aliases
-    '''
-    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
-    # strip Byte Order Mark (if present)
-    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
-        if _debug:
-            sys.stderr.write('stripping BOM\n')
-            if encoding != 'utf-16be':
-                sys.stderr.write('trying utf-16be instead\n')
-        encoding = 'utf-16be'
-        data = data[2:]
-    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
-        if _debug:
-            sys.stderr.write('stripping BOM\n')
-            if encoding != 'utf-16le':
-                sys.stderr.write('trying utf-16le instead\n')
-        encoding = 'utf-16le'
-        data = data[2:]
-    elif data[:3] == '\xef\xbb\xbf':
-        if _debug:
-            sys.stderr.write('stripping BOM\n')
-            if encoding != 'utf-8':
-                sys.stderr.write('trying utf-8 instead\n')
-        encoding = 'utf-8'
-        data = data[3:]
-    elif data[:4] == '\x00\x00\xfe\xff':
-        if _debug:
-            sys.stderr.write('stripping BOM\n')
-            if encoding != 'utf-32be':
-                sys.stderr.write('trying utf-32be instead\n')
-        encoding = 'utf-32be'
-        data = data[4:]
-    elif data[:4] == '\xff\xfe\x00\x00':
-        if _debug:
-            sys.stderr.write('stripping BOM\n')
-            if encoding != 'utf-32le':
-                sys.stderr.write('trying utf-32le instead\n')
-        encoding = 'utf-32le'
-        data = data[4:]
-    newdata = unicode(data, encoding)
-    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
-    declmatch = re.compile('^<\?xml[^>]*?>')
-    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
-    if declmatch.search(newdata):
-        newdata = declmatch.sub(newdecl, newdata)
-    else:
-        newdata = newdecl + u'\n' + newdata
-    return newdata.encode('utf-8')
-
-def _stripDoctype(data):
-    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
-
-    rss_version may be 'rss091n' or None
-    stripped_data is the same XML document, minus the DOCTYPE
-    '''
-    entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
-    data = entity_pattern.sub('', data)
-    doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
-    doctype_results = doctype_pattern.findall(data)
-    doctype = doctype_results and doctype_results[0] or ''
-    if doctype.lower().count('netscape'):
-        version = 'rss091n'
-    else:
-        version = None
-    data = doctype_pattern.sub('', data)
-    return version, data
-    
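
Illustration of the contract above (again a private helper): a Netscape RSS 0.91 DOCTYPE both identifies the version and is removed from the data.

    doc = ('<!DOCTYPE rss PUBLIC '
           '"-//Netscape Communications//DTD RSS 0.91//EN" '
           '"http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss/>')
    version, stripped = _stripDoctype(doc)
    # version == 'rss091n', stripped == '<rss/>'
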
-def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
-    '''Parse a feed from a URL, file, stream, or string'''
-    result = FeedParserDict()
-    result['feed'] = FeedParserDict()
-    result['entries'] = []
-    if _XML_AVAILABLE:
-        result['bozo'] = 0
-    if type(handlers) == types.InstanceType:
-        handlers = [handlers]
-    try:
-        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
-        data = f.read()
-    except Exception, e:
-        result['bozo'] = 1
-        result['bozo_exception'] = e
-        data = ''
-        f = None
-
-    # if feed is gzip-compressed, decompress it
-    if f and data and hasattr(f, 'headers'):
-        if gzip and f.headers.get('content-encoding', '') == 'gzip':
-            try:
-                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
-            except Exception, e:
-                # Some feeds claim to be gzipped but they're not, so
-                # we get garbage.  Ideally, we should re-request the
-                # feed without the 'Accept-encoding: gzip' header,
-                # but we don't.
-                result['bozo'] = 1
-                result['bozo_exception'] = e
-                data = ''
-        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
-            try:
-                data = zlib.decompress(data, -zlib.MAX_WBITS)
-            except Exception, e:
-                result['bozo'] = 1
-                result['bozo_exception'] = e
-                data = ''
-
-    # save HTTP headers
-    if hasattr(f, 'info'):
-        info = f.info()
-        result['etag'] = info.getheader('ETag')
-        last_modified = info.getheader('Last-Modified')
-        if last_modified:
-            result['modified'] = _parse_date(last_modified)
-    if hasattr(f, 'url'):
-        result['href'] = f.url
-        result['status'] = 200
-    if hasattr(f, 'status'):
-        result['status'] = f.status
-    if hasattr(f, 'headers'):
-        result['headers'] = f.headers.dict
-    if hasattr(f, 'close'):
-        f.close()
-
-    # there are four encodings to keep track of:
-    # - http_encoding is the encoding declared in the Content-Type HTTP header
-    # - xml_encoding is the encoding declared in the <?xml declaration
-    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
-    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
-    http_headers = result.get('headers', {})
-    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
-        _getCharacterEncoding(http_headers, data)
-    if http_headers and (not acceptable_content_type):
-        if http_headers.has_key('content-type'):
-            bozo_message = '%s is not an XML media type' % http_headers['content-type']
-        else:
-            bozo_message = 'no Content-type specified'
-        result['bozo'] = 1
-        result['bozo_exception'] = NonXMLContentType(bozo_message)
-        
-    result['version'], data = _stripDoctype(data)
-
-    baseuri = http_headers.get('content-location', result.get('href'))
-    baselang = http_headers.get('content-language', None)
-
-    # if server sent 304, we're done
-    if result.get('status', 0) == 304:
-        result['version'] = ''
-        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
-            'so the server sent no data.  This is a feature, not a bug!'
-        return result
-
-    # if there was a problem downloading, we're done
-    if not data:
-        return result
-
-    # determine character encoding
-    use_strict_parser = 0
-    known_encoding = 0
-    tried_encodings = []
-    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
-    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
-        if not proposed_encoding: continue
-        if proposed_encoding in tried_encodings: continue
-        tried_encodings.append(proposed_encoding)
-        try:
-            data = _toUTF8(data, proposed_encoding)
-            known_encoding = use_strict_parser = 1
-            break
-        except:
-            pass
-    # if no luck and we have auto-detection library, try that
-    if (not known_encoding) and chardet:
-        try:
-            proposed_encoding = chardet.detect(data)['encoding']
-            if proposed_encoding and (proposed_encoding not in tried_encodings):
-                tried_encodings.append(proposed_encoding)
-                data = _toUTF8(data, proposed_encoding)
-                known_encoding = use_strict_parser = 1
-        except:
-            pass
-    # if still no luck and we haven't tried utf-8 yet, try that
-    if (not known_encoding) and ('utf-8' not in tried_encodings):
-        try:
-            proposed_encoding = 'utf-8'
-            tried_encodings.append(proposed_encoding)
-            data = _toUTF8(data, proposed_encoding)
-            known_encoding = use_strict_parser = 1
-        except:
-            pass
-    # if still no luck and we haven't tried windows-1252 yet, try that
-    if (not known_encoding) and ('windows-1252' not in tried_encodings):
-        try:
-            proposed_encoding = 'windows-1252'
-            tried_encodings.append(proposed_encoding)
-            data = _toUTF8(data, proposed_encoding)
-            known_encoding = use_strict_parser = 1
-        except:
-            pass
-    # if still no luck, give up
-    if not known_encoding:
-        result['bozo'] = 1
-        result['bozo_exception'] = CharacterEncodingUnknown( \
-            'document encoding unknown, I tried ' + \
-            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
-            (result['encoding'], xml_encoding))
-        result['encoding'] = ''
-    elif proposed_encoding != result['encoding']:
-        result['bozo'] = 1
-        result['bozo_exception'] = CharacterEncodingOverride( \
-            'document declared as %s, but parsed as %s' % \
-            (result['encoding'], proposed_encoding))
-        result['encoding'] = proposed_encoding
-
-    if not _XML_AVAILABLE:
-        use_strict_parser = 0
-    if use_strict_parser:
-        # initialize the SAX parser
-        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
-        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
-        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
-        saxparser.setContentHandler(feedparser)
-        saxparser.setErrorHandler(feedparser)
-        source = xml.sax.xmlreader.InputSource()
-        source.setByteStream(_StringIO(data))
-        if hasattr(saxparser, '_ns_stack'):
-            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
-            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
-            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
-        try:
-            saxparser.parse(source)
-        except Exception, e:
-            if _debug:
-                import traceback
-                traceback.print_stack()
-                traceback.print_exc()
-                sys.stderr.write('xml parsing failed\n')
-            result['bozo'] = 1
-            result['bozo_exception'] = feedparser.exc or e
-            use_strict_parser = 0
-    if not use_strict_parser:
-        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
-        feedparser.feed(data)
-    result['feed'] = feedparser.feeddata
-    result['entries'] = feedparser.entries
-    result['version'] = result['version'] or feedparser.version
-    result['namespaces'] = feedparser.namespacesInUse
-    return result
-
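
And the top-level usage that everything above serves, per the module's public interface (URL is a placeholder):

    import feedparser

    d = feedparser.parse('http://example.org/atom10.xml')
    d['feed']['title']          # FeedParserDict also allows d.feed.title
    d.get('bozo')               # 1 if the document was not well-formed XML
    for entry in d['entries']:
        entry.get('title'), entry.get('link')
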
-if __name__ == '__main__':
-    if not sys.argv[1:]:
-        print __doc__
-        sys.exit(0)
-    else:
-        urls = sys.argv[1:]
-    zopeCompatibilityHack()
-    from pprint import pprint
-    for url in urls:
-        print url
-        print
-        result = parse(url)
-        pprint(result)
-        print
-
-#REVISION HISTORY
-#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
-#  added Simon Fell's test suite
-#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
-#2.0 - 10/19/2002
-#  JD - use inchannel to watch out for image and textinput elements which can
-#  also contain title, link, and description elements
-#  JD - check for isPermaLink='false' attribute on guid elements
-#  JD - replaced openAnything with open_resource supporting ETag and
-#  If-Modified-Since request headers
-#  JD - parse now accepts etag, modified, agent, and referrer optional
-#  arguments
-#  JD - modified parse to return a dictionary instead of a tuple so that any
-#  etag or modified information can be returned and cached by the caller
-#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
-#  because of etag/modified, return the old etag/modified to the caller to
-#  indicate why nothing is being returned
-#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
-#  useless.  Fixes the problem JD was addressing by adding it.
-#2.1 - 11/14/2002 - MAP - added gzip support
-#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
-#  start_admingeneratoragent is an example of how to handle elements with
-#  only attributes, no content.
-#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
-#  also, make sure we send the User-Agent even if urllib2 isn't available.
-#  Match any variation of backend.userland.com/rss namespace.
-#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
-#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
-#  snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
-#  project name
-#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
-#  removed unnecessary urllib code -- urllib2 should always be available anyway;
-#  return actual url, status, and full HTTP headers (as result['url'],
-#  result['status'], and result['headers']) if parsing a remote feed over HTTP --
-#  this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
-#  added the latest namespace-of-the-week for RSS 2.0
-#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
-#  User-Agent (otherwise urllib2 sends two, which confuses some servers)
-#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
-#  inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
-#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
-#  textInput, and also to return the character encoding (if specified)
-#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
-#  nested divs within content (JohnD); fixed missing sys import (JohanS);
-#  fixed regular expression to capture XML character encoding (Andrei);
-#  added support for Atom 0.3-style links; fixed bug with textInput tracking;
-#  added support for cloud (MartijnP); added support for multiple
-#  category/dc:subject (MartijnP); normalize content model: 'description' gets
-#  description (which can come from description, summary, or full content if no
-#  description), 'content' gets dict of base/language/type/value (which can come
-#  from content:encoded, xhtml:body, content, or fullitem);
-#  fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
-#  tracking; fixed bug tracking unknown tags; fixed bug tracking content when
-#  <content> element is not in default namespace (like Pocketsoap feed);
-#  resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
-#  wfw:commentRSS; resolve relative URLs within embedded HTML markup in
-#  description, xhtml:body, content, content:encoded, title, subtitle,
-#  summary, info, tagline, and copyright; added support for pingback and
-#  trackback namespaces
-#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
-#  namespaces, as opposed to 2.6 when I said I did but didn't really;
-#  sanitize HTML markup within some elements; added mxTidy support (if
-#  installed) to tidy HTML markup within some elements; fixed indentation
-#  bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
-#  (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
-#  'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
-#  'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
-#  and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
-#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;.  fixed memory
-#  leak not closing url opener (JohnD); added dc:publisher support (MarekK);
-#  added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
-#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
-#  encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
-#  fixed relative URI processing for guid (skadz); added ICBM support; added
-#  base64 support
-#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
-#  blogspot.com sites); added _debug variable
-#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
-#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
-#  added several new supported namespaces; fixed bug tracking naked markup in
-#  description; added support for enclosure; added support for source; re-added
-#  support for cloud which got dropped somehow; added support for expirationDate
-#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
-#  xml:base URI, one for documents that don't define one explicitly and one for
-#  documents that define an outer and an inner xml:base that goes out of scope
-#  before the end of the document
-#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
-#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
-#  will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
-#  added support for creativeCommons:license and cc:license; added support for
-#  full Atom content model in title, tagline, info, copyright, summary; fixed bug
-#  with gzip encoding (not always telling server we support it when we do)
-#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
-#  (dictionary of 'name', 'url', 'email'); map author to author_detail if author
-#  contains name + email address
-#3.0b8 - 1/28/2004 - MAP - added support for contributor
-#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
-#  support for summary
-#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
-#  xml.util.iso8601
-#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
-#  dangerous markup; fiddled with decodeEntities (not right); liberalized
-#  date parsing even further
-#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
-#  added support to Atom 0.2 subtitle; added support for Atom content model
-#  in copyright; better sanitizing of dangerous HTML elements with end tags
-#  (script, frameset)
-#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
-#  etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
-#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
-#  Python 2.1
-#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
-#  fixed bug capturing author and contributor URL; fixed bug resolving relative
-#  links in author and contributor URL; fixed bug resolving relative links in
-#  generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
-#  namespace tests, and included them permanently in the test suite with his
-#  permission; fixed namespace handling under Python 2.1
-#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
-#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
-#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
-#  use libxml2 (if available)
-#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
-#  name was in parentheses; removed ultra-problematic mxTidy support; patch to
-#  workaround crash in PyXML/expat when encountering invalid entities
-#  (MarkMoraes); support for textinput/textInput
-#3.0b20 - 4/7/2004 - MAP - added CDF support
-#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
-#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
-#  results dict; changed results dict to allow getting values with results.key
-#  as well as results[key]; work around embedded illformed HTML with half
-#  a DOCTYPE; work around malformed Content-Type header; if character encoding
-#  is wrong, try several common ones before falling back to regexes (if this
-#  works, bozo_exception is set to CharacterEncodingOverride); fixed character
-#  encoding issues in BaseHTMLProcessor by tracking encoding and converting
-#  from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
-#  convert each value in results to Unicode (if possible), even if using
-#  regex-based parsing
-#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
-#  high-bit characters in attributes in embedded HTML in description (thanks
-#  Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
-#  FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
-#  about a mapped key
-#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
-#  results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
-#  cause the same encoding to be tried twice (even if it failed the first time);
-#  fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
-#  better textinput and image tracking in illformed RSS 1.0 feeds
-#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
-#  my blink tag tests
-#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
-#  failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
-#  duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
-#  added support for image; refactored parse() fallback logic to try other
-#  encodings if SAX parsing fails (previously it would only try other encodings
-#  if re-encoding failed); remove unichr madness in normalize_attrs now that
-#  we're properly tracking encoding in and out of BaseHTMLProcessor; set
-#  feed.language from root-level xml:lang; set entry.id from rdf:about;
-#  send Accept header
-#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
-#  iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
-#  windows-1252); fixed regression that could cause the same encoding to be
-#  tried twice (even if it failed the first time)
-#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
-#  recover from malformed content-type header parameter with no equals sign
-#  ('text/xml; charset:iso-8859-1')
-#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
-#  to Unicode equivalents in illformed feeds (aaronsw); added and
-#  passed tests for converting character entities to Unicode equivalents
-#  in illformed feeds (aaronsw); test for valid parsers when setting
-#  XML_AVAILABLE; make version and encoding available when server returns
-#  a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
-#  digest auth or proxy support); add code to parse username/password
-#  out of url and send as basic authentication; expose downloading-related
-#  exceptions in bozo_exception (aaronsw); added __contains__ method to
-#  FeedParserDict (aaronsw); added publisher_detail (aaronsw)
-#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
-#  convert feed to UTF-8 before passing to XML parser; completely revamped
-#  logic for determining character encoding and attempting XML parsing
-#  (much faster); increased default timeout to 20 seconds; test for presence
-#  of Location header on redirects; added tests for many alternate character
-#  encodings; support various EBCDIC encodings; support UTF-16BE and
-#  UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
-#  UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
-#  XML parsers are available; added support for 'Content-encoding: deflate';
-#  send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
-#  are available
-#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
-#  problem tracking xml:base and xml:lang if element declares it, child
-#  doesn't, first grandchild redeclares it, and second grandchild doesn't;
-#  refactored date parsing; defined public registerDateHandler so callers
-#  can add support for additional date formats at runtime; added support
-#  for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
-#  zopeCompatibilityHack() which turns FeedParserDict into a regular
-#  dictionary, required for Zope compatibility, and also makes command-
-#  line debugging easier because pprint module formats real dictionaries
-#  better than dictionary-like objects; added NonXMLContentType exception,
-#  which is stored in bozo_exception when a feed is served with a non-XML
-#  media type such as 'text/plain'; respect Content-Language as default
-#  language if no xml:lang is present; cloud dict is now FeedParserDict;
-#  generator dict is now FeedParserDict; better tracking of xml:lang,
-#  including support for xml:lang='' to unset the current language;
-#  recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
-#  namespace; don't overwrite final status on redirects (scenarios:
-#  redirecting to a URL that returns 304, redirecting to a URL that
-#  redirects to another URL with a different type of redirect); add
-#  support for HTTP 303 redirects
-#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
-#  encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
-#  support for Atom 1.0; support for iTunes extensions; new 'tags' for
-#  categories/keywords/etc. as array of dict
-#  {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
-#  terminology; parse RFC 822-style dates with no time; lots of other
-#  bug fixes
-#4.1 - MAP - removed socket timeout; added support for chardet library
diff --git a/requirements.txt b/requirements.txt
index f2e3917..92428a6 100644 (file)
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,21 @@
 --find-links=http://stigma.nowoczesnapolska.org.pl/pypi/
 --find-links=http://www.pythonware.com/products/pil/
 
-Django==1.1.1
-librarian==1.2.6
-lxml==2.2.2
-Imaging==1.1.6
-mutagen==1.17
-MySQL-python>=1.2,<2.0
\ No newline at end of file
+# django
+Django>=1.1.1,<1.2
+South>=0.4 # migrations for django
+django-pagination>=1.0
+
+# Feedparser 
+Feedparser>=4.1
+
+# PIL 
+Imaging>=1.1.6
+mutagen>=1.17
+sorl-thumbnail>=3.2
+
+# homebrew & dependencies
+librarian>=1.2.6
+lxml>=2.2.2
+
+# MySQL-python>=1.2,<2.0
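
Assuming the standard pip workflow of the time, this set is installed with "pip install -r requirements.txt"; the --find-links lines at the top point pip at extra download locations for distributions (librarian, PIL) not hosted on PyPI.
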
diff --git a/wolnelektury/templates/catalogue/book_detail.html b/wolnelektury/templates/catalogue/book_detail.html
index f8991c9..089922b 100644 (file)
--- a/wolnelektury/templates/catalogue/book_detail.html
+++ b/wolnelektury/templates/catalogue/book_detail.html
             <p><img src="/static/img/indicator.gif" alt="*"/> Ładowanie</p>
         </div>
     </div>
-{% endblock %}
+{% endblock %}
\ No newline at end of file