Bug 1181211 - Figure out how to update Balrog release blobs with multiple partials. r=bhearsum
author: Rail Aliiev <rail@mozilla.com>
date: Thu, 16 Jul 2015 13:24:14 -0400
changeset 5917 da436987c292a70b5b031c6c96fc397e5d5828d6
parent 5916 04945a026458206ee776c24d958d608591a929b4
child 5918 b37c36d93058152380dc9095188d482d3ecf18d8
push id: 4410
push user: raliiev@mozilla.com
push date: Thu, 16 Jul 2015 17:24:22 +0000
reviewers: bhearsum
bugs: 1181211
.travis.yml
lib/python/balrog/submitter/api.py
lib/python/balrog/submitter/cli.py
lib/python/balrog/submitter/updates.py
lib/python/mozilla_buildtools/test/test_balrog_submitter_cli.py
lib/python/vendor/functools32-3.2.3-2/ChangeLog
lib/python/vendor/functools32-3.2.3-2/LICENSE
lib/python/vendor/functools32-3.2.3-2/MANIFEST.in
lib/python/vendor/functools32-3.2.3-2/PKG-INFO
lib/python/vendor/functools32-3.2.3-2/functools32/__init__.py
lib/python/vendor/functools32-3.2.3-2/functools32/_dummy_thread32.py
lib/python/vendor/functools32-3.2.3-2/functools32/functools32.py
lib/python/vendor/functools32-3.2.3-2/functools32/reprlib32.py
lib/python/vendor/functools32-3.2.3-2/setup.cfg
lib/python/vendor/functools32-3.2.3-2/setup.py
lib/python/vendor/functools32-3.2.3-2/test_functools32.py
lib/python/vendor/functools32-3.2.3-2/test_support27.py
lib/python/vendor/jsonmerge-1.1.0/ChangeLog
lib/python/vendor/jsonmerge-1.1.0/MANIFEST.in
lib/python/vendor/jsonmerge-1.1.0/PKG-INFO
lib/python/vendor/jsonmerge-1.1.0/README.rst
lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/PKG-INFO
lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/SOURCES.txt
lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/dependency_links.txt
lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/requires.txt
lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/top_level.txt
lib/python/vendor/jsonmerge-1.1.0/jsonmerge/__init__.py
lib/python/vendor/jsonmerge-1.1.0/jsonmerge/exceptions.py
lib/python/vendor/jsonmerge-1.1.0/jsonmerge/strategies.py
lib/python/vendor/jsonmerge-1.1.0/setup.cfg
lib/python/vendor/jsonmerge-1.1.0/setup.py
lib/python/vendor/jsonmerge-1.1.0/tests/__init__.py
lib/python/vendor/jsonmerge-1.1.0/tests/test_jsonmerge.py
lib/python/vendor/jsonmerge-1.1.0/tests/test_readme.py
lib/python/vendor/jsonschema-2.5.1/CHANGELOG.rst
lib/python/vendor/jsonschema-2.5.1/COPYING
lib/python/vendor/jsonschema-2.5.1/MANIFEST.in
lib/python/vendor/jsonschema-2.5.1/PKG-INFO
lib/python/vendor/jsonschema-2.5.1/README.rst
lib/python/vendor/jsonschema-2.5.1/json/.gitignore
lib/python/vendor/jsonschema-2.5.1/json/.travis.yml
lib/python/vendor/jsonschema-2.5.1/json/LICENSE
lib/python/vendor/jsonschema-2.5.1/json/README.md
lib/python/vendor/jsonschema-2.5.1/json/bin/jsonschema_suite
lib/python/vendor/jsonschema-2.5.1/json/remotes/folder/folderInteger.json
lib/python/vendor/jsonschema-2.5.1/json/remotes/integer.json
lib/python/vendor/jsonschema-2.5.1/json/remotes/subSchemas.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/additionalItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/additionalProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/default.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/dependencies.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/disallow.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/divisibleBy.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/enum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/extends.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/items.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maxItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maxLength.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maximum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minLength.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minimum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/optional/bignum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/optional/format.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/optional/jsregex.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/optional/zeroTerminatedFloats.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/pattern.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/patternProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/properties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/ref.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/refRemote.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/required.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/type.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/uniqueItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/additionalItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/additionalProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/allOf.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/anyOf.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/default.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/definitions.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/dependencies.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/enum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/items.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/maxItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/maxLength.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/maxProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/maximum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/minItems.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/minLength.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/minProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/minimum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/multipleOf.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/not.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/oneOf.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/optional/bignum.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/optional/format.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/optional/zeroTerminatedFloats.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/pattern.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/patternProperties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/properties.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/ref.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/refRemote.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/required.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/type.json
lib/python/vendor/jsonschema-2.5.1/json/tests/draft4/uniqueItems.json
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/PKG-INFO
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/SOURCES.txt
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/dependency_links.txt
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/entry_points.txt
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/pbr.json
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/requires.txt
lib/python/vendor/jsonschema-2.5.1/jsonschema.egg-info/top_level.txt
lib/python/vendor/jsonschema-2.5.1/jsonschema/__init__.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/__main__.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/_format.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/_reflect.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/_utils.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/_validators.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/_version.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/cli.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/compat.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/exceptions.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/schemas/draft3.json
lib/python/vendor/jsonschema-2.5.1/jsonschema/schemas/draft4.json
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/__init__.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/compat.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/test_cli.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/test_exceptions.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/test_format.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/test_jsonschema_test_suite.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/tests/test_validators.py
lib/python/vendor/jsonschema-2.5.1/jsonschema/validators.py
lib/python/vendor/jsonschema-2.5.1/setup.cfg
lib/python/vendor/jsonschema-2.5.1/setup.py
lib/python/vendor/jsonschema-2.5.1/tox.ini
lib/python/vendor/jsonschema-2.5.1/version.txt
lib/python/vendor/repoze.lru-ef418de/.gitignore
lib/python/vendor/repoze.lru-ef418de/CHANGES.txt
lib/python/vendor/repoze.lru-ef418de/CONTRIBUTORS.txt
lib/python/vendor/repoze.lru-ef418de/COPYRIGHT.txt
lib/python/vendor/repoze.lru-ef418de/LICENSE.txt
lib/python/vendor/repoze.lru-ef418de/README.txt
lib/python/vendor/repoze.lru-ef418de/docs/Makefile
lib/python/vendor/repoze.lru-ef418de/docs/_static/placeholder.txt
lib/python/vendor/repoze.lru-ef418de/docs/_templates/placeholder.txt
lib/python/vendor/repoze.lru-ef418de/docs/api.rst
lib/python/vendor/repoze.lru-ef418de/docs/conf.py
lib/python/vendor/repoze.lru-ef418de/docs/index.rst
lib/python/vendor/repoze.lru-ef418de/docs/make.bat
lib/python/vendor/repoze.lru-ef418de/docs/narr.rst
lib/python/vendor/repoze.lru-ef418de/repoze/__init__.py
lib/python/vendor/repoze.lru-ef418de/repoze/lru/__init__.py
lib/python/vendor/repoze.lru-ef418de/repoze/lru/tests.py
lib/python/vendor/repoze.lru-ef418de/setup.cfg
lib/python/vendor/repoze.lru-ef418de/setup.py
lib/python/vendor/repoze.lru-ef418de/tox.ini
lib/python/vendorlibs.pth
scripts/updates/balrog-nightly-locker.py
tox.ini
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,23 +28,8 @@ install:
 
 script:
   - tox -e $TOX_ENV
   # Remove logs to avoid cache uploads
   - rm -rf $HOME/.cache/pip/log
 
 after_script:
   - tox -e py27-coveralls
-
-# currently cannot customise per user fork, see:
-# https://github.com/travis-ci/travis-ci/issues/1094
-# please comment out this section in your personal fork!
-notifications:
-  irc:
-    channels:
-      - "irc.mozilla.org#releng"
-    on_success: always
-    on_failure: always
-    template:
-      - "\x02%{repository}\x0314#%{build_number}\x03\x02 (%{branch} - %{commit} : %{author}): \x02\x0312%{message}\x02\x03"
-      - "\x02Change view\x02 : \x0314%{compare_url}\x03"
-      - "\x02Build details\x02 : \x0314%{build_url}\x03"
-      - "\x02Commit message\x02 : \x0314%{commit_message}\x03"
--- a/lib/python/balrog/submitter/api.py
+++ b/lib/python/balrog/submitter/api.py
@@ -1,9 +1,8 @@
-# TODO: extend API to handle release blobs
 import json
 import logging
 import requests
 import os
 
 CA_BUNDLE = os.path.join(os.path.dirname(__file__),
                          '../../../../misc/certs/ca-bundle.crt')
 
@@ -17,37 +16,38 @@ def is_csrf_token_expired(token):
 
 
 class API(object):
     """A class that knows how to make requests to a Balrog server, including
        pre-retrieving CSRF tokens and data versions.
 
        url_template: The URL to submit to when request() is called. Standard
                      Python string interpolation can be used here in
-                     combination with the url_template_vars argument to
-                     request().
+                     combination with url_template_vars.
        prerequest_url_template: Before submitting the real request, a HEAD
                                 operation will be done on this URL. If the
                                 HEAD request succeeds, it is expected that
                                 there will be X-CSRF-Token and X-Data-Version
                                 headers in the response. If the HEAD request
                                 results in a 404, another HEAD request to
                                 /csrf_token will be made in an attempt to
                                 get a CSRF token. This URL can use string
                                 interpolation the same way url_template can.
                                 In some cases this may be the same as the
                                 url_template.
     """
     verify = False
     auth = None
     url_template = None
     prerequest_url_template = None
+    url_template_vars = None
 
     def __init__(self, api_root='https://aus4-admin-dev.allizom.org/api',
-                 auth=None, ca_certs=CA_BUNDLE, timeout=60, raise_exceptions=True):
+                 auth=None, ca_certs=CA_BUNDLE, timeout=60,
+                 raise_exceptions=True):
         """ Creates an API object which wraps REST API of Balrog server.
 
         api_root: API root URL of balrog server
         auth    : a tuple of (username, password) or None
         ca_certs: CA bundle. It follows python-requests `verify' usage.
                   If set to False, no SSL verification is done.
                   If set to True, it tries to load a CA bundle from certifi
                   module.
@@ -61,27 +61,27 @@ class API(object):
         assert isinstance(auth, tuple) or auth == None, \
             "auth should be set to tuple or None"
         self.auth = auth
         self.timeout = timeout
         self.raise_exceptions = raise_exceptions
         self.session = requests.session()
         self.csrf_token = None
 
-    def request(self, data=None, method='GET', url_template_vars={}):
-        url = self.api_root + self.url_template % url_template_vars
+    def request(self, data=None, method='GET'):
+        url = self.api_root + self.url_template % self.url_template_vars
         prerequest_url = self.api_root + \
-            self.prerequest_url_template % url_template_vars
+            self.prerequest_url_template % self.url_template_vars
         # If we'll be modifying things, do a GET first to get a CSRF token
         # and possibly a data_version.
         if method != 'GET' and method != 'HEAD':
             # Use the URL of the resource we're going to modify first,
             # because we'll need a CSRF token, and maybe its data version.
             try:
-                res = self.do_request(prerequest_url, None, 'HEAD', {})
+                res = self.do_request(prerequest_url, None, 'HEAD')
                 # If a data_version was specified we shouldn't overwrite it
                 # because the caller may be acting on a modified version of
                 # a specific older version of the data.
                 if 'data_version' not in data:
                     data['data_version'] = res.headers['X-Data-Version']
                 # We may already have a non-expired CSRF token, but it's
                 # faster/easier just to set it again even if we do, since
                 # we've already made the request.
@@ -90,23 +90,23 @@ class API(object):
             except requests.HTTPError, e:
                 # However, if the resource doesn't exist yet we may as well
                 # not bother doing another request solely for a token unless
                 # we don't have a valid one already.
                 if e.response.status_code != 404:
                     raise
                 if not self.csrf_token or is_csrf_token_expired(self.csrf_token):
                     res = self.do_request(
-                        self.api_root + '/csrf_token', None, 'HEAD', {})
+                        self.api_root + '/csrf_token', None, 'HEAD')
                     data['csrf_token'] = self.csrf_token = res.headers[
                         'X-CSRF-Token']
 
-        return self.do_request(url, data, method, url_template_vars)
+        return self.do_request(url, data, method)
 
-    def do_request(self, url, data, method, url_template_vars):
+    def do_request(self, url, data, method):
         logging.debug('Balrog request to %s' % url)
         if data is not None and 'csrf_token' in data:
             sanitised_data = data.copy()
             del sanitised_data['csrf_token']
             logging.debug('Data sent: %s' % sanitised_data)
         else:
             logging.debug('Data sent: %s' % data)
         headers = {'Accept-Encoding': 'application/json',
@@ -117,62 +117,103 @@ class API(object):
                 verify=self.verify, auth=self.auth, headers=headers)
             if self.raise_exceptions:
                 req.raise_for_status()
             return req
         except requests.HTTPError, e:
             logging.error('Caught HTTPError: %s' % e.response.content)
             raise
 
+    def get_data(self):
+        resp = self.request()
+        return (json.loads(resp.content), resp.headers['X-Data-Version'])
+
 
 class Release(API):
     url_template = '/releases/%(name)s'
     prerequest_url_template = '/releases/%(name)s'
 
-    def update_release(self, name, version, product, hashFunction, releaseData,
+    def __init__(self, name, **kwargs):
+        super(Release, self).__init__(**kwargs)
+        self.name = name
+        self.url_template_vars = dict(name=name)
+
+    def update_release(self, version, product, hashFunction, releaseData,
                        data_version=None, schemaVersion=None):
-        data = dict(name=name, version=version, product=product,
+        data = dict(name=self.name, version=version, product=product,
                     hashFunction=hashFunction, data=releaseData)
         if data_version:
             data['data_version'] = data_version
         if schemaVersion:
             data['schema_version'] = schemaVersion
-        return self.request(method='POST', data=data, url_template_vars=dict(name=name))
-
-    def get_data(self, name):
-        resp = self.request(url_template_vars=dict(name=name))
-        return (json.loads(resp.content), resp.headers['X-Data-Version'])
+        return self.request(method='POST', data=data)
 
 
 class SingleLocale(API):
     url_template = '/releases/%(name)s/builds/%(build_target)s/%(locale)s'
     prerequest_url_template = '/releases/%(name)s'
 
-    def update_build(self, name, product, version, build_target, locale,
-                     hashFunction, buildData, copyTo=None, alias=None,
-                     schemaVersion=None):
-        url_template_vars = dict(api_root=self.api_root, name=name,
-                                 locale=locale, build_target=build_target)
+    def __init__(self, name, build_target, locale, **kwargs):
+        super(SingleLocale, self).__init__(**kwargs)
+        self.name = name
+        self.build_target = build_target
+        self.locale = locale
+        self.url_template_vars = dict(name=name, build_target=build_target,
+                                      locale=locale)
+        # keep a copy to be used in get_data()
+        self.release_kwargs = kwargs
+
+    def get_data(self):
+        data, data_version = {}, None
+        # If the locale-specific API endpoint returns a 404, we have to use
+        # the top level blob to get the data version. Because this requires
+        # two non-atomic HTTP requests, we start with the top level blob and
+        # use its data version.
+        top_level = Release(name=self.name, **self.release_kwargs)
+        # Use data version from the top level blob
+        try:
+            _, data_version = top_level.get_data()
+        except requests.HTTPError, e:
+            if e.response.status_code == 404:
+                # top level blob doesn't exist, assume there is no data
+                return data, data_version
+            else:
+                raise
+        # Got data version. Try to get data from the locale specific blob.
+        # Using data version from the top level blob prevents possible race
+        # conditions if another client updates the locale blob between the
+        # first request and the call below.
+        try:
+            data, _ = super(SingleLocale, self).get_data()
+            return data, data_version
+        except requests.HTTPError, e:
+            if e.response.status_code == 404:
+                # locale blob doesn't exist, no data
+                return data, data_version
+            else:
+                raise
+
+    def update_build(self, product, version, hashFunction, buildData,
+                     alias=None, schemaVersion=None, data_version=None):
         data = dict(product=product, version=version,
                     data=buildData, hashFunction=hashFunction)
-        if copyTo:
-            data['copyTo'] = copyTo
         if alias:
             data['alias'] = alias
+        if data_version:
+            data['data_version'] = data_version
         if schemaVersion:
             data['schema_version'] = schemaVersion
 
-        return self.request(method='PUT', data=data,
-                            url_template_vars=url_template_vars)
+        return self.request(method='PUT', data=data)
 
 
 class Rule(API):
     url_template = '/rules/%(rule_id)s'
     prerequest_url_template = '/rules/%(rule_id)s'
 
-    def update_rule(self, rule_id, **rule_data):
-        url_template_vars = {'rule_id': rule_id}
-        return self.request(method='POST', data=rule_data,
-                            url_template_vars=url_template_vars)
+    def __init__(self, rule_id, **kwargs):
+        super(Rule, self).__init__(**kwargs)
+        self.rule_id = rule_id
+        self.url_template_vars = dict(rule_id=rule_id)
 
-    def get_data(self, rule_id):
-        resp = self.request(url_template_vars=dict(rule_id=rule_id))
-        return (json.loads(resp.content), resp.headers["X-Data-Version"])
+    def update_rule(self, **rule_data):
+        return self.request(method='POST', data=rule_data)
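
With this refactoring each API subclass is bound to a single resource at
construction time instead of passing url_template_vars to every request()
call. A minimal usage sketch follows; the release name, build target and
credentials are placeholders, not values from this changeset:

    from balrog.submitter.api import SingleLocale

    api = SingleLocale(name='Firefox-mozilla-central-nightly-20150716030202',
                       build_target='WINNT_x86-msvc', locale='en-US',
                       api_root='https://aus4-admin-dev.allizom.org/api',
                       auth=('balrogadmin', 'secret'))
    # get_data() prefers the top level blob's data version and returns empty
    # data when neither blob exists yet (404).
    data, data_version = api.get_data()
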
--- a/lib/python/balrog/submitter/cli.py
+++ b/lib/python/balrog/submitter/cli.py
@@ -4,16 +4,17 @@ except ImportError:
     import json
 
 from release.info import getProductDetails
 from release.paths import makeCandidatesDir
 from release.platforms import buildbot2updatePlatforms, buildbot2bouncer, \
   buildbot2ftp
 from release.versions import getPrettyVersion
 from balrog.submitter.api import Release, SingleLocale, Rule
+from balrog.submitter.updates import merge_partial_updates
 from util.algorithms import recursive_update
 import logging
 
 log = logging.getLogger(__name__)
 
 
 def get_nightly_blob_name(productName, branch, build_type, suffix, dummy=False):
     if dummy:
@@ -82,26 +83,25 @@ class ReleaseCreatorBase(object):
                 }
 
         return data
 
     def run(self, appVersion, productName, version, buildNumber,
             updateChannels, stagingServer, bouncerServer,
             enUSPlatforms, hashFunction, schemaVersion, openURL=None,
             **updateKwargs):
-        api = Release(auth=self.auth, api_root=self.api_root)
+        api = Release(self.name, auth=self.auth, api_root=self.api_root)
         data = self.generate_data(appVersion, productName, version,
                                   buildNumber, updateChannels,
                                   stagingServer, bouncerServer, enUSPlatforms,
                                   schemaVersion, openURL, **updateKwargs)
-        current_data, data_version = api.get_data(self.name)
+        current_data, data_version = api.get_data()
         data = recursive_update(current_data, data)
-        api = Release(auth=self.auth, api_root=self.api_root)
-        api.update_release(name=self.name,
-                           version=appVersion,
+        api = Release(name=self.name, auth=self.auth, api_root=self.api_root)
+        api.update_release(version=appVersion,
                            product=productName,
                            hashFunction=hashFunction,
                            releaseData=json.dumps(data),
                            data_version=data_version)
 
 
 class ReleaseCreatorV3(ReleaseCreatorBase):
     def run(self, *args, **kwargs):
@@ -259,49 +259,59 @@ class NightlySubmitterBase(object):
     def run(self, platform, buildID, productName, branch, appVersion, locale,
             hashFunction, extVersion, schemaVersion, isOSUpdate=None, **updateKwargs):
         assert schemaVersion in (3,4), 'Unhandled schema version %s' % schemaVersion
         targets = buildbot2updatePlatforms(platform)
         build_target = targets[0]
         alias = None
         if len(targets) > 1:
             alias = targets[1:]
-
         data = {
             'buildID': buildID,
+            'appVersion': appVersion,
+            'platformVersion': extVersion,
+            'displayVersion': appVersion,
         }
-        data['appVersion'] = appVersion
-        data['platformVersion'] = extVersion
-        data['displayVersion'] = appVersion
         if isOSUpdate:
             data['isOSUpdate'] = isOSUpdate
 
         data.update(self._get_update_data(productName, branch, **updateKwargs))
 
         if platform == 'android-api-9':
             # Bug 1080749 - a hack to support api-9 and api-10+ split builds.
             # Like bug 1055305, this is a hack to support two builds with the
             # same build target that require different release blobs and rules.
             build_type = 'api-9-%s' % self.build_type
         else:
             build_type = self.build_type
 
         name = get_nightly_blob_name(productName, branch, build_type, buildID, self.dummy)
-        data = json.dumps(data)
-        api = SingleLocale(auth=self.auth, api_root=self.api_root)
-        copyTo = [get_nightly_blob_name(
-            productName, branch, build_type, 'latest', self.dummy)]
-        copyTo = json.dumps(copyTo)
+        api = SingleLocale(name=name, build_target=build_target, locale=locale,
+                           auth=self.auth, api_root=self.api_root)
+        current_data, data_version = api.get_data()
+        # Merge the new update data into the existing blob. The data version
+        # is passed to update_build() explicitly so that concurrent changes
+        # can be detected.
+        data = json.dumps(merge_partial_updates(current_data, data))
         alias = json.dumps(alias)
-        api.update_build(name=name, product=productName,
-                         build_target=build_target,
-                         version=appVersion, locale=locale,
-                         hashFunction=hashFunction,
-                         buildData=data, copyTo=copyTo, alias=alias,
-                         schemaVersion=schemaVersion)
+        api.update_build(
+            product=productName, version=appVersion, hashFunction=hashFunction,
+            buildData=data, alias=alias, schemaVersion=schemaVersion,
+            data_version=data_version)
+        latest = SingleLocale(
+            api_root=self.api_root, auth=self.auth,
+            name=get_nightly_blob_name(productName, branch, build_type,
+                                       'latest', self.dummy),
+            build_target=build_target, locale=locale)
+        # copy everything over using target release's data version
+        _, latest_data_version = latest.get_data()
+        source_data, _ = api.get_data()
+        source_data = json.dumps(source_data)
+        latest.update_build(
+            product=productName, version=appVersion, hashFunction=hashFunction,
+            buildData=source_data, alias=alias, schemaVersion=schemaVersion,
+            data_version=latest_data_version)
 
 
 class MultipleUpdatesNightlyMixin(object):
 
     def _get_update_data(self, productName, branch, completeInfo=None,
                          partialInfo=None):
         data = {}
 
@@ -369,22 +379,22 @@ class ReleaseSubmitterBase(object):
         }
         data['appVersion'] = appVersion
         data['platformVersion'] = extVersion
         data['displayVersion'] = getPrettyVersion(version)
 
         data.update(self._get_update_data(productName, version, build_number, **updateKwargs))
 
         data = json.dumps(data)
-        api = SingleLocale(auth=self.auth, api_root=self.api_root)
+        api = SingleLocale(name=name, build_target=build_target, locale=locale,
+                           auth=self.auth, api_root=self.api_root)
         schemaVersion = json.dumps(schemaVersion)
-        api.update_build(name=name, product=productName,
-                         build_target=build_target, version=appVersion,
-                         locale=locale, hashFunction=hashFunction,
-                         buildData=data, schemaVersion=schemaVersion)
+        api.update_build(
+            product=productName, version=appVersion, hashFunction=hashFunction,
+            buildData=data, schemaVersion=schemaVersion)
 
 
 class MultipleUpdatesReleaseMixin(object):
     def _get_update_data(self, productName, version, build_number,
                          completeInfo=None, partialInfo=None):
         data = {}
 
         if completeInfo:
@@ -429,26 +439,28 @@ class ReleasePusher(object):
     def __init__(self, api_root, auth, dummy=False):
         self.api_root = api_root
         self.auth = auth
         self.dummy = dummy
 
     def run(self, productName, version, build_number, rule_ids):
         name = get_release_blob_name(productName, version, build_number,
                                      self.dummy)
-        api = Rule(auth=self.auth, api_root=self.api_root)
-        for id_ in rule_ids:
-            api.update_rule(id_, mapping=name)
+        for rule_id in rule_ids:
+            Rule(api_root=self.api_root, auth=self.auth, rule_id=rule_id
+                 ).update_rule(mapping=name)
 
 
 class BlobTweaker(object):
     def __init__(self, api_root, auth):
         self.api_root = api_root
         self.auth = auth
 
     def run(self, name, data):
-        api = Release(auth=self.auth, api_root=self.api_root)
-        current_data, data_version = api.get_data(name)
+        api = Release(name=name, auth=self.auth, api_root=self.api_root)
+        current_data, data_version = api.get_data()
         data = recursive_update(current_data, data)
-        api.update_release(name, data['appVersion'], name.split('-')[0],
-                           data['hashFunction'], json.dumps(data), data_version,
-                           schemaVersion=current_data['schema_version'])
+        api.update_release(
+            version=data['appVersion'], product=name.split('-')[0],
+            hashFunction=data['hashFunction'], releaseData=json.dumps(data),
+            data_version=data_version,
+            schemaVersion=current_data['schema_version'])
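
The nightly flow above replaces the server-side copyTo mechanism with two
explicit writes: the buildID-specific blob is updated with merged partials
first, then its contents are copied into the matching "-latest" blob using
that blob's own data version. A condensed sketch of the flow; the names,
schema version and credentials are illustrative:

    import json
    from balrog.submitter.api import SingleLocale
    from balrog.submitter.updates import merge_partial_updates

    def submit_nightly(api_root, auth, name, latest_name, build_target,
                       locale, product, version, hash_fn, new_data):
        api = SingleLocale(name=name, build_target=build_target,
                           locale=locale, api_root=api_root, auth=auth)
        current_data, data_version = api.get_data()
        # Merge the new partials into whatever the buildID blob already has.
        merged = json.dumps(merge_partial_updates(current_data, new_data))
        api.update_build(product=product, version=version,
                         hashFunction=hash_fn, buildData=merged,
                         schemaVersion=4, data_version=data_version)
        # Copy the merged result into the "-latest" blob, using that blob's
        # own data version so a concurrent writer surfaces as a conflict
        # instead of being silently overwritten.
        latest = SingleLocale(name=latest_name, build_target=build_target,
                              locale=locale, api_root=api_root, auth=auth)
        _, latest_data_version = latest.get_data()
        source_data, _ = api.get_data()
        latest.update_build(product=product, version=version,
                            hashFunction=hash_fn,
                            buildData=json.dumps(source_data),
                            schemaVersion=4,
                            data_version=latest_data_version)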
 
new file mode 100644
--- /dev/null
+++ b/lib/python/balrog/submitter/updates.py
@@ -0,0 +1,26 @@
+import site
+import os
+
+site.addsitedir(os.path.join(os.path.dirname(__file__), "..", ".."))
+import jsonmerge
+
+
+def merge_partial_updates(base_obj, new_obj):
+    """Merges 2 update objects, merging partials and replacing completes"""
+    schema = {
+        "properties": {
+            # Merge partials using fileUrl as an identifier field
+            "partials": {
+                "mergeStrategy": "arrayMergeById",
+                "mergeOptions": {
+                    "idRef": "fileUrl"
+                }
+            },
+            # Replace completes - we don't usually have more than one
+            "completes": {
+                "mergeStrategy": "overwrite"
+            }
+        }
+    }
+    merger = jsonmerge.Merger(schema=schema)
+    return merger.merge(base_obj, new_obj)
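
The arrayMergeById strategy above matches partials by fileUrl: resubmitting
a partial replaces its previous entry in place, while partials with new
fileUrls are appended; completes use the overwrite strategy, so the new
list replaces the old one wholesale. A small illustration of the merge
semantics, with made-up URLs:

    from balrog.submitter.updates import merge_partial_updates

    base = {'partials': [{'fileUrl': 'https://partial1', 'from': '111'}],
            'completes': [{'fileUrl': 'https://complete1', 'from': '*'}]}
    new = {'partials': [{'fileUrl': 'https://partial2', 'from': '112'}],
           'completes': [{'fileUrl': 'https://complete2', 'from': '*'}]}

    merged = merge_partial_updates(base, new)
    # merged['partials'] now contains both partial1 and partial2;
    # merged['completes'] contains only complete2.
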
--- a/lib/python/mozilla_buildtools/test/test_balrog_submitter_cli.py
+++ b/lib/python/mozilla_buildtools/test/test_balrog_submitter_cli.py
@@ -1,12 +1,15 @@
-import unittest
-import sys
+try:
+    # Python 2.6 backport with assertDictEqual()
+    import unittest2 as unittest
+except ImportError:
+    import unittest
 from balrog.submitter.cli import NightlySubmitterBase, NightlySubmitterV4
-
+from balrog.submitter.updates import merge_partial_updates
 
 class TestNightlySubmitterBase(unittest.TestCase):
 
     def test_replace_canocical_url(self):
         url_replacements = [
             ("ftp.mozilla.org", "download.cdn.mozilla.net")
         ]
         submitter = NightlySubmitterBase(api_root=None, auth=None,
@@ -16,18 +19,16 @@ class TestNightlySubmitterBase(unittest.
             submitter._replace_canocical_url(
                 'http://ftp.mozilla.org/pub/mozilla.org/some/file')
         )
 
 
 class TestNightlySubmitterV4(unittest.TestCase):
 
     def test_canonical_ur_replacement(self):
-        if sys.version_info < (2, 7):
-            return
         url_replacements = [
             ("ftp.mozilla.org", "download.cdn.mozilla.net")
         ]
         submitter = NightlySubmitterV4(api_root=None, auth=None,
                                        url_replacements=url_replacements)
         completeInfo = [{
             'size': 123,
             'hash': 'abcd',
@@ -39,26 +40,113 @@ class TestNightlySubmitterV4(unittest.Te
             {'completes': [{
                 'fileUrl': 'http://download.cdn.mozilla.net/url',
                 'filesize': 123,
                 'from': '*',
                 'hashValue': 'abcd'
             }]})
 
     def test_no_canonical_ur_replacement(self):
-        if sys.version_info < (2, 7):
-            return
         submitter = NightlySubmitterV4(api_root=None, auth=None,
                                        url_replacements=None)
         completeInfo = [{
             'size': 123,
             'hash': 'abcd',
             'url': 'http://ftp.mozilla.org/url'
         }]
         data = submitter._get_update_data("prod", "brnch", completeInfo)
         self.assertDictEqual(
             data,
             {'completes': [{
                 'fileUrl': 'http://ftp.mozilla.org/url',
                 'filesize': 123,
                 'from': '*',
                 'hashValue': 'abcd'
             }]})
+
+class TestUpdateMerger(unittest.TestCase):
+    def test_merge_updates(self):
+        old_data = {
+            'some_other_field': "123",
+            'some_other_field2': {"a": "b", "c": 1},
+            'some_other_list': [1, 2, 3],
+            'completes': [
+                {
+                    'fileUrl': 'https://complete1',
+                    'filesize': 123,
+                    'from': '*',
+                    'hashValue': '123abcdef'
+                },
+            ],
+            'partials': [
+                {
+                    'fileUrl': 'https://partial1',
+                    'filesize': 111,
+                    'from': '111',
+                    'hashValue': '123abc'
+                },
+                {
+                    'fileUrl': 'https://partial2',
+                    'filesize': 112,
+                    'from': '112',
+                    'hashValue': '223abc'
+                },
+            ]
+        }
+        new_data = {
+            'completes': [
+                {
+                    'fileUrl': 'https://complete2',
+                    'filesize': 122,
+                    'from': '*',
+                    'hashValue': '122abcdef'
+                },
+            ],
+            'partials': [
+                {
+                    'fileUrl': 'https://partial2',
+                    'filesize': 112,
+                    'from': '112',
+                    'hashValue': '223abc'
+                },
+                {
+                    'fileUrl': 'https://partial3',
+                    'filesize': 113,
+                    'from': '113',
+                    'hashValue': '323abc'
+                },
+            ]
+        }
+        merged = merge_partial_updates(old_data, new_data)
+        expected_merged = {
+            'some_other_field': "123",
+            'some_other_field2': {"a": "b", "c": 1},
+            'some_other_list': [1, 2, 3],
+            'completes': [
+                {
+                    'fileUrl': 'https://complete2',
+                    'filesize': 122,
+                    'from': '*',
+                    'hashValue': '122abcdef'
+                },
+            ],
+            'partials': [
+                {
+                    'fileUrl': 'https://partial1',
+                    'filesize': 111,
+                    'from': '111',
+                    'hashValue': '123abc'
+                },
+                {
+                    'fileUrl': 'https://partial2',
+                    'filesize': 112,
+                    'from': '112',
+                    'hashValue': '223abc'
+                },
+                {
+                    'fileUrl': 'https://partial3',
+                    'filesize': 113,
+                    'from': '113',
+                    'hashValue': '323abc'
+                },
+            ]
+        }
+        self.assertDictEqual(merged, expected_merged)
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/ChangeLog
@@ -0,0 +1,17 @@
+------------------
+2015-07-11 3.2.3-2
+------------------
+
+- Fix `thread` module import on Python 2.7 (@nmussat)
+
+------------------
+2012-09-19 3.2.3-1
+------------------
+
+- Fix import errors
+
+----------------
+2012-09-18 3.2.3
+----------------
+
+This was the first release. Roughly equivalent to Python 3.2.3.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/LICENSE
@@ -0,0 +1,289 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC.  Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com).  In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property.  Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition).  Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+    Release         Derived     Year        Owner       GPL-
+                    from                                compatible? (1)
+
+    0.9.0 thru 1.2              1991-1995   CWI         yes
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
+    1.6             1.5.2       2000        CNRI        no
+    2.0             1.6         2000        BeOpen.com  no
+    1.6.1           1.6         2001        CNRI        yes (2)
+    2.1             2.0+1.6.1   2001        PSF         no
+    2.0.1           2.0+1.6.1   2001        PSF         yes
+    2.1.1           2.1+2.0.1   2001        PSF         yes
+    2.2             2.1.1       2001        PSF         yes
+    2.1.2           2.1.1       2002        PSF         yes
+    2.1.3           2.1.2       2002        PSF         yes
+    2.2.1           2.2         2002        PSF         yes
+    2.2.2           2.2.1       2002        PSF         yes
+    2.2.3           2.2.2       2003        PSF         yes
+    2.3             2.2.2       2002-2003   PSF         yes
+    2.3.1           2.3         2002-2003   PSF         yes
+    2.3.2           2.3.1       2002-2003   PSF         yes
+    2.3.3           2.3.2       2002-2003   PSF         yes
+    2.3.4           2.3.3       2004        PSF         yes
+    2.3.5           2.3.4       2005        PSF         yes
+    2.4             2.3         2004        PSF         yes
+    2.4.1           2.4         2005        PSF         yes
+    2.4.2           2.4.1       2005        PSF         yes
+    2.4.3           2.4.2       2006        PSF         yes
+    2.4.4           2.4.3       2006        PSF         yes
+    2.5             2.4         2006        PSF         yes
+    2.5.1           2.5         2007        PSF         yes
+    2.5.2           2.5.1       2008        PSF         yes
+    2.5.3           2.5.2       2008        PSF         yes
+    2.6             2.5         2008        PSF         yes
+    2.6.1           2.6         2008        PSF         yes
+    2.6.2           2.6.1       2009        PSF         yes
+    2.6.3           2.6.2       2009        PSF         yes
+    2.6.4           2.6.3       2009        PSF         yes
+    2.6.5           2.6.4       2010        PSF         yes
+    3.0             2.6         2008        PSF         yes
+    3.0.1           3.0         2009        PSF         yes
+    3.1             3.0.1       2009        PSF         yes
+    3.1.1           3.1         2009        PSF         yes
+    3.1.2           3.1.1       2010        PSF         yes
+    3.1.3           3.1.2       2010        PSF         yes
+    3.1.4           3.1.3       2011        PSF         yes
+    3.2             3.1         2011        PSF         yes
+    3.2.1           3.2         2011        PSF         yes
+    3.2.2           3.2.1       2011        PSF         yes
+    3.2.3           3.2.2       2012        PSF         yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+    the GPL.  All Python licenses, unlike the GPL, let you distribute
+    a modified version without making your changes open source.  The
+    GPL-compatible licenses make it possible to combine Python with
+    other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+    because its license has a choice of law clause.  According to
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+    is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012 Python Software Foundation; All Rights Reserved" are retained in Python
+alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee.  This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions.  Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee.  This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party.  As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee.  Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement.  This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013.  This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee.  This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+        ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands.  All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/MANIFEST.in
@@ -0,0 +1,12 @@
+include *.txt
+include LICENSE*
+include setup.py
+include setup.cfg
+include ChangeLog
+include MANIFEST.in
+
+include *.py
+
+prune build
+prune dist
+prune .git*
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/PKG-INFO
@@ -0,0 +1,13 @@
+Metadata-Version: 1.0
+Name: functools32
+Version: 3.2.3-2
+Summary: Backport of the functools module from Python 3.2.3 for use on 2.7 and PyPy.
+Home-page: https://github.com/MiCHiLU/python-functools32
+Author: ENDOH takanao
+Author-email: djmchl@gmail.com
+License: PSF license
+Description: 
+        This is a backport of the functools standard library module from
+        Python 3.2.3 for use on Python 2.7 and PyPy. It includes
+        new features `lru_cache` (Least-recently-used cache decorator).
+Platform: UNKNOWN
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/functools32/__init__.py
@@ -0,0 +1,1 @@
+from .functools32 import *
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/functools32/_dummy_thread32.py
@@ -0,0 +1,158 @@
+"""Drop-in replacement for the thread module.
+
+Meant to be used as a brain-dead substitute so that threaded code does
+not need to be rewritten for when the thread module is not present.
+
+Suggested usage is::
+
+    try:
+        try:
+            import _thread  # Python >= 3
+        except:
+            import thread as _thread  # Python < 3
+    except ImportError:
+        import _dummy_thread as _thread
+
+"""
+# Exports only things specified by thread documentation;
+# skipping obsolete synonyms allocate(), start_new(), exit_thread().
+__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
+           'interrupt_main', 'LockType']
+
+# A dummy value
+TIMEOUT_MAX = 2**31
+
+# NOTE: this module can be imported early in the extension building process,
+# and so top level imports of other modules should be avoided.  Instead, all
+# imports are done when needed on a function-by-function basis.  Since threads
+# are disabled, the import lock should not be an issue anyway (??).
+
+class error(Exception):
+    """Dummy implementation of _thread.error."""
+
+    def __init__(self, *args):
+        self.args = args
+
+def start_new_thread(function, args, kwargs={}):
+    """Dummy implementation of _thread.start_new_thread().
+
+    Compatibility is maintained by making sure that ``args`` is a
+    tuple and ``kwargs`` is a dictionary.  If an exception is raised
+    and it is SystemExit (which can be done by _thread.exit()) it is
+    caught and nothing is done; all other exceptions are printed out
+    by using traceback.print_exc().
+
+    If the executed function calls interrupt_main the KeyboardInterrupt will be
+    raised when the function returns.
+
+    """
+    if type(args) != type(tuple()):
+        raise TypeError("2nd arg must be a tuple")
+    if type(kwargs) != type(dict()):
+        raise TypeError("3rd arg must be a dict")
+    global _main
+    _main = False
+    try:
+        function(*args, **kwargs)
+    except SystemExit:
+        pass
+    except:
+        import traceback
+        traceback.print_exc()
+    _main = True
+    global _interrupt
+    if _interrupt:
+        _interrupt = False
+        raise KeyboardInterrupt
+
+def exit():
+    """Dummy implementation of _thread.exit()."""
+    raise SystemExit
+
+def get_ident():
+    """Dummy implementation of _thread.get_ident().
+
+    Since this module should only be used when _threadmodule is not
+    available, it is safe to assume that the current process is the
+    only thread.  Thus a constant can be safely returned.
+    """
+    return -1
+
+def allocate_lock():
+    """Dummy implementation of _thread.allocate_lock()."""
+    return LockType()
+
+def stack_size(size=None):
+    """Dummy implementation of _thread.stack_size()."""
+    if size is not None:
+        raise error("setting thread stack size not supported")
+    return 0
+
+class LockType(object):
+    """Class implementing dummy implementation of _thread.LockType.
+
+    Compatibility is maintained by maintaining self.locked_status
+    which is a boolean that stores the state of the lock.  Pickling of
+    the lock, though, should not be done since if the _thread module is
+    then used with an unpickled ``lock()`` from here problems could
+    occur from this class not having atomic methods.
+
+    """
+
+    def __init__(self):
+        self.locked_status = False
+
+    def acquire(self, waitflag=None, timeout=-1):
+        """Dummy implementation of acquire().
+
+        For blocking calls (``waitflag`` omitted or true),
+        self.locked_status is set to True and True is returned.  For
+        non-blocking calls, the status is checked first and is only
+        set if the lock is not already held; otherwise False is
+        returned.  This keeps threading.Condition's assert statements
+        from being triggered.
+
+        """
+        if waitflag is None or waitflag:
+            self.locked_status = True
+            return True
+        else:
+            if not self.locked_status:
+                self.locked_status = True
+                return True
+            else:
+                if timeout > 0:
+                    import time
+                    time.sleep(timeout)
+                return False
+
+    __enter__ = acquire
+
+    def __exit__(self, typ, val, tb):
+        self.release()
+
+    def release(self):
+        """Release the dummy lock."""
+        # XXX Perhaps shouldn't actually bother to test?  Could lead
+        #     to problems for complex, threaded code.
+        if not self.locked_status:
+            raise error
+        self.locked_status = False
+        return True
+
+    def locked(self):
+        return self.locked_status
+
+# Used to signal that interrupt_main was called in a "thread"
+_interrupt = False
+# True when not executing in a "thread"
+_main = True
+
+def interrupt_main():
+    """Set _interrupt flag to True to have start_new_thread raise
+    KeyboardInterrupt upon exiting."""
+    if _main:
+        raise KeyboardInterrupt
+    else:
+        global _interrupt
+        _interrupt = True
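
A minimal usage sketch (assuming the vendored functools32 directory is on
sys.path; the module and names below are the real ones from the file above)::

    from functools32 import _dummy_thread32 as _thread

    lock = _thread.allocate_lock()   # returns a LockType instance
    with lock:                       # __enter__ is acquire(); always succeeds
        assert lock.locked()
    assert not lock.locked()         # __exit__ called release()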
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/functools32/functools32.py
@@ -0,0 +1,423 @@
+"""functools.py - Tools for working with functions and callable objects
+"""
+# Python module wrapper for _functools C module
+# to allow utilities written in Python to be added
+# to the functools module.
+# Written by Nick Coghlan <ncoghlan at gmail.com>
+# and Raymond Hettinger <python at rcn.com>
+#   Copyright (C) 2006-2010 Python Software Foundation.
+# See C source code for _functools credits/copyright
+
+__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
+           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
+
+from _functools import partial, reduce
+from collections import MutableMapping, namedtuple
+from .reprlib32 import recursive_repr as _recursive_repr
+from weakref import proxy as _proxy
+import sys as _sys
+try:
+    from thread import allocate_lock as Lock
+except ImportError:
+    from ._dummy_thread32 import allocate_lock as Lock
+
+################################################################################
+### OrderedDict
+################################################################################
+
+class _Link(object):
+    __slots__ = 'prev', 'next', 'key', '__weakref__'
+
+class OrderedDict(dict):
+    'Dictionary that remembers insertion order'
+    # An inherited dict maps keys to values.
+    # The inherited dict provides __getitem__, __len__, __contains__, and get.
+    # The remaining methods are order-aware.
+    # Big-O running times for all methods are the same as regular dictionaries.
+
+    # The internal self.__map dict maps keys to links in a doubly linked list.
+    # The circular doubly linked list starts and ends with a sentinel element.
+    # The sentinel element never gets deleted (this simplifies the algorithm).
+    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
+    # The prev links are weakref proxies (to prevent circular references).
+    # Individual links are kept alive by the hard reference in self.__map.
+    # Those hard references disappear when a key is deleted from an OrderedDict.
+
+    def __init__(self, *args, **kwds):
+        '''Initialize an ordered dictionary.  The signature is the same as
+        regular dictionaries, but keyword arguments are not recommended because
+        their insertion order is arbitrary.
+
+        '''
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        try:
+            self.__root
+        except AttributeError:
+            self.__hardroot = _Link()
+            self.__root = root = _proxy(self.__hardroot)
+            root.prev = root.next = root
+            self.__map = {}
+        self.__update(*args, **kwds)
+
+    def __setitem__(self, key, value,
+                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
+        'od.__setitem__(i, y) <==> od[i]=y'
+        # Setting a new item creates a new link at the end of the linked list,
+        # and the inherited dictionary is updated with the new key/value pair.
+        if key not in self:
+            self.__map[key] = link = Link()
+            root = self.__root
+            last = root.prev
+            link.prev, link.next, link.key = last, root, key
+            last.next = link
+            root.prev = proxy(link)
+        dict_setitem(self, key, value)
+
+    def __delitem__(self, key, dict_delitem=dict.__delitem__):
+        'od.__delitem__(y) <==> del od[y]'
+        # Deleting an existing item uses self.__map to find the link which gets
+        # removed by updating the links in the predecessor and successor nodes.
+        dict_delitem(self, key)
+        link = self.__map.pop(key)
+        link_prev = link.prev
+        link_next = link.next
+        link_prev.next = link_next
+        link_next.prev = link_prev
+
+    def __iter__(self):
+        'od.__iter__() <==> iter(od)'
+        # Traverse the linked list in order.
+        root = self.__root
+        curr = root.next
+        while curr is not root:
+            yield curr.key
+            curr = curr.next
+
+    def __reversed__(self):
+        'od.__reversed__() <==> reversed(od)'
+        # Traverse the linked list in reverse order.
+        root = self.__root
+        curr = root.prev
+        while curr is not root:
+            yield curr.key
+            curr = curr.prev
+
+    def clear(self):
+        'od.clear() -> None.  Remove all items from od.'
+        root = self.__root
+        root.prev = root.next = root
+        self.__map.clear()
+        dict.clear(self)
+
+    def popitem(self, last=True):
+        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+        Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+        '''
+        if not self:
+            raise KeyError('dictionary is empty')
+        root = self.__root
+        if last:
+            link = root.prev
+            link_prev = link.prev
+            link_prev.next = root
+            root.prev = link_prev
+        else:
+            link = root.next
+            link_next = link.next
+            root.next = link_next
+            link_next.prev = root
+        key = link.key
+        del self.__map[key]
+        value = dict.pop(self, key)
+        return key, value
+
+    def move_to_end(self, key, last=True):
+        '''Move an existing element to the end (or beginning if last==False).
+
+        Raises KeyError if the element does not exist.
+        When last=True, acts like a fast version of self[key]=self.pop(key).
+
+        '''
+        link = self.__map[key]
+        link_prev = link.prev
+        link_next = link.next
+        link_prev.next = link_next
+        link_next.prev = link_prev
+        root = self.__root
+        if last:
+            last = root.prev
+            link.prev = last
+            link.next = root
+            last.next = root.prev = link
+        else:
+            first = root.next
+            link.prev = root
+            link.next = first
+            root.next = first.prev = link
+
+    def __sizeof__(self):
+        sizeof = _sys.getsizeof
+        n = len(self) + 1                       # number of links including root
+        size = sizeof(self.__dict__)            # instance dictionary
+        size += sizeof(self.__map) * 2          # internal dict and inherited dict
+        size += sizeof(self.__hardroot) * n     # link objects
+        size += sizeof(self.__root) * n         # proxy objects
+        return size
+
+    update = __update = MutableMapping.update
+    keys = MutableMapping.keys
+    values = MutableMapping.values
+    items = MutableMapping.items
+    __ne__ = MutableMapping.__ne__
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
+        value.  If key is not found, d is returned if given, otherwise KeyError
+        is raised.
+
+        '''
+        if key in self:
+            result = self[key]
+            del self[key]
+            return result
+        if default is self.__marker:
+            raise KeyError(key)
+        return default
+
+    def setdefault(self, key, default=None):
+        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+        if key in self:
+            return self[key]
+        self[key] = default
+        return default
+
+    @_recursive_repr()
+    def __repr__(self):
+        'od.__repr__() <==> repr(od)'
+        if not self:
+            return '%s()' % (self.__class__.__name__,)
+        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
+
+    def __reduce__(self):
+        'Return state information for pickling'
+        items = [[k, self[k]] for k in self]
+        inst_dict = vars(self).copy()
+        for k in vars(OrderedDict()):
+            inst_dict.pop(k, None)
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    def copy(self):
+        'od.copy() -> a shallow copy of od'
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
+        If not specified, the value defaults to None.
+
+        '''
+        self = cls()
+        for key in iterable:
+            self[key] = value
+        return self
+
+    def __eq__(self, other):
+        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
+        while comparison to a regular mapping is order-insensitive.
+
+        '''
+        if isinstance(other, OrderedDict):
+            return len(self)==len(other) and \
+                   all(p==q for p, q in zip(self.items(), other.items()))
+        return dict.__eq__(self, other)
+
+# update_wrapper() and wraps() are tools to help write
+# wrapper functions that can handle naive introspection
+
+WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+WRAPPER_UPDATES = ('__dict__',)
+def update_wrapper(wrapper,
+                   wrapped,
+                   assigned = WRAPPER_ASSIGNMENTS,
+                   updated = WRAPPER_UPDATES):
+    """Update a wrapper function to look like the wrapped function
+
+       wrapper is the function to be updated
+       wrapped is the original function
+       assigned is a tuple naming the attributes assigned directly
+       from the wrapped function to the wrapper function (defaults to
+       functools.WRAPPER_ASSIGNMENTS)
+       updated is a tuple naming the attributes of the wrapper that
+       are updated with the corresponding attribute from the wrapped
+       function (defaults to functools.WRAPPER_UPDATES)
+    """
+    wrapper.__wrapped__ = wrapped
+    for attr in assigned:
+        try:
+            value = getattr(wrapped, attr)
+        except AttributeError:
+            pass
+        else:
+            setattr(wrapper, attr, value)
+    for attr in updated:
+        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+    # Return the wrapper so this can be used as a decorator via partial()
+    return wrapper
+
+def wraps(wrapped,
+          assigned = WRAPPER_ASSIGNMENTS,
+          updated = WRAPPER_UPDATES):
+    """Decorator factory to apply update_wrapper() to a wrapper function
+
+       Returns a decorator that invokes update_wrapper() with the decorated
+       function as the wrapper argument and the arguments to wraps() as the
+       remaining arguments. Default arguments are as for update_wrapper().
+       This is a convenience function to simplify applying partial() to
+       update_wrapper().
+    """
+    return partial(update_wrapper, wrapped=wrapped,
+                   assigned=assigned, updated=updated)
+
+def total_ordering(cls):
+    """Class decorator that fills in missing ordering methods"""
+    convert = {
+        '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
+                   ('__le__', lambda self, other: self < other or self == other),
+                   ('__ge__', lambda self, other: not self < other)],
+        '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
+                   ('__lt__', lambda self, other: self <= other and not self == other),
+                   ('__gt__', lambda self, other: not self <= other)],
+        '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
+                   ('__ge__', lambda self, other: self > other or self == other),
+                   ('__le__', lambda self, other: not self > other)],
+        '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
+                   ('__gt__', lambda self, other: self >= other and not self == other),
+                   ('__lt__', lambda self, other: not self >= other)]
+    }
+    roots = set(dir(cls)) & set(convert)
+    if not roots:
+        raise ValueError('must define at least one ordering operation: < > <= >=')
+    root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
+    for opname, opfunc in convert[root]:
+        if opname not in roots:
+            opfunc.__name__ = opname
+            opfunc.__doc__ = getattr(int, opname).__doc__
+            setattr(cls, opname, opfunc)
+    return cls
+
+def cmp_to_key(mycmp):
+    """Convert a cmp= function into a key= function"""
+    class K(object):
+        __slots__ = ['obj']
+        def __init__(self, obj):
+            self.obj = obj
+        def __lt__(self, other):
+            return mycmp(self.obj, other.obj) < 0
+        def __gt__(self, other):
+            return mycmp(self.obj, other.obj) > 0
+        def __eq__(self, other):
+            return mycmp(self.obj, other.obj) == 0
+        def __le__(self, other):
+            return mycmp(self.obj, other.obj) <= 0
+        def __ge__(self, other):
+            return mycmp(self.obj, other.obj) >= 0
+        def __ne__(self, other):
+            return mycmp(self.obj, other.obj) != 0
+        __hash__ = None
+    return K
+
+_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
+
+def lru_cache(maxsize=100):
+    """Least-recently-used cache decorator.
+
+    If *maxsize* is set to None, the LRU features are disabled and the cache
+    can grow without bound.
+
+    Arguments to the cached function must be hashable.
+
+    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
+    f.cache_info().  Clear the cache and statistics with f.cache_clear().
+    Access the underlying function with f.__wrapped__.
+
+    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
+
+    """
+    # Users should only access the lru_cache through its public API:
+    #       cache_info, cache_clear, and f.__wrapped__
+    # The internals of the lru_cache are encapsulated for thread safety and
+    # to allow the implementation to change (including a possible C version).
+
+    def decorating_function(user_function,
+                tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
+
+        hits, misses = [0], [0]
+        kwd_mark = (object(),)          # separates positional and keyword args
+        lock = Lock()                   # needed because OrderedDict isn't threadsafe
+
+        if maxsize is None:
+            cache = dict()              # simple cache without ordering or size limit
+
+            @wraps(user_function)
+            def wrapper(*args, **kwds):
+                key = args
+                if kwds:
+                    key += kwd_mark + tuple(sorted(kwds.items()))
+                try:
+                    result = cache[key]
+                    hits[0] += 1
+                    return result
+                except KeyError:
+                    pass
+                result = user_function(*args, **kwds)
+                cache[key] = result
+                misses[0] += 1
+                return result
+        else:
+            cache = OrderedDict()           # ordered least recent to most recent
+            cache_popitem = cache.popitem
+            cache_renew = cache.move_to_end
+
+            @wraps(user_function)
+            def wrapper(*args, **kwds):
+                key = args
+                if kwds:
+                    key += kwd_mark + tuple(sorted(kwds.items()))
+                with lock:
+                    try:
+                        result = cache[key]
+                        cache_renew(key)    # record recent use of this key
+                        hits[0] += 1
+                        return result
+                    except KeyError:
+                        pass
+                result = user_function(*args, **kwds)
+                with lock:
+                    cache[key] = result     # record recent use of this key
+                    misses[0] += 1
+                    if len(cache) > maxsize:
+                        cache_popitem(0)    # purge least recently used cache entry
+                return result
+
+        def cache_info():
+            """Report cache statistics"""
+            with lock:
+                return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
+
+        def cache_clear():
+            """Clear the cache and cache statistics"""
+            with lock:
+                cache.clear()
+                hits[0] = misses[0] = 0
+
+        wrapper.cache_info = cache_info
+        wrapper.cache_clear = cache_clear
+        return wrapper
+
+    return decorating_function
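
A minimal sketch of the lru_cache decorator this backport exists to provide
(assuming the vendored package is importable as functools32)::

    from functools32 import lru_cache

    @lru_cache(maxsize=32)
    def fib(n):
        # repeated subproblems are answered from the cache
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print fib(30)           # 832040
    print fib.cache_info()  # CacheInfo(hits=28, misses=31, maxsize=32, currsize=31)
    fib.cache_clear()       # resets both the cache and the statistics

    # the bundled OrderedDict adds move_to_end(), which the LRU logic relies on
    from functools32.functools32 import OrderedDict
    od = OrderedDict.fromkeys('abc')
    od.move_to_end('a')
    print list(od)          # ['b', 'c', 'a']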
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/functools32/reprlib32.py
@@ -0,0 +1,157 @@
+"""Redo the builtin repr() (representation) but with limits on most sizes."""
+
+__all__ = ["Repr", "repr", "recursive_repr"]
+
+import __builtin__ as builtins
+from itertools import islice
+try:
+    from thread import get_ident
+except ImportError:
+    from _dummy_thread32 import get_ident
+
+def recursive_repr(fillvalue='...'):
+    'Decorator to make a repr function return fillvalue for a recursive call'
+
+    def decorating_function(user_function):
+        repr_running = set()
+
+        def wrapper(self):
+            key = id(self), get_ident()
+            if key in repr_running:
+                return fillvalue
+            repr_running.add(key)
+            try:
+                result = user_function(self)
+            finally:
+                repr_running.discard(key)
+            return result
+
+        # Can't use functools.wraps() here because of bootstrap issues
+        wrapper.__module__ = getattr(user_function, '__module__')
+        wrapper.__doc__ = getattr(user_function, '__doc__')
+        wrapper.__name__ = getattr(user_function, '__name__')
+        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+        return wrapper
+
+    return decorating_function
+
+class Repr:
+
+    def __init__(self):
+        self.maxlevel = 6
+        self.maxtuple = 6
+        self.maxlist = 6
+        self.maxarray = 5
+        self.maxdict = 4
+        self.maxset = 6
+        self.maxfrozenset = 6
+        self.maxdeque = 6
+        self.maxstring = 30
+        self.maxlong = 40
+        self.maxother = 30
+
+    def repr(self, x):
+        return self.repr1(x, self.maxlevel)
+
+    def repr1(self, x, level):
+        typename = type(x).__name__
+        if ' ' in typename:
+            parts = typename.split()
+            typename = '_'.join(parts)
+        if hasattr(self, 'repr_' + typename):
+            return getattr(self, 'repr_' + typename)(x, level)
+        else:
+            return self.repr_instance(x, level)
+
+    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
+        n = len(x)
+        if level <= 0 and n:
+            s = '...'
+        else:
+            newlevel = level - 1
+            repr1 = self.repr1
+            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
+            if n > maxiter:  pieces.append('...')
+            s = ', '.join(pieces)
+            if n == 1 and trail:  right = trail + right
+        return '%s%s%s' % (left, s, right)
+
+    def repr_tuple(self, x, level):
+        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
+
+    def repr_list(self, x, level):
+        return self._repr_iterable(x, level, '[', ']', self.maxlist)
+
+    def repr_array(self, x, level):
+        header = "array('%s', [" % x.typecode
+        return self._repr_iterable(x, level, header, '])', self.maxarray)
+
+    def repr_set(self, x, level):
+        x = _possibly_sorted(x)
+        return self._repr_iterable(x, level, 'set([', '])', self.maxset)
+
+    def repr_frozenset(self, x, level):
+        x = _possibly_sorted(x)
+        return self._repr_iterable(x, level, 'frozenset([', '])',
+                                   self.maxfrozenset)
+
+    def repr_deque(self, x, level):
+        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
+
+    def repr_dict(self, x, level):
+        n = len(x)
+        if n == 0: return '{}'
+        if level <= 0: return '{...}'
+        newlevel = level - 1
+        repr1 = self.repr1
+        pieces = []
+        for key in islice(_possibly_sorted(x), self.maxdict):
+            keyrepr = repr1(key, newlevel)
+            valrepr = repr1(x[key], newlevel)
+            pieces.append('%s: %s' % (keyrepr, valrepr))
+        if n > self.maxdict: pieces.append('...')
+        s = ', '.join(pieces)
+        return '{%s}' % (s,)
+
+    def repr_str(self, x, level):
+        s = builtins.repr(x[:self.maxstring])
+        if len(s) > self.maxstring:
+            i = max(0, (self.maxstring-3)//2)
+            j = max(0, self.maxstring-3-i)
+            s = builtins.repr(x[:i] + x[len(x)-j:])
+            s = s[:i] + '...' + s[len(s)-j:]
+        return s
+
+    def repr_int(self, x, level):
+        s = builtins.repr(x) # XXX Hope this isn't too slow...
+        if len(s) > self.maxlong:
+            i = max(0, (self.maxlong-3)//2)
+            j = max(0, self.maxlong-3-i)
+            s = s[:i] + '...' + s[len(s)-j:]
+        return s
+
+    def repr_instance(self, x, level):
+        try:
+            s = builtins.repr(x)
+            # Bugs in x.__repr__() can cause arbitrary
+            # exceptions -- then make up something
+        except Exception:
+            return '<%s instance at %x>' % (x.__class__.__name__, id(x))
+        if len(s) > self.maxother:
+            i = max(0, (self.maxother-3)//2)
+            j = max(0, self.maxother-3-i)
+            s = s[:i] + '...' + s[len(s)-j:]
+        return s
+
+
+def _possibly_sorted(x):
+    # Since not all sequences of items can be sorted and comparison
+    # functions may raise arbitrary exceptions, return an unsorted
+    # sequence in that case.
+    try:
+        return sorted(x)
+    except Exception:
+        return list(x)
+
+aRepr = Repr()
+repr = aRepr.repr
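
A short sketch of this module's two entry points (assuming the package is
importable): the size-limited repr() and the recursive_repr() decorator used
by the OrderedDict above::

    from functools32 import reprlib32

    print reprlib32.repr(range(100))   # [0, 1, 2, 3, 4, 5, ...] (maxlist is 6)

    class Node(object):
        def __init__(self):
            self.child = self          # self-referential
        @reprlib32.recursive_repr()
        def __repr__(self):
            return 'Node(%r)' % (self.child,)

    print repr(Node())                 # Node(...) instead of infinite recursion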
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/setup.cfg
@@ -0,0 +1,6 @@
+[sdist]
+formats=gztar,zip
+
+[bdist_rpm]
+release = 1
+group = Development/Languages/Python
new file mode 100755
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/setup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+
+import sys
+from distutils.core import setup
+
+
+def main():
+    if not (3,) > sys.version_info >= (2, 7):
+        sys.stderr.write('This backport is for Python 2.7 only.\n')
+        sys.exit(1)
+
+    setup(
+      name='functools32',
+      version='3.2.3-2',
+      description='Backport of the functools module from Python 3.2.3 for use on 2.7 and PyPy.',
+      long_description="""
+This is a backport of the functools standard library module from
+Python 3.2.3 for use on Python 2.7 and PyPy. It includes
+the new `lru_cache` feature (a least-recently-used cache decorator).""",
+      license='PSF license',
+
+      maintainer='ENDOH takanao',
+      maintainer_email='djmchl@gmail.com',
+      url='https://github.com/MiCHiLU/python-functools32',
+
+      packages=['functools32'],
+    )
+
+
+if __name__ == '__main__':
+    main()
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/test_functools32.py
@@ -0,0 +1,681 @@
+import functools32 as functools
+import collections
+import sys
+import unittest
+import test_support27 as support
+from weakref import proxy
+import pickle
+from random import choice
+from functools32.functools32 import OrderedDict
+
+@staticmethod
+def PythonPartial(func, *args, **keywords):
+    'Pure Python approximation of partial()'
+    def newfunc(*fargs, **fkeywords):
+        newkeywords = keywords.copy()
+        newkeywords.update(fkeywords)
+        return func(*(args + fargs), **newkeywords)
+    newfunc.func = func
+    newfunc.args = args
+    newfunc.keywords = keywords
+    return newfunc
+
+def capture(*args, **kw):
+    """capture all positional and keyword arguments"""
+    return args, kw
+
+def signature(part):
+    """ return the signature of a partial object """
+    return (part.func, part.args, part.keywords, part.__dict__)
+
+class TestPartial(unittest.TestCase):
+
+    thetype = functools.partial
+
+    def test_basic_examples(self):
+        p = self.thetype(capture, 1, 2, a=10, b=20)
+        self.assertEqual(p(3, 4, b=30, c=40),
+                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
+        p = self.thetype(map, lambda x: x*10)
+        self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
+
+    def test_attributes(self):
+        p = self.thetype(capture, 1, 2, a=10, b=20)
+        # attributes should be readable
+        self.assertEqual(p.func, capture)
+        self.assertEqual(p.args, (1, 2))
+        self.assertEqual(p.keywords, dict(a=10, b=20))
+        # attributes should not be writable
+        if not isinstance(self.thetype, type):
+            return
+        if "__pypy__" in sys.modules:
+            raise unittest.SkipTest("In the PyPy execution environment")
+        self.assertRaises(TypeError, setattr, p, 'func', map)
+        self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
+        self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
+
+        p = self.thetype(hex)
+        try:
+            del p.__dict__
+        except TypeError:
+            pass
+        else:
+            self.fail('partial object allowed __dict__ to be deleted')
+
+    def test_argument_checking(self):
+        self.assertRaises(TypeError, self.thetype)     # need at least a func arg
+        try:
+            self.thetype(2)()
+        except TypeError:
+            pass
+        else:
+            self.fail('First arg not checked for callability')
+
+    def test_protection_of_callers_dict_argument(self):
+        # a caller's dictionary should not be altered by partial
+        def func(a=10, b=20):
+            return a
+        d = {'a':3}
+        p = self.thetype(func, a=5)
+        self.assertEqual(p(**d), 3)
+        self.assertEqual(d, {'a':3})
+        p(b=7)
+        self.assertEqual(d, {'a':3})
+
+    def test_arg_combinations(self):
+        # exercise special code paths for zero args in either partial
+        # object or the caller
+        p = self.thetype(capture)
+        self.assertEqual(p(), ((), {}))
+        self.assertEqual(p(1,2), ((1,2), {}))
+        p = self.thetype(capture, 1, 2)
+        self.assertEqual(p(), ((1,2), {}))
+        self.assertEqual(p(3,4), ((1,2,3,4), {}))
+
+    def test_kw_combinations(self):
+        # exercise special code paths for no keyword args in
+        # either the partial object or the caller
+        p = self.thetype(capture)
+        self.assertEqual(p(), ((), {}))
+        self.assertEqual(p(a=1), ((), {'a':1}))
+        p = self.thetype(capture, a=1)
+        self.assertEqual(p(), ((), {'a':1}))
+        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
+        # keyword args in the call override those in the partial object
+        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
+
+    def test_positional(self):
+        # make sure positional arguments are captured correctly
+        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
+            p = self.thetype(capture, *args)
+            expected = args + ('x',)
+            got, empty = p('x')
+            self.assertTrue(expected == got and empty == {})
+
+    def test_keyword(self):
+        # make sure keyword arguments are captured correctly
+        for a in ['a', 0, None, 3.5]:
+            p = self.thetype(capture, a=a)
+            expected = {'a':a,'x':None}
+            empty, got = p(x=None)
+            self.assertTrue(expected == got and empty == ())
+
+    def test_no_side_effects(self):
+        # make sure there are no side effects that affect subsequent calls
+        p = self.thetype(capture, 0, a=1)
+        args1, kw1 = p(1, b=2)
+        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
+        args2, kw2 = p()
+        self.assertTrue(args2 == (0,) and kw2 == {'a':1})
+
+    def test_error_propagation(self):
+        def f(x, y):
+            x / y
+        self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
+        self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
+        self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
+        self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)
+
+    def test_weakref(self):
+        f = self.thetype(int, base=16)
+        p = proxy(f)
+        self.assertEqual(f.func, p.func)
+        if "__pypy__" in sys.modules:
+            raise unittest.SkipTest("In the PyPy execution environment")
+        f = None
+        self.assertRaises(ReferenceError, getattr, p, 'func')
+
+    def test_with_bound_and_unbound_methods(self):
+        data = list(map(str, range(10)))
+        join = self.thetype(str.join, '')
+        self.assertEqual(join(data), '0123456789')
+        join = self.thetype(''.join)
+        self.assertEqual(join(data), '0123456789')
+
+    def test_pickle(self):
+        f = self.thetype(signature, 'asdf', bar=True)
+        f.add_something_to__dict__ = True
+        f_copy = pickle.loads(pickle.dumps(f))
+        self.assertEqual(signature(f), signature(f_copy))
+
+class PartialSubclass(functools.partial):
+    pass
+
+class TestPartialSubclass(TestPartial):
+
+    thetype = PartialSubclass
+
+class TestPythonPartial(TestPartial):
+
+    thetype = PythonPartial
+
+    # the Python version doesn't have a nice repr
+    def test_repr(self): pass
+
+    # the Python version isn't picklable
+    def test_pickle(self): pass
+
+class TestUpdateWrapper(unittest.TestCase):
+
+    def check_wrapper(self, wrapper, wrapped,
+                      assigned=functools.WRAPPER_ASSIGNMENTS,
+                      updated=functools.WRAPPER_UPDATES):
+        # Check attributes were assigned
+        for name in assigned:
+            self.assertTrue(getattr(wrapper, name) is getattr(wrapped, name))
+        # Check attributes were updated
+        for name in updated:
+            wrapper_attr = getattr(wrapper, name)
+            wrapped_attr = getattr(wrapped, name)
+            for key in wrapped_attr:
+                self.assertTrue(wrapped_attr[key] is wrapper_attr[key])
+
+    def _default_update(self):
+        def f(a):
+            """This is a test"""
+            pass
+        f.attr = 'This is also a test'
+        def wrapper(b):
+            pass
+        functools.update_wrapper(wrapper, f)
+        return wrapper, f
+
+    def test_default_update(self):
+        wrapper, f = self._default_update()
+        self.check_wrapper(wrapper, f)
+        self.assertIs(wrapper.__wrapped__, f)
+        self.assertEqual(wrapper.__name__, 'f')
+        self.assertEqual(wrapper.attr, 'This is also a test')
+
+    @unittest.skipIf(sys.flags.optimize >= 2,
+                     "Docstrings are omitted with -O2 and above")
+    def test_default_update_doc(self):
+        wrapper, f = self._default_update()
+        self.assertEqual(wrapper.__doc__, 'This is a test')
+
+    def test_no_update(self):
+        def f():
+            """This is a test"""
+            pass
+        f.attr = 'This is also a test'
+        def wrapper():
+            pass
+        functools.update_wrapper(wrapper, f, (), ())
+        self.check_wrapper(wrapper, f, (), ())
+        self.assertEqual(wrapper.__name__, 'wrapper')
+        self.assertEqual(wrapper.__doc__, None)
+        self.assertFalse(hasattr(wrapper, 'attr'))
+
+    def test_selective_update(self):
+        def f():
+            pass
+        f.attr = 'This is a different test'
+        f.dict_attr = dict(a=1, b=2, c=3)
+        def wrapper():
+            pass
+        wrapper.dict_attr = {}
+        assign = ('attr',)
+        update = ('dict_attr',)
+        functools.update_wrapper(wrapper, f, assign, update)
+        self.check_wrapper(wrapper, f, assign, update)
+        self.assertEqual(wrapper.__name__, 'wrapper')
+        self.assertEqual(wrapper.__doc__, None)
+        self.assertEqual(wrapper.attr, 'This is a different test')
+        self.assertEqual(wrapper.dict_attr, f.dict_attr)
+
+    def test_missing_attributes(self):
+        def f():
+            pass
+        def wrapper():
+            pass
+        wrapper.dict_attr = {}
+        assign = ('attr',)
+        update = ('dict_attr',)
+        # Missing attributes on wrapped object are ignored
+        functools.update_wrapper(wrapper, f, assign, update)
+        self.assertNotIn('attr', wrapper.__dict__)
+        self.assertEqual(wrapper.dict_attr, {})
+        # Wrapper must have expected attributes for updating
+        del wrapper.dict_attr
+        with self.assertRaises(AttributeError):
+            functools.update_wrapper(wrapper, f, assign, update)
+        wrapper.dict_attr = 1
+        with self.assertRaises(AttributeError):
+            functools.update_wrapper(wrapper, f, assign, update)
+
+    @unittest.skipIf(sys.flags.optimize >= 2,
+                     "Docstrings are omitted with -O2 and above")
+    def test_builtin_update(self):
+        # Test for bug #1576241
+        def wrapper():
+            pass
+        functools.update_wrapper(wrapper, max)
+        self.assertEqual(wrapper.__name__, 'max')
+        self.assertTrue(wrapper.__doc__.startswith('max('))
+
+class TestWraps(TestUpdateWrapper):
+
+    def _default_update(self):
+        def f():
+            """This is a test"""
+            pass
+        f.attr = 'This is also a test'
+        @functools.wraps(f)
+        def wrapper():
+            pass
+        self.check_wrapper(wrapper, f)
+        return wrapper
+
+    def test_default_update(self):
+        wrapper = self._default_update()
+        self.assertEqual(wrapper.__name__, 'f')
+        self.assertEqual(wrapper.attr, 'This is also a test')
+
+    @unittest.skipIf(not sys.flags.optimize <= 1,
+                     "Docstrings are omitted with -O2 and above")
+    def test_default_update_doc(self):
+        wrapper = self._default_update()
+        self.assertEqual(wrapper.__doc__, 'This is a test')
+
+    def test_no_update(self):
+        def f():
+            """This is a test"""
+            pass
+        f.attr = 'This is also a test'
+        @functools.wraps(f, (), ())
+        def wrapper():
+            pass
+        self.check_wrapper(wrapper, f, (), ())
+        self.assertEqual(wrapper.__name__, 'wrapper')
+        self.assertEqual(wrapper.__doc__, None)
+        self.assertFalse(hasattr(wrapper, 'attr'))
+
+    def test_selective_update(self):
+        def f():
+            pass
+        f.attr = 'This is a different test'
+        f.dict_attr = dict(a=1, b=2, c=3)
+        def add_dict_attr(f):
+            f.dict_attr = {}
+            return f
+        assign = ('attr',)
+        update = ('dict_attr',)
+        @functools.wraps(f, assign, update)
+        @add_dict_attr
+        def wrapper():
+            pass
+        self.check_wrapper(wrapper, f, assign, update)
+        self.assertEqual(wrapper.__name__, 'wrapper')
+        self.assertEqual(wrapper.__doc__, None)
+        self.assertEqual(wrapper.attr, 'This is a different test')
+        self.assertEqual(wrapper.dict_attr, f.dict_attr)
+
+class TestReduce(unittest.TestCase):
+    func = functools.reduce
+
+    def test_reduce(self):
+        class Squares:
+            def __init__(self, max):
+                self.max = max
+                self.sofar = []
+
+            def __len__(self):
+                return len(self.sofar)
+
+            def __getitem__(self, i):
+                if not 0 <= i < self.max: raise IndexError
+                n = len(self.sofar)
+                while n <= i:
+                    self.sofar.append(n*n)
+                    n += 1
+                return self.sofar[i]
+        def add(x, y):
+            return x + y
+        self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
+        self.assertEqual(
+            self.func(add, [['a', 'c'], [], ['d', 'w']], []),
+            ['a','c','d','w']
+        )
+        self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
+        self.assertEqual(
+            self.func(lambda x, y: x*y, range(2,21), 1),
+            2432902008176640000
+        )
+        self.assertEqual(self.func(add, Squares(10)), 285)
+        self.assertEqual(self.func(add, Squares(10), 0), 285)
+        self.assertEqual(self.func(add, Squares(0), 0), 0)
+        self.assertRaises(TypeError, self.func)
+        self.assertRaises(TypeError, self.func, 42, 42)
+        self.assertRaises(TypeError, self.func, 42, 42, 42)
+        self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
+        self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
+        self.assertRaises(TypeError, self.func, 42, (42, 42))
+        self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
+        self.assertRaises(TypeError, self.func, add, "")
+        self.assertRaises(TypeError, self.func, add, ())
+        self.assertRaises(TypeError, self.func, add, object())
+
+        self.assertEqual(self.func(add, [], None), None)
+        self.assertEqual(self.func(add, [], 42), 42)
+
+        class BadSeq:
+            def __getitem__(self, index):
+                raise ValueError
+        self.assertRaises(ValueError, self.func, 42, BadSeq())
+
+    # Test reduce()'s use of iterators.
+    def test_iterator_usage(self):
+        class SequenceClass:
+            def __init__(self, n):
+                self.n = n
+            def __getitem__(self, i):
+                if 0 <= i < self.n:
+                    return i
+                else:
+                    raise IndexError
+
+        from operator import add
+        self.assertEqual(self.func(add, SequenceClass(5)), 10)
+        self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
+        self.assertRaises(TypeError, self.func, add, SequenceClass(0))
+        self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
+        self.assertEqual(self.func(add, SequenceClass(1)), 0)
+        self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
+
+        d = {"one": 1, "two": 2, "three": 3}
+        self.assertEqual(self.func(add, d), "".join(d.keys()))
+
+class TestCmpToKey(unittest.TestCase):
+    def test_cmp_to_key(self):
+        def mycmp(x, y):
+            return y - x
+        self.assertEqual(sorted(range(5), key=functools.cmp_to_key(mycmp)),
+                         [4, 3, 2, 1, 0])
+
+    def test_hash(self):
+        def mycmp(x, y):
+            return y - x
+        key = functools.cmp_to_key(mycmp)
+        k = key(10)
+        self.assertRaises(TypeError, hash, k)
+        self.assertFalse(isinstance(k, collections.Hashable))
+
+class TestTotalOrdering(unittest.TestCase):
+
+    def test_total_ordering_lt(self):
+        @functools.total_ordering
+        class A:
+            def __init__(self, value):
+                self.value = value
+            def __lt__(self, other):
+                return self.value < other.value
+            def __eq__(self, other):
+                return self.value == other.value
+        self.assertTrue(A(1) < A(2))
+        self.assertTrue(A(2) > A(1))
+        self.assertTrue(A(1) <= A(2))
+        self.assertTrue(A(2) >= A(1))
+        self.assertTrue(A(2) <= A(2))
+        self.assertTrue(A(2) >= A(2))
+
+    def test_total_ordering_le(self):
+        @functools.total_ordering
+        class A:
+            def __init__(self, value):
+                self.value = value
+            def __le__(self, other):
+                return self.value <= other.value
+            def __eq__(self, other):
+                return self.value == other.value
+        self.assertTrue(A(1) < A(2))
+        self.assertTrue(A(2) > A(1))
+        self.assertTrue(A(1) <= A(2))
+        self.assertTrue(A(2) >= A(1))
+        self.assertTrue(A(2) <= A(2))
+        self.assertTrue(A(2) >= A(2))
+
+    def test_total_ordering_gt(self):
+        @functools.total_ordering
+        class A:
+            def __init__(self, value):
+                self.value = value
+            def __gt__(self, other):
+                return self.value > other.value
+            def __eq__(self, other):
+                return self.value == other.value
+        self.assertTrue(A(1) < A(2))
+        self.assertTrue(A(2) > A(1))
+        self.assertTrue(A(1) <= A(2))
+        self.assertTrue(A(2) >= A(1))
+        self.assertTrue(A(2) <= A(2))
+        self.assertTrue(A(2) >= A(2))
+
+    def test_total_ordering_ge(self):
+        @functools.total_ordering
+        class A:
+            def __init__(self, value):
+                self.value = value
+            def __ge__(self, other):
+                return self.value >= other.value
+            def __eq__(self, other):
+                return self.value == other.value
+        self.assertTrue(A(1) < A(2))
+        self.assertTrue(A(2) > A(1))
+        self.assertTrue(A(1) <= A(2))
+        self.assertTrue(A(2) >= A(1))
+        self.assertTrue(A(2) <= A(2))
+        self.assertTrue(A(2) >= A(2))
+
+    def test_total_ordering_no_overwrite(self):
+        # new methods should not overwrite existing
+        @functools.total_ordering
+        class A(str):
+            pass
+        self.assertTrue(A("a") < A("b"))
+        self.assertTrue(A("b") > A("a"))
+        self.assertTrue(A("a") <= A("b"))
+        self.assertTrue(A("b") >= A("a"))
+        self.assertTrue(A("b") <= A("b"))
+        self.assertTrue(A("b") >= A("b"))
+
+    def test_no_operations_defined(self):
+        with self.assertRaises(ValueError):
+            @functools.total_ordering
+            class A:
+                pass
+
+    def test_bug_10042(self):
+        @functools.total_ordering
+        class TestTO:
+            def __init__(self, value):
+                self.value = value
+            def __eq__(self, other):
+                if isinstance(other, TestTO):
+                    return self.value == other.value
+                return False
+            def __lt__(self, other):
+                if isinstance(other, TestTO):
+                    return self.value < other.value
+                raise TypeError
+        with self.assertRaises(TypeError):
+            TestTO(8) <= ()
+
+class TestLRU(unittest.TestCase):
+
+    def test_lru(self):
+        def orig(x, y):
+            return 3*x+y
+        f = functools.lru_cache(maxsize=20)(orig)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(maxsize, 20)
+        self.assertEqual(currsize, 0)
+        self.assertEqual(hits, 0)
+        self.assertEqual(misses, 0)
+
+        domain = range(5)
+        for i in range(1000):
+            x, y = choice(domain), choice(domain)
+            actual = f(x, y)
+            expected = orig(x, y)
+            self.assertEqual(actual, expected)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertTrue(hits > misses)
+        self.assertEqual(hits + misses, 1000)
+        self.assertEqual(currsize, 20)
+
+        f.cache_clear()   # test clearing
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 0)
+        self.assertEqual(misses, 0)
+        self.assertEqual(currsize, 0)
+        f(x, y)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 0)
+        self.assertEqual(misses, 1)
+        self.assertEqual(currsize, 1)
+
+        # Test bypassing the cache
+        self.assertIs(f.__wrapped__, orig)
+        f.__wrapped__(x, y)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 0)
+        self.assertEqual(misses, 1)
+        self.assertEqual(currsize, 1)
+
+        # test size zero (which means "never-cache")
+        @functools.lru_cache(0)
+        def f():
+            f_cnt[0] += 1
+            return 20
+        self.assertEqual(f.cache_info().maxsize, 0)
+        f_cnt = [0]
+        for i in range(5):
+            self.assertEqual(f(), 20)
+        self.assertEqual(f_cnt[0], 5)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 0)
+        self.assertEqual(misses, 5)
+        self.assertEqual(currsize, 0)
+
+        # test size one
+        @functools.lru_cache(1)
+        def f():
+            f_cnt[0] += 1
+            return 20
+        self.assertEqual(f.cache_info().maxsize, 1)
+        f_cnt = [0]
+        for i in range(5):
+            self.assertEqual(f(), 20)
+        self.assertEqual(f_cnt[0], 1)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 4)
+        self.assertEqual(misses, 1)
+        self.assertEqual(currsize, 1)
+
+        # test size two
+        @functools.lru_cache(2)
+        def f(x):
+            f_cnt[0] += 1
+            return x*10
+        self.assertEqual(f.cache_info().maxsize, 2)
+        f_cnt = [0]
+        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
+            #    *  *              *                          *
+            self.assertEqual(f(x), x*10)
+        self.assertEqual(f_cnt[0], 4)
+        hits, misses, maxsize, currsize = f.cache_info()
+        self.assertEqual(hits, 12)
+        self.assertEqual(misses, 4)
+        self.assertEqual(currsize, 2)
+
+    def test_lru_with_maxsize_none(self):
+        @functools.lru_cache(maxsize=None)
+        def fib(n):
+            if n < 2:
+                return n
+            return fib(n-1) + fib(n-2)
+        self.assertEqual([fib(n) for n in range(16)],
+            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
+        self.assertEqual(fib.cache_info(),
+            functools.functools32._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
+        fib.cache_clear()
+        self.assertEqual(fib.cache_info(),
+            functools.functools32._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
+
+    def test_lru_with_exceptions(self):
+        # Verify that user_function exceptions get passed through without
+        # creating a hard-to-read chained exception.
+        # http://bugs.python.org/issue13177
+        for maxsize in (None, 100):
+            @functools.lru_cache(maxsize)
+            def func(i):
+                return 'abc'[i]
+            self.assertEqual(func(0), 'a')
+            with self.assertRaises(IndexError):
+                func(15)
+            # Verify that the previous exception did not result in a cached entry
+            with self.assertRaises(IndexError):
+                func(15)
+
+class TestOrderedDict(unittest.TestCase):
+    def test_move_to_end(self):
+        od = OrderedDict.fromkeys('abcde')
+        self.assertEqual(list(od), list('abcde'))
+        od.move_to_end('c')
+        self.assertEqual(list(od), list('abdec'))
+        od.move_to_end('c', 0)
+        self.assertEqual(list(od), list('cabde'))
+        od.move_to_end('c', 0)
+        self.assertEqual(list(od), list('cabde'))
+        od.move_to_end('e')
+        self.assertEqual(list(od), list('cabde'))
+        with self.assertRaises(KeyError):
+            od.move_to_end('x')
+
+def test_main(verbose=None):
+    test_classes = (
+        TestPartial,
+        TestPartialSubclass,
+        TestPythonPartial,
+        TestUpdateWrapper,
+        TestTotalOrdering,
+        TestCmpToKey,
+        TestWraps,
+        TestReduce,
+        TestLRU,
+        TestOrderedDict,
+    )
+    support.run_unittest(*test_classes)
+
+    # verify reference counting
+    if verbose and hasattr(sys, "gettotalrefcount"):
+        import gc
+        counts = [None] * 5
+        for i in range(len(counts)):
+            support.run_unittest(*test_classes)
+            gc.collect()
+            counts[i] = sys.gettotalrefcount()
+        print(counts)
+
+if __name__ == '__main__':
+    test_main(verbose=True)
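
The suite is self-contained; a sketch of driving it by hand from the vendored
directory (so that functools32 and test_support27 are both importable)::

    import test_functools32
    test_functools32.test_main(verbose=True)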
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/functools32-3.2.3-2/test_support27.py
@@ -0,0 +1,1233 @@
+"""Supporting definitions for the Python regression tests."""
+
+if __name__ != 'test_support27':
+    raise ImportError('test_support27 must be imported as test_support27')
+
+import contextlib
+import errno
+import functools
+import gc
+import socket
+import sys
+import os
+import platform
+import shutil
+import warnings
+import unittest
+import importlib
+import UserDict
+import re
+import time
+try:
+    import thread
+except ImportError:
+    thread = None
+
+__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
+           "verbose", "use_resources", "max_memuse", "record_original_stdout",
+           "get_original_stdout", "unload", "unlink", "rmtree", "forget",
+           "is_resource_enabled", "requires", "find_unused_port", "bind_port",
+           "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
+           "SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
+           "open_urlresource", "check_warnings", "check_py3k_warnings",
+           "CleanImport", "EnvironmentVarGuard", "captured_output",
+           "captured_stdout", "TransientResource", "transient_internet",
+           "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
+           "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
+           "threading_cleanup", "reap_children", "cpython_only",
+           "check_impl_detail", "get_attribute", "py3k_bytes",
+           "import_fresh_module"]
+
+
+class Error(Exception):
+    """Base class for regression test exceptions."""
+
+class TestFailed(Error):
+    """Test failed."""
+
+class ResourceDenied(unittest.SkipTest):
+    """Test skipped because it requested a disallowed resource.
+
+    This is raised when a test calls requires() for a resource that
+    has not been enabled.  It is used to distinguish between expected
+    and unexpected skips.
+    """
+
+@contextlib.contextmanager
+def _ignore_deprecated_imports(ignore=True):
+    """Context manager to suppress package and module deprecation
+    warnings when importing them.
+
+    If ignore is False, this context manager has no effect."""
+    if ignore:
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".+ (module|package)",
+                                    DeprecationWarning)
+            yield
+    else:
+        yield
+
+
+def import_module(name, deprecated=False):
+    """Import and return the module to be tested, raising SkipTest if
+    it is not available.
+
+    If deprecated is True, any module or package deprecation messages
+    will be suppressed."""
+    with _ignore_deprecated_imports(deprecated):
+        try:
+            return importlib.import_module(name)
+        except ImportError, msg:
+            raise unittest.SkipTest(str(msg))
+
+
+def _save_and_remove_module(name, orig_modules):
+    """Helper function to save and remove a module from sys.modules
+
+       Raise ImportError if the module can't be imported."""
+    # try to import the module and raise an error if it can't be imported
+    if name not in sys.modules:
+        __import__(name)
+        del sys.modules[name]
+    for modname in list(sys.modules):
+        if modname == name or modname.startswith(name + '.'):
+            orig_modules[modname] = sys.modules[modname]
+            del sys.modules[modname]
+
+def _save_and_block_module(name, orig_modules):
+    """Helper function to save and block a module in sys.modules
+
+       Return True if the module was in sys.modules, False otherwise."""
+    saved = True
+    try:
+        orig_modules[name] = sys.modules[name]
+    except KeyError:
+        saved = False
+    sys.modules[name] = None
+    return saved
+
+
+def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
+    """Imports and returns a module, deliberately bypassing the sys.modules cache
+    and importing a fresh copy of the module. Once the import is complete,
+    the sys.modules cache is restored to its original state.
+
+    Modules named in fresh are also imported anew if needed by the import.
+    If one of these modules can't be imported, None is returned.
+
+    Importing of modules named in blocked is prevented while the fresh import
+    takes place.
+
+    If deprecated is True, any module or package deprecation messages
+    will be suppressed."""
+    # NOTE: test_heapq, test_json, and test_warnings include extra sanity
+    # checks to make sure that this utility function is working as expected
+    with _ignore_deprecated_imports(deprecated):
+        # Keep track of modules saved for later restoration as well
+        # as those which just need a blocking entry removed
+        orig_modules = {}
+        names_to_remove = []
+        _save_and_remove_module(name, orig_modules)
+        try:
+            for fresh_name in fresh:
+                _save_and_remove_module(fresh_name, orig_modules)
+            for blocked_name in blocked:
+                if not _save_and_block_module(blocked_name, orig_modules):
+                    names_to_remove.append(blocked_name)
+            fresh_module = importlib.import_module(name)
+        except ImportError:
+            fresh_module = None
+        finally:
+            for orig_name, module in orig_modules.items():
+                sys.modules[orig_name] = module
+            for name_to_remove in names_to_remove:
+                del sys.modules[name_to_remove]
+        return fresh_module
+
+
+def get_attribute(obj, name):
+    """Get an attribute, raising SkipTest if AttributeError is raised."""
+    try:
+        attribute = getattr(obj, name)
+    except AttributeError:
+        raise unittest.SkipTest("module %s has no attribute %s" % (
+            obj.__name__, name))
+    else:
+        return attribute
+
+
+verbose = 1              # Flag set to 0 by regrtest.py
+use_resources = None     # Flag set to [] by regrtest.py
+max_memuse = 0           # Disable bigmem tests (they will still be run with
+                         # small sizes, to make sure they work.)
+real_max_memuse = 0
+
+# _original_stdout is meant to hold stdout at the time regrtest began.
+# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
+# The point is to have some flavor of stdout the user can actually see.
+_original_stdout = None
+def record_original_stdout(stdout):
+    global _original_stdout
+    _original_stdout = stdout
+
+def get_original_stdout():
+    return _original_stdout or sys.stdout
+
+def unload(name):
+    try:
+        del sys.modules[name]
+    except KeyError:
+        pass
+
+def unlink(filename):
+    try:
+        os.unlink(filename)
+    except OSError:
+        pass
+
+def rmtree(path):
+    try:
+        shutil.rmtree(path)
+    except OSError, e:
+        # Unix returns ENOENT, Windows returns ESRCH.
+        if e.errno not in (errno.ENOENT, errno.ESRCH):
+            raise
+
+def forget(modname):
+    '''"Forget" a module was ever imported by removing it from sys.modules and
+    deleting any .pyc and .pyo files.'''
+    unload(modname)
+    for dirname in sys.path:
+        unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
+        # Deleting the .pyo file cannot be within the 'try' for the .pyc since
+        # the chance exists that there is no .pyc (and thus the 'try' statement
+        # is exited) but there is a .pyo file.
+        unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
+
+def is_resource_enabled(resource):
+    """Test whether a resource is enabled.  Known resources are set by
+    regrtest.py."""
+    return use_resources is not None and resource in use_resources
+
+def requires(resource, msg=None):
+    """Raise ResourceDenied if the specified resource is not available.
+
+    If the caller's module is __main__ the check is skipped and the resource
+    is treated as available; the check only takes effect when regrtest.py is
+    executing."""
+    # see if the caller's module is __main__ - if so, treat as if
+    # the resource was set
+    if sys._getframe(1).f_globals.get("__name__") == "__main__":
+        return
+    if not is_resource_enabled(resource):
+        if msg is None:
+            msg = "Use of the `%s' resource not enabled" % resource
+        raise ResourceDenied(msg)
+
+HOST = 'localhost'
+
+def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
+    """Returns an unused port that should be suitable for binding.  This is
+    achieved by creating a temporary socket with the same family and type as
+    the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
+    the specified host address (defaults to 0.0.0.0) with the port set to 0,
+    eliciting an unused ephemeral port from the OS.  The temporary socket is
+    then closed and deleted, and the ephemeral port is returned.
+
+    Either this method or bind_port() should be used for any tests where a
+    server socket needs to be bound to a particular port for the duration of
+    the test.  Which one to use depends on whether the calling code is creating
+    a python socket, or if an unused port needs to be provided in a constructor
+    or passed to an external program (i.e. the -accept argument to openssl's
+    s_server mode).  Always prefer bind_port() over find_unused_port() where
+    possible.  Hard coded ports should *NEVER* be used.  As soon as a server
+    socket is bound to a hard coded port, the ability to run multiple instances
+    of the test simultaneously on the same host is compromised, which makes the
+    test a ticking time bomb in a buildbot environment. On Unix buildbots, this
+    may simply manifest as a failed test, which can be recovered from without
+    intervention in most cases, but on Windows, the entire python process can
+    completely and utterly wedge, requiring someone to log in to the buildbot
+    and manually kill the affected process.
+
+    (This is easy to reproduce on Windows, unfortunately, and can be traced to
+    the SO_REUSEADDR socket option having different semantics on Windows versus
+    Unix/Linux.  On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
+    listen and then accept connections on identical host/ports.  An EADDRINUSE
+    socket.error will be raised at some point (depending on the platform and
+    the order bind and listen were called on each socket).
+
+    However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
+    will ever be raised when attempting to bind two identical host/ports. When
+    accept() is called on each socket, the second caller's process will steal
+    the port from the first caller, leaving them both in an awkwardly wedged
+    state where they'll no longer respond to any signals or graceful kills, and
+    must be forcibly killed via OpenProcess()/TerminateProcess().
+
+    The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
+    instead of SO_REUSEADDR, which effectively affords the same semantics as
+    SO_REUSEADDR on Unix.  Given that far more open-source developers work on
+    Unix than on Windows, this is a common mistake.  A quick
+    look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
+    openssl.exe is called with the 's_server' option, for example. See
+    http://bugs.python.org/issue2550 for more info.  The following site also
+    has a very thorough description about the implications of both REUSEADDR
+    and EXCLUSIVEADDRUSE on Windows:
+    http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
+
+    XXX: although this approach is a vast improvement on previous attempts to
+    elicit unused ports, it rests heavily on the assumption that the ephemeral
+    port returned to us by the OS won't immediately be dished back out to some
+    other process when we close and delete our temporary socket but before our
+    calling code has a chance to bind the returned port.  We can deal with this
+    issue if/when we come across it."""
+    tempsock = socket.socket(family, socktype)
+    port = bind_port(tempsock)
+    tempsock.close()
+    del tempsock
+    return port
+
+def bind_port(sock, host=HOST):
+    """Bind the socket to a free port and return the port number.  Relies on
+    ephemeral ports in order to ensure we are using an unbound port.  This is
+    important as many tests may be running simultaneously, especially in a
+    buildbot environment.  This method raises an exception if the sock.family
+    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
+    or SO_REUSEPORT set on it.  Tests should *never* set these socket options
+    for TCP/IP sockets.  The only case for setting these options is testing
+    multicasting via multiple UDP sockets.
+
+    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
+    on Windows), it will be set on the socket.  This will prevent anyone else
+    from bind()'ing to our host/port for the duration of the test.
+    """
+    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
+        if hasattr(socket, 'SO_REUSEADDR'):
+            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
+                raise TestFailed("tests should never set the SO_REUSEADDR "   \
+                                 "socket option on TCP/IP sockets!")
+        if hasattr(socket, 'SO_REUSEPORT'):
+            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
+                raise TestFailed("tests should never set the SO_REUSEPORT "   \
+                                 "socket option on TCP/IP sockets!")
+        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
+            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+
+    sock.bind((host, 0))
+    port = sock.getsockname()[1]
+    return port
+
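+# Illustrative usage sketch, not part of the upstream module: bind a server
+# socket to a free port for the duration of a test.  The helper name is
+# hypothetical.
+def _example_bind_port():
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    try:
+        port = bind_port(sock)  # sock is now bound to (HOST, port)
+        sock.listen(1)          # ... start the server under test here ...
+    finally:
+        sock.close()
+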
+FUZZ = 1e-6
+
+def fcmp(x, y): # fuzzy comparison function
+    if isinstance(x, float) or isinstance(y, float):
+        try:
+            fuzz = (abs(x) + abs(y)) * FUZZ
+            if abs(x-y) <= fuzz:
+                return 0
+        except:
+            pass
+    elif type(x) == type(y) and isinstance(x, (tuple, list)):
+        for i in range(min(len(x), len(y))):
+            outcome = fcmp(x[i], y[i])
+            if outcome != 0:
+                return outcome
+        return (len(x) > len(y)) - (len(x) < len(y))
+    return (x > y) - (x < y)
+
+try:
+    unicode
+    have_unicode = True
+except NameError:
+    have_unicode = False
+
+is_jython = sys.platform.startswith('java')
+
+# Filename used for testing
+if os.name == 'java':
+    # Jython disallows @ in module names
+    TESTFN = '$test'
+elif os.name == 'riscos':
+    TESTFN = 'testfile'
+else:
+    TESTFN = '@test'
+    # Unicode name only used if TESTFN_ENCODING exists for the platform.
+    if have_unicode:
+        # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
+        # TESTFN_UNICODE is a filename that can be encoded using the
+        # file system encoding, but *not* with the default (ascii) encoding
+        if isinstance('', unicode):
+            # python -U
+            # XXX perhaps unicode() should accept Unicode strings?
+            TESTFN_UNICODE = "@test-\xe0\xf2"
+        else:
+            # 2 latin characters.
+            TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
+        TESTFN_ENCODING = sys.getfilesystemencoding()
+        # TESTFN_UNENCODABLE is a filename that should *not* be
+        # able to be encoded by *either* the default or filesystem encoding.
+        # This test really only makes sense on Windows NT platforms
+        # which have special Unicode support in posixmodule.
+        if (not hasattr(sys, "getwindowsversion") or
+                sys.getwindowsversion()[3] < 2): #  0=win32s or 1=9x/ME
+            TESTFN_UNENCODABLE = None
+        else:
+            # Japanese characters (I think - from bug 846133)
+            TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
+            try:
+                # XXX - Note - should be using TESTFN_ENCODING here - but for
+                # Windows, "mbcs" currently always operates as if in
+                # errors=ignore' mode - hence we get '?' characters rather than
+                # the exception.  'Latin1' operates as we expect - ie, fails.
+                # See [ 850997 ] mbcs encoding ignores errors
+                TESTFN_UNENCODABLE.encode("Latin1")
+            except UnicodeEncodeError:
+                pass
+            else:
+                print \
+                'WARNING: The filename %r CAN be encoded by the filesystem.  ' \
+                'Unicode filename tests may not be effective' \
+                % TESTFN_UNENCODABLE
+
+
+# Disambiguate TESTFN for parallel testing, while letting it remain a valid
+# module name.
+TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
+
+# Save the initial cwd
+SAVEDCWD = os.getcwd()
+
+@contextlib.contextmanager
+def temp_cwd(name='tempcwd', quiet=False):
+    """
+    Context manager that creates a temporary directory and sets it as the CWD.
+
+    The new CWD is created in the current directory and it's named *name*.
+    If *quiet* is False (default) and it's not possible to create or change
+    the CWD, an error is raised.  If it's True, only a warning is raised
+    and the original CWD is used.
+    """
+    if isinstance(name, unicode):
+        try:
+            name = name.encode(sys.getfilesystemencoding() or 'ascii')
+        except UnicodeEncodeError:
+            if not quiet:
+                raise unittest.SkipTest('unable to encode the cwd name with '
+                                        'the filesystem encoding.')
+    saved_dir = os.getcwd()
+    is_temporary = False
+    try:
+        os.mkdir(name)
+        os.chdir(name)
+        is_temporary = True
+    except OSError:
+        if not quiet:
+            raise
+        warnings.warn('tests may fail, unable to change the CWD to ' + name,
+                      RuntimeWarning, stacklevel=3)
+    try:
+        yield os.getcwd()
+    finally:
+        os.chdir(saved_dir)
+        if is_temporary:
+            rmtree(name)
+
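+# Illustrative usage sketch, not part of the upstream module: run some test
+# code inside a throwaway working directory.  Names are hypothetical.
+def _example_temp_cwd():
+    with temp_cwd('example_tmp') as cwd:
+        # cwd is the new temporary directory; anything created here is
+        # removed when the block exits and the old CWD is restored.
+        open(os.path.join(cwd, 'scratch.txt'), 'w').close()
+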
+
+def findfile(file, here=__file__, subdir=None):
+    """Try to find a file on sys.path and the working directory.  If it is not
+    found the argument passed to the function is returned (this does not
+    necessarily signal failure; could still be the legitimate path)."""
+    if os.path.isabs(file):
+        return file
+    if subdir is not None:
+        file = os.path.join(subdir, file)
+    path = sys.path
+    path = [os.path.dirname(here)] + path
+    for dn in path:
+        fn = os.path.join(dn, file)
+        if os.path.exists(fn): return fn
+    return file
+
+def sortdict(dict):
+    "Like repr(dict), but in sorted order."
+    items = dict.items()
+    items.sort()
+    reprpairs = ["%r: %r" % pair for pair in items]
+    withcommas = ", ".join(reprpairs)
+    return "{%s}" % withcommas
+
+def make_bad_fd():
+    """
+    Create an invalid file descriptor by opening and closing a file, and
+    return its (now invalid) fd.
+    """
+    file = open(TESTFN, "wb")
+    try:
+        return file.fileno()
+    finally:
+        file.close()
+        unlink(TESTFN)
+
+def check_syntax_error(testcase, statement):
+    testcase.assertRaises(SyntaxError, compile, statement,
+                          '<test string>', 'exec')
+
+def open_urlresource(url, check=None):
+    import urlparse, urllib2
+
+    filename = urlparse.urlparse(url)[2].split('/')[-1]  # '/': it's a URL!
+
+    fn = os.path.join(os.path.dirname(__file__), "data", filename)
+
+    def check_valid_file(fn):
+        f = open(fn)
+        if check is None:
+            return f
+        elif check(f):
+            f.seek(0)
+            return f
+        f.close()
+
+    if os.path.exists(fn):
+        f = check_valid_file(fn)
+        if f is not None:
+            return f
+        unlink(fn)
+
+    # Verify the requirement before downloading the file
+    requires('urlfetch')
+
+    print >> get_original_stdout(), '\tfetching %s ...' % url
+    f = urllib2.urlopen(url, timeout=15)
+    try:
+        with open(fn, "wb") as out:
+            s = f.read()
+            while s:
+                out.write(s)
+                s = f.read()
+    finally:
+        f.close()
+
+    f = check_valid_file(fn)
+    if f is not None:
+        return f
+    raise TestFailed('invalid resource "%s"' % fn)
+
+
+class WarningsRecorder(object):
+    """Convenience wrapper for the warnings list returned on
+       entry to the warnings.catch_warnings() context manager.
+    """
+    def __init__(self, warnings_list):
+        self._warnings = warnings_list
+        self._last = 0
+
+    def __getattr__(self, attr):
+        if len(self._warnings) > self._last:
+            return getattr(self._warnings[-1], attr)
+        elif attr in warnings.WarningMessage._WARNING_DETAILS:
+            return None
+        raise AttributeError("%r has no attribute %r" % (self, attr))
+
+    @property
+    def warnings(self):
+        return self._warnings[self._last:]
+
+    def reset(self):
+        self._last = len(self._warnings)
+
+
+def _filterwarnings(filters, quiet=False):
+    """Catch the warnings, then check if all the expected
+    warnings have been raised and re-raise unexpected warnings.
+    If 'quiet' is True, only re-raise the unexpected warnings.
+    """
+    # Clear the warning registry of the calling module
+    # in order to re-raise the warnings.
+    frame = sys._getframe(2)
+    registry = frame.f_globals.get('__warningregistry__')
+    if registry:
+        registry.clear()
+    with warnings.catch_warnings(record=True) as w:
+        # Set filter "always" to record all warnings.  Because
+        # test_warnings swaps the module, we need to look it up in
+        # the sys.modules dictionary.
+        sys.modules['warnings'].simplefilter("always")
+        yield WarningsRecorder(w)
+    # Filter the recorded warnings
+    reraise = [warning.message for warning in w]
+    missing = []
+    for msg, cat in filters:
+        seen = False
+        for exc in reraise[:]:
+            message = str(exc)
+            # Filter out the matching messages
+            if (re.match(msg, message, re.I) and
+                issubclass(exc.__class__, cat)):
+                seen = True
+                reraise.remove(exc)
+        if not seen and not quiet:
+            # This filter caught nothing
+            missing.append((msg, cat.__name__))
+    if reraise:
+        raise AssertionError("unhandled warning %r" % reraise[0])
+    if missing:
+        raise AssertionError("filter (%r, %s) did not catch any warning" %
+                             missing[0])
+
+
+@contextlib.contextmanager
+def check_warnings(*filters, **kwargs):
+    """Context manager to silence warnings.
+
+    Accept 2-tuples as positional arguments:
+        ("message regexp", WarningCategory)
+
+    Optional argument:
+     - if 'quiet' is True, it does not fail if a filter catches nothing
+        (default True without argument,
+         default False if some filters are defined)
+
+    Without argument, it defaults to:
+        check_warnings(("", Warning), quiet=True)
+    """
+    quiet = kwargs.get('quiet')
+    if not filters:
+        filters = (("", Warning),)
+        # Preserve backward compatibility
+        if quiet is None:
+            quiet = True
+    return _filterwarnings(filters, quiet)
+
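+# Illustrative usage sketch, not part of the upstream module: assert that a
+# block of code emits a matching warning.  The helper name is hypothetical.
+def _example_check_warnings():
+    with check_warnings(("deprecated", DeprecationWarning)):
+        # The filter regexp is matched against the start of the message.
+        warnings.warn("deprecated API used", DeprecationWarning)
+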
+
+@contextlib.contextmanager
+def check_py3k_warnings(*filters, **kwargs):
+    """Context manager to silence py3k warnings.
+
+    Accept 2-tuples as positional arguments:
+        ("message regexp", WarningCategory)
+
+    Optional argument:
+     - if 'quiet' is True, it does not fail if a filter catches nothing
+        (default False)
+
+    Without argument, it defaults to:
+        check_py3k_warnings(("", DeprecationWarning), quiet=False)
+    """
+    if sys.py3kwarning:
+        if not filters:
+            filters = (("", DeprecationWarning),)
+    else:
+        # It should not raise any py3k warning
+        filters = ()
+    return _filterwarnings(filters, kwargs.get('quiet'))
+
+
+class CleanImport(object):
+    """Context manager to force import to return a new module reference.
+
+    This is useful for testing module-level behaviours, such as
+    the emission of a DeprecationWarning on import.
+
+    Use like this:
+
+        with CleanImport("foo"):
+            importlib.import_module("foo") # new reference
+    """
+
+    def __init__(self, *module_names):
+        self.original_modules = sys.modules.copy()
+        for module_name in module_names:
+            if module_name in sys.modules:
+                module = sys.modules[module_name]
+                # It is possible that module_name is just an alias for
+                # another module (e.g. stub for modules renamed in 3.x).
+                # In that case, we also need to delete the real module to clear
+                # the import cache.
+                if module.__name__ != module_name:
+                    del sys.modules[module.__name__]
+                del sys.modules[module_name]
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *ignore_exc):
+        sys.modules.update(self.original_modules)
+
+
+class EnvironmentVarGuard(UserDict.DictMixin):
+
+    """Class to help protect the environment variable properly.  Can be used as
+    a context manager."""
+
+    def __init__(self):
+        self._environ = os.environ
+        self._changed = {}
+
+    def __getitem__(self, envvar):
+        return self._environ[envvar]
+
+    def __setitem__(self, envvar, value):
+        # Remember the initial value on the first access
+        if envvar not in self._changed:
+            self._changed[envvar] = self._environ.get(envvar)
+        self._environ[envvar] = value
+
+    def __delitem__(self, envvar):
+        # Remember the initial value on the first access
+        if envvar not in self._changed:
+            self._changed[envvar] = self._environ.get(envvar)
+        if envvar in self._environ:
+            del self._environ[envvar]
+
+    def keys(self):
+        return self._environ.keys()
+
+    def set(self, envvar, value):
+        self[envvar] = value
+
+    def unset(self, envvar):
+        del self[envvar]
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *ignore_exc):
+        for (k, v) in self._changed.items():
+            if v is None:
+                if k in self._environ:
+                    del self._environ[k]
+            else:
+                self._environ[k] = v
+        os.environ = self._environ
+
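+# Illustrative usage sketch, not part of the upstream module: temporarily
+# override environment variables; both changes are undone on exit.
+def _example_environment_var_guard():
+    with EnvironmentVarGuard() as env:
+        env.set('LANG', 'C')
+        env.unset('PYTHONPATH')
+        return os.environ.get('LANG')
+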
+
+class DirsOnSysPath(object):
+    """Context manager to temporarily add directories to sys.path.
+
+    This makes a copy of sys.path, appends any directories given
+    as positional arguments, then reverts sys.path to the copied
+    settings when the context ends.
+
+    Note that *all* sys.path modifications in the body of the
+    context manager, including replacement of the object,
+    will be reverted at the end of the block.
+    """
+
+    def __init__(self, *paths):
+        self.original_value = sys.path[:]
+        self.original_object = sys.path
+        sys.path.extend(paths)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *ignore_exc):
+        sys.path = self.original_object
+        sys.path[:] = self.original_value
+
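+# Illustrative usage sketch, not part of the upstream module:
+# '/tmp/example_helpers' is a hypothetical directory.
+def _example_dirs_on_sys_path():
+    with DirsOnSysPath('/tmp/example_helpers'):
+        pass  # imports attempted here also search the added directory
+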
+
+class TransientResource(object):
+
+    """Raise ResourceDenied if an exception is raised while the context manager
+    is in effect that matches the specified exception and attributes."""
+
+    def __init__(self, exc, **kwargs):
+        self.exc = exc
+        self.attrs = kwargs
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type_=None, value=None, traceback=None):
+        """If type_ is a subclass of self.exc and value has attributes matching
+        self.attrs, raise ResourceDenied.  Otherwise let the exception
+        propagate (if any)."""
+        if type_ is not None and issubclass(self.exc, type_):
+            for attr, attr_value in self.attrs.iteritems():
+                if not hasattr(value, attr):
+                    break
+                if getattr(value, attr) != attr_value:
+                    break
+            else:
+                raise ResourceDenied("an optional resource is not available")
+
+
+@contextlib.contextmanager
+def transient_internet(resource_name, timeout=30.0, errnos=()):
+    """Return a context manager that raises ResourceDenied when various issues
+    with the Internet connection manifest themselves as exceptions."""
+    default_errnos = [
+        ('ECONNREFUSED', 111),
+        ('ECONNRESET', 104),
+        ('EHOSTUNREACH', 113),
+        ('ENETUNREACH', 101),
+        ('ETIMEDOUT', 110),
+    ]
+    default_gai_errnos = [
+        ('EAI_NONAME', -2),
+        ('EAI_NODATA', -5),
+    ]
+
+    denied = ResourceDenied("Resource '%s' is not available" % resource_name)
+    captured_errnos = errnos
+    gai_errnos = []
+    if not captured_errnos:
+        captured_errnos = [getattr(errno, name, num)
+                           for (name, num) in default_errnos]
+        gai_errnos = [getattr(socket, name, num)
+                      for (name, num) in default_gai_errnos]
+
+    def filter_error(err):
+        n = getattr(err, 'errno', None)
+        if (isinstance(err, socket.timeout) or
+            (isinstance(err, socket.gaierror) and n in gai_errnos) or
+            n in captured_errnos):
+            if not verbose:
+                sys.stderr.write(denied.args[0] + "\n")
+            raise denied
+
+    old_timeout = socket.getdefaulttimeout()
+    try:
+        if timeout is not None:
+            socket.setdefaulttimeout(timeout)
+        yield
+    except IOError as err:
+        # urllib can wrap original socket errors multiple times (!), we must
+        # unwrap to get at the original error.
+        while True:
+            a = err.args
+            if len(a) >= 1 and isinstance(a[0], IOError):
+                err = a[0]
+            # The error can also be wrapped as args[1]:
+            #    except socket.error as msg:
+            #        raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
+            elif len(a) >= 2 and isinstance(a[1], IOError):
+                err = a[1]
+            else:
+                break
+        filter_error(err)
+        raise
+    # XXX should we catch generic exceptions and look for their
+    # __cause__ or __context__?
+    finally:
+        socket.setdefaulttimeout(old_timeout)
+
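+# Illustrative usage sketch, not part of the upstream module: report flaky
+# connectivity as a denied resource instead of a test failure.  The host
+# name is hypothetical.
+def _example_transient_internet():
+    with transient_internet('example.org'):
+        socket.create_connection(('example.org', 80), timeout=10).close()
+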
+
+@contextlib.contextmanager
+def captured_output(stream_name):
+    """Return a context manager used by captured_stdout and captured_stdin
+    that temporarily replaces the sys stream *stream_name* with a StringIO."""
+    import StringIO
+    orig_stdout = getattr(sys, stream_name)
+    setattr(sys, stream_name, StringIO.StringIO())
+    try:
+        yield getattr(sys, stream_name)
+    finally:
+        setattr(sys, stream_name, orig_stdout)
+
+def captured_stdout():
+    """Capture the output of sys.stdout:
+
+       with captured_stdout() as s:
+           print "hello"
+       self.assertEqual(s.getvalue(), "hello")
+    """
+    return captured_output("stdout")
+
+def captured_stdin():
+    return captured_output("stdin")
+
+def gc_collect():
+    """Force as many objects as possible to be collected.
+
+    In non-CPython implementations of Python, this is needed because timely
+    deallocation is not guaranteed by the garbage collector.  (Even in CPython
+    this can happen in the case of reference cycles.)  This means that __del__
+    methods may be called later than expected and weakrefs may remain alive for
+    longer than expected.  This function tries its best to force all garbage
+    objects to disappear.
+    """
+    gc.collect()
+    if is_jython:
+        time.sleep(0.1)
+    gc.collect()
+    gc.collect()
+
+
+#=======================================================================
+# Decorator for running a function in a different locale, correctly resetting
+# it afterwards.
+
+def run_with_locale(catstr, *locales):
+    def decorator(func):
+        def inner(*args, **kwds):
+            try:
+                import locale
+                category = getattr(locale, catstr)
+                orig_locale = locale.setlocale(category)
+            except AttributeError:
+                # if the test author gives us an invalid category string
+                raise
+            except:
+                # cannot retrieve original locale, so do nothing
+                locale = orig_locale = None
+            else:
+                for loc in locales:
+                    try:
+                        locale.setlocale(category, loc)
+                        break
+                    except:
+                        pass
+
+            # now run the function, resetting the locale on exceptions
+            try:
+                return func(*args, **kwds)
+            finally:
+                if locale and orig_locale:
+                    locale.setlocale(category, orig_locale)
+        inner.func_name = func.func_name
+        inner.__doc__ = func.__doc__
+        return inner
+    return decorator
+
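+# Illustrative usage sketch, not part of the upstream module: run a test body
+# under the first locale the platform accepts, restoring the original one
+# afterwards.  The locale names are hypothetical examples.
+@run_with_locale('LC_ALL', 'de_DE', 'fr_FR', '')
+def _example_run_with_locale():
+    import locale
+    return locale.localeconv()['decimal_point']
+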
+#=======================================================================
+# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
+
+# Some handy shorthands. Note that these are used for byte-limits as well
+# as size-limits, in the various bigmem tests
+_1M = 1024*1024
+_1G = 1024 * _1M
+_2G = 2 * _1G
+_4G = 4 * _1G
+
+MAX_Py_ssize_t = sys.maxsize
+
+def set_memlimit(limit):
+    global max_memuse
+    global real_max_memuse
+    sizes = {
+        'k': 1024,
+        'm': _1M,
+        'g': _1G,
+        't': 1024*_1G,
+    }
+    m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
+                 re.IGNORECASE | re.VERBOSE)
+    if m is None:
+        raise ValueError('Invalid memory limit %r' % (limit,))
+    memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
+    real_max_memuse = memlimit
+    if memlimit > MAX_Py_ssize_t:
+        memlimit = MAX_Py_ssize_t
+    if memlimit < _2G - 1:
+        raise ValueError('Memory limit %r too low to be useful' % (limit,))
+    max_memuse = memlimit
+
+def bigmemtest(minsize, memuse, overhead=5*_1M):
+    """Decorator for bigmem tests.
+
+    'minsize' is the minimum useful size for the test (in arbitrary,
+    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
+    the test, or a good estimate of it. 'overhead' specifies fixed overhead,
+    independent of the testsize, and defaults to 5Mb.
+
+    The decorator tries to guess a good value for 'size' and passes it to
+    the decorated test function. If minsize * memuse is more than the
+    allowed memory use (as defined by max_memuse), the test is skipped.
+    Otherwise, minsize is adjusted upward to use up to max_memuse.
+    """
+    def decorator(f):
+        def wrapper(self):
+            if not max_memuse:
+                # If max_memuse is 0 (the default),
+                # we still want to run the tests with size set to a few kb,
+                # to make sure they work. We still want to avoid using
+                # too much memory, though, but we do that noisily.
+                maxsize = 5147
+                self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
+            else:
+                maxsize = int((max_memuse - overhead) / memuse)
+                if maxsize < minsize:
+                    # Really ought to print 'test skipped' or something
+                    if verbose:
+                        sys.stderr.write("Skipping %s because of memory "
+                                         "constraint\n" % (f.__name__,))
+                    return
+                # Try to keep some breathing room in memory use
+                maxsize = max(maxsize - 50 * _1M, minsize)
+            return f(self, maxsize)
+        wrapper.minsize = minsize
+        wrapper.memuse = memuse
+        wrapper.overhead = overhead
+        return wrapper
+    return decorator
+
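+# Illustrative usage sketch, not part of the upstream module: a bigmem test
+# declaring roughly 3 bytes of memory use per unit of 'size' (the string and
+# its doubled concatenation).
+class _ExampleBigMemTest(unittest.TestCase):
+    @bigmemtest(minsize=_2G, memuse=3)
+    def test_concat(self, size):
+        s = '.' * size
+        self.assertEqual(len(s + s), 2 * size)
+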
+def precisionbigmemtest(size, memuse, overhead=5*_1M):
+    def decorator(f):
+        def wrapper(self):
+            if not real_max_memuse:
+                maxsize = 5147
+            else:
+                maxsize = size
+
+                if real_max_memuse and real_max_memuse < maxsize * memuse:
+                    if verbose:
+                        sys.stderr.write("Skipping %s because of memory "
+                                         "constraint\n" % (f.__name__,))
+                    return
+
+            return f(self, maxsize)
+        wrapper.size = size
+        wrapper.memuse = memuse
+        wrapper.overhead = overhead
+        return wrapper
+    return decorator
+
+def bigaddrspacetest(f):
+    """Decorator for tests that fill the address space."""
+    def wrapper(self):
+        if max_memuse < MAX_Py_ssize_t:
+            if verbose:
+                sys.stderr.write("Skipping %s because of memory "
+                                 "constraint\n" % (f.__name__,))
+        else:
+            return f(self)
+    return wrapper
+
+#=======================================================================
+# unittest integration.
+
+class BasicTestRunner:
+    def run(self, test):
+        result = unittest.TestResult()
+        test(result)
+        return result
+
+def _id(obj):
+    return obj
+
+def requires_resource(resource):
+    if is_resource_enabled(resource):
+        return _id
+    else:
+        return unittest.skip("resource {0!r} is not enabled".format(resource))
+
+def cpython_only(test):
+    """
+    Decorator for tests only applicable on CPython.
+    """
+    return impl_detail(cpython=True)(test)
+
+def impl_detail(msg=None, **guards):
+    if check_impl_detail(**guards):
+        return _id
+    if msg is None:
+        guardnames, default = _parse_guards(guards)
+        if default:
+            msg = "implementation detail not available on {0}"
+        else:
+            msg = "implementation detail specific to {0}"
+        guardnames = sorted(guardnames.keys())
+        msg = msg.format(' or '.join(guardnames))
+    return unittest.skip(msg)
+
+def _parse_guards(guards):
+    # Returns a tuple ({platform_name: run_me}, default_value)
+    if not guards:
+        return ({'cpython': True}, False)
+    is_true = guards.values()[0]
+    assert guards.values() == [is_true] * len(guards)   # all True or all False
+    return (guards, not is_true)
+
+# Use the following check to guard CPython's implementation-specific tests --
+# or to run them only on the implementation(s) guarded by the arguments.
+def check_impl_detail(**guards):
+    """This function returns True or False depending on the host platform.
+       Examples:
+          if check_impl_detail():               # only on CPython (default)
+          if check_impl_detail(jython=True):    # only on Jython
+          if check_impl_detail(cpython=False):  # everywhere except on CPython
+    """
+    guards, default = _parse_guards(guards)
+    return guards.get(platform.python_implementation().lower(), default)
+
+
+
+def _run_suite(suite):
+    """Run tests from a unittest.TestSuite-derived class."""
+    if verbose:
+        runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
+    else:
+        runner = BasicTestRunner()
+
+    result = runner.run(suite)
+    if not result.wasSuccessful():
+        if len(result.errors) == 1 and not result.failures:
+            err = result.errors[0][1]
+        elif len(result.failures) == 1 and not result.errors:
+            err = result.failures[0][1]
+        else:
+            err = "multiple errors occurred"
+            if not verbose:
+                err += "; run in verbose mode for details"
+        raise TestFailed(err)
+
+
+def run_unittest(*classes):
+    """Run tests from unittest.TestCase-derived classes."""
+    valid_types = (unittest.TestSuite, unittest.TestCase)
+    suite = unittest.TestSuite()
+    for cls in classes:
+        if isinstance(cls, str):
+            if cls in sys.modules:
+                suite.addTest(unittest.findTestCases(sys.modules[cls]))
+            else:
+                raise ValueError("str arguments must be keys in sys.modules")
+        elif isinstance(cls, valid_types):
+            suite.addTest(cls)
+        else:
+            suite.addTest(unittest.makeSuite(cls))
+    _run_suite(suite)
+
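+# Illustrative usage sketch, not part of the upstream module: the usual
+# test_main() pattern built on run_unittest().
+def _example_test_main():
+    class ExampleTest(unittest.TestCase):
+        def test_truth(self):
+            self.assertTrue(True)
+    run_unittest(ExampleTest)
+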
+
+#=======================================================================
+# doctest driver.
+
+def run_doctest(module, verbosity=None):
+    """Run doctest on the given module.  Return (#failures, #tests).
+
+    If optional argument verbosity is not specified (or is None), pass
+    test_support's belief about verbosity on to doctest.  Else doctest's
+    usual behavior is used (it searches sys.argv for -v).
+    """
+
+    import doctest
+
+    if verbosity is None:
+        verbosity = verbose
+    else:
+        verbosity = None
+
+    # Direct doctest output (normally just errors) to real stdout; doctest
+    # output shouldn't be compared by regrtest.
+    save_stdout = sys.stdout
+    sys.stdout = get_original_stdout()
+    try:
+        f, t = doctest.testmod(module, verbose=verbosity)
+        if f:
+            raise TestFailed("%d of %d doctests failed" % (f, t))
+    finally:
+        sys.stdout = save_stdout
+    if verbose:
+        print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
+    return f, t
+
+#=======================================================================
+# Threading support to prevent reporting refleaks when running regrtest.py -R
+
+# NOTE: we use thread._count() rather than threading.enumerate() (or the
+# moral equivalent thereof) because a threading.Thread object is still alive
+# until its __bootstrap() method has returned, even after it has been
+# unregistered from the threading module.
+# thread._count(), on the other hand, only gets decremented *after* the
+# __bootstrap() method has returned, which gives us reliable reference counts
+# at the end of a test run.
+
+def threading_setup():
+    if thread:
+        return thread._count(),
+    else:
+        return 1,
+
+def threading_cleanup(nb_threads):
+    if not thread:
+        return
+
+    _MAX_COUNT = 10
+    for count in range(_MAX_COUNT):
+        n = thread._count()
+        if n == nb_threads:
+            break
+        time.sleep(0.1)
+    # XXX print a warning in case of failure?
+
+def reap_threads(func):
+    """Use this function when threads are being used.  This will
+    ensure that the threads are cleaned up even when the test fails.
+    If threading is unavailable this function does nothing.
+    """
+    if not thread:
+        return func
+
+    @functools.wraps(func)
+    def decorator(*args):
+        key = threading_setup()
+        try:
+            return func(*args)
+        finally:
+            threading_cleanup(*key)
+    return decorator
+
+def reap_children():
+    """Use this function at the end of test_main() whenever sub-processes
+    are started.  This will help ensure that no extra children (zombies)
+    stick around to hog resources and create problems when looking
+    for refleaks.
+    """
+
+    # Reap all our dead child processes so we don't leave zombies around.
+    # These hog resources and might be causing some of the buildbots to die.
+    if hasattr(os, 'waitpid'):
+        any_process = -1
+        while True:
+            try:
+                # This will raise an exception on Windows.  That's ok.
+                pid, status = os.waitpid(any_process, os.WNOHANG)
+                if pid == 0:
+                    break
+            except:
+                break
+
+def py3k_bytes(b):
+    """Emulate the py3k bytes() constructor.
+
+    NOTE: This is only a best effort function.
+    """
+    try:
+        # memoryview?
+        return b.tobytes()
+    except AttributeError:
+        try:
+            # iterable of ints?
+            return b"".join(chr(x) for x in b)
+        except TypeError:
+            return bytes(b)
+
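+# Illustrative usage sketch, not part of the upstream module: the three input
+# shapes py3k_bytes() accepts.
+def _example_py3k_bytes():
+    assert py3k_bytes(memoryview(b"abc")) == b"abc"  # buffer -> tobytes()
+    assert py3k_bytes([97, 98, 99]) == b"abc"        # iterable of ints
+    assert py3k_bytes(b"abc") == b"abc"              # bytes/str passthrough
+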
+def args_from_interpreter_flags():
+    """Return a list of command-line arguments reproducing the current
+    settings in sys.flags."""
+    flag_opt_map = {
+        'bytes_warning': 'b',
+        'dont_write_bytecode': 'B',
+        'ignore_environment': 'E',
+        'no_user_site': 's',
+        'no_site': 'S',
+        'optimize': 'O',
+        'py3k_warning': '3',
+        'verbose': 'v',
+    }
+    args = []
+    for flag, opt in flag_opt_map.items():
+        v = getattr(sys.flags, flag)
+        if v > 0:
+            args.append('-' + opt * v)
+    return args
+
+def strip_python_stderr(stderr):
+    """Strip the stderr of a Python process from potential debug output
+    emitted by the interpreter.
+
+    This will typically be run on the result of the communicate() method
+    of a subprocess.Popen object.
+    """
+    stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
+    return stderr
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/ChangeLog
@@ -0,0 +1,16 @@
+2014-09-20	jsonmerge	1.1.0
+
+	* Add ArrayMergeById strategy (thanks to Sarah Bird).
+	* merge() and get_schema() methods now raise more specific exception
+	  classes on error (BaseInstanceError, HeadInstanceError, SchemaError).
+	* Rename "unique" option for Version strategy to "ignoreDups".
+	* Fix duplicate entries with Version strategy even when "ignoreDups"
+	  was True (thanks to Sarah Bird).
+	* Always add "type" keyword when making new objects in get_schema().
+	* Remove "maxItems" and "uniqueItems" keywords in get_schema() for
+	  Append strategy.
+	* Use setuptools for packaging (thanks to Sarah Bird).
+
+2014-08-19	jsonmerge	1.0.0
+
+	* First release
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/MANIFEST.in
@@ -0,0 +1,3 @@
+include README.rst
+include ChangeLog
+recursive-include tests *.py
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/PKG-INFO
@@ -0,0 +1,274 @@
+Metadata-Version: 1.1
+Name: jsonmerge
+Version: 1.1.0
+Summary: Merge a series of JSON documents.
+Home-page: UNKNOWN
+Author: Tomaz Solc
+Author-email: tomaz.solc@tablix.org
+License: MIT
+Description: Merge a series of JSON documents
+        ================================
+        
+        This Python module allows you to merge a series of JSON documents into a
+        single one.
+        
+        This problem often occurs, for example, when different authors fill in
+        different parts of a common document and you need to construct a document
+        that includes contributions from all the authors. It also helps when
+        dealing with consecutive versions of a document where different fields get
+        updated over time.
+        
+        Consider a trivial example with two documents::
+        
+            >>> base = {
+            ...         "foo": 1,
+            ...         "bar": [ "one" ],
+            ...      }
+        
+            >>> head = {
+            ...         "bar": [ "two" ],
+            ...         "baz": "Hello, world!"
+            ...     }
+        
+        We call the document we are merging changes into *base* and the changed
+        document *head*. To merge these two documents using *jsonmerge*::
+        
+            >>> from pprint import pprint
+        
+            >>> from jsonmerge import merge
+            >>> result = merge(base, head)
+        
+            >>> pprint(result, width=40)
+            {'bar': ['two'],
+             'baz': 'Hello, world!',
+             'foo': 1}
+        
+        As you can see, when encountering a JSON object, *jsonmerge* by default
+        returns fields that appear in either the *base* or the *head* document.
+        For other JSON types, it simply replaces the older value. These
+        principles are also applied in the case of multiple nested JSON objects.
+        
+        In a more realistic use case, however, you might want to apply different
+        *merge strategies* to different parts of the document. You can tell
+        *jsonmerge* how to do that using a syntax based on `JSON schema`_.
+        
+        If you already have schemas for your document, you can simply expand them
+        with additional keywords recognized by *jsonmerge*.
+        
+        You use the *mergeStrategy* schema keyword to specify the strategy. The
+        two default strategies mentioned above are called *objectMerge* for objects
+        and *overwrite* for all other types.
+        
+        Let's say you want to specify that the merged *bar* field in the example
+        document above should contain elements from all documents, not just the
+        latest one. You can do this with a schema like this::
+        
+            >>> schema = {
+            ...             "properties": {
+            ...                 "bar": {
+            ...                     "mergeStrategy": "append"
+            ...                 }
+            ...             }
+            ...         }
+        
+            >>> from jsonmerge import Merger
+            >>> merger = Merger(schema)
+            >>> result = merger.merge(base, head)
+        
+            >>> pprint(result, width=40)
+            {'bar': ['one', 'two'],
+             'baz': 'Hello, world!',
+             'foo': 1}
+        
+        Another common example is when you need to keep a versioned list of values
+        that appeared in the series of documents::
+        
+            >>> schema = {
+            ...             "properties": {
+            ...                 "foo": {
+            ...                     "type": "object",
+            ...                     "mergeStrategy": "version",
+            ...                     "mergeOptions": { "limit": 5 }
+            ...                 }
+            ...             }
+            ...         }
+            >>> from jsonmerge import Merger
+            >>> merger = Merger(schema)
+        
+            >>> v1 = {
+            ...     'foo': {
+            ...         'greeting': 'Hello, World!'
+            ...     }
+            ... }
+        
+            >>> v2 = {
+            ...     'foo': {
+            ...         'greeting': 'Howdy, World!'
+            ...     }
+            ... }
+        
+            >>> base = None
+            >>> base = merger.merge(base, v1, meta={'version': 1})
+            >>> base = merger.merge(base, v2, meta={'version': 2})
+        
+            >>> pprint(base, width=55)
+            {'foo': [{'value': {'greeting': 'Hello, World!'},
+                      'version': 1},
+                     {'value': {'greeting': 'Howdy, World!'},
+                      'version': 2}]}
+        
+        Note that we use the *mergeOptions* keyword to supply additional options to
+        the merge strategy. In this case, we tell the *version* strategy to retain
+        only 5 most recent versions of this field. We also used the *meta* argument
+        to supply some document meta-data that is included for each version of the
+        field. *meta* can contain an arbitrary JSON object.
+        
+        The example above also demonstrates how *jsonmerge* is typically used
+        when merging more than two documents: you start with an empty *base*
+        and then consecutively merge different *heads* into it.
+        
+        If you care about well-formedness of your documents, you might also want to
+        obtain a schema for the documents that the *merge* method creates.
+        *jsonmerge* provides a way to automatically generate it from a schema for
+        the input document::
+        
+            >>> result_schema = merger.get_schema()
+        
+            >>> pprint(result_schema, width=80)
+            {'properties': {'foo': {'items': {'properties': {'value': {'type': 'object'}}},
+                                    'maxItems': 5,
+                                    'type': 'array'}}}
+        
+        Note that because of the *version* strategy, the type of the *foo* field
+        changed from *object* to *array*.
+        
+        
+        Merge strategies
+        ----------------
+        
+        These are the currently implemented merge strategies.
+        
+        overwrite
+          Overwrite the value in *base* with the value in *head*. Works with any
+          type.
+        
+        append
+          Append arrays. Works only with arrays.
+        
+        arrayMergeById
+          Merge arrays, identifying items to be merged by an ID field. Resulting
+          arrays have items from both *base* and *head* arrays.  Any items that
+          have an identical ID are merged based on the strategy specified further
+          down in the hierarchy.
+        
+          By default, array items are expected to be objects, and the ID of the
+          item is obtained from the *id* property of the object.
+        
+          You can specify an arbitrary *JSON pointer* to point to the ID of the
+          item using the *idRef* merge option. When resolving the pointer, the
+          document root is placed at the root of the array item (e.g. by
+          default, *idRef* is '/id').
+        
+          Array items in *head* for which the ID cannot be identified (e.g. *idRef*
+          pointer is invalid) are ignored.
+        
+          You can specify an additional item ID to be ignored using the *ignoreId*
+          merge option.
+        
+        objectMerge
+          Merge objects. Resulting objects have properties from both *base* and
+          *head*. Any properties that are present both in *base* and *head* are
+          merged based on the strategy specified further down in the hierarchy
+          (e.g. in *properties*, *patternProperties* or *additionalProperties*
+          schema keywords).
+        
+        version
+          Changes the type of the value to an array. New values are appended to the
+          array in the form of an object with a *value* property. This way all
+          values seen during the merge are preserved.
+        
+          You can limit the length of the list using the *limit* option in the
+          *mergeOptions* keyword.
+        
+          By default, if a *head* document contains the same value as the *base*
+          document, no new version will be appended. You can change this by setting
+          *ignoreDups* option to *false*.
+        
+        If a merge strategy is not specified in the schema, *objectMerge* is used
+        for objects and *overwrite* for all other values.
+        
+        You can implement your own strategies by making subclasses of
+        jsonmerge.strategies.Strategy and passing them to the Merger() constructor.
+        
+        
+        Limitations
+        -----------
+        
+        Merging of documents with schemas that do not have a well-defined type
+        (e.g. schemas using *allOf*, *anyOf* and *oneOf*) will likely fail. Such
+        documents could require merging of two values of different types. For
+        example, *jsonmerge* does not know how to merge a string to an object.
+        
+        You can work around this limitation by defining your own strategy that
+        specifies what to do in such cases. See the docstring documentation of
+        the *Strategy* class on how to do that. get_schema(), however, currently
+        provides no support for ambiguous schemas like that.
+        
+        
+        Requirements
+        ------------
+        
+        You need the *jsonschema* (https://pypi.python.org/pypi/jsonschema) module
+        installed.
+        
+        
+        Installation
+        ------------
+        
+        You install *jsonmerge*, as you would install any Python module, by running
+        these commands::
+        
+            python setup.py install
+            python setup.py test
+        
+        
+        Source
+        ------
+        
+        The latest version is available on GitHub: https://github.com/avian2/jsonmerge
+        
+        
+        License
+        -------
+        
+        Copyright 2014, Tomaz Solc <tomaz.solc@tablix.org>
+        
+        Permission is hereby granted, free of charge, to any person obtaining a copy
+        of this software and associated documentation files (the "Software"), to deal
+        in the Software without restriction, including without limitation the rights
+        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+        copies of the Software, and to permit persons to whom the Software is
+        furnished to do so, subject to the following conditions:
+        
+        The above copyright notice and this permission notice shall be included in
+        all copies or substantial portions of the Software.
+        
+        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+        THE SOFTWARE.
+        
+        .. _JSON schema: http://json-schema.org
+        
+        ..
+            vim: tw=75 ts=4 sw=4 expandtab softtabstop=4
+        
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/README.rst
@@ -0,0 +1,259 @@
+Merge a series of JSON documents
+================================
+
+This Python module allows you to merge a series of JSON documents into a
+single one.
+
+This problem often occurs, for example, when different authors fill in
+different parts of a common document and you need to construct a document
+that includes contributions from all the authors. It also helps when
+dealing with consecutive versions of a document where different fields get
+updated over time.
+
+Consider a trivial example with two documents::
+
+    >>> base = {
+    ...         "foo": 1,
+    ...         "bar": [ "one" ],
+    ...      }
+
+    >>> head = {
+    ...         "bar": [ "two" ],
+    ...         "baz": "Hello, world!"
+    ...     }
+
+We call the document we are merging changes into *base* and the changed
+document *head*. To merge these two documents using *jsonmerge*::
+
+    >>> from pprint import pprint
+
+    >>> from jsonmerge import merge
+    >>> result = merge(base, head)
+
+    >>> pprint(result, width=40)
+    {'bar': ['two'],
+     'baz': 'Hello, world!',
+     'foo': 1}
+
+As you can see, when encountering a JSON object, *jsonmerge* by default
+returns fields that appear in either the *base* or the *head* document. For
+other JSON types, it simply replaces the older value. These principles are
+also applied in the case of multiple nested JSON objects.
+
+In a more realistic use case, however, you might want to apply different
+*merge strategies* to different parts of the document. You can tell
+*jsonmerge* how to do that using a syntax based on `JSON schema`_.
+
+If you already have schemas for your document, you can simply expand them
+with additional keywords recognized by *jsonmerge*.
+
+You use the *mergeStrategy* schema keyword to specify the strategy. The
+two default strategies mentioned above are called *objectMerge* for objects
+and *overwrite* for all other types.
+
+Let's say you want to specify that the merged *bar* field in the example
+document above should contain elements from all documents, not just the
+latest one. You can do this with a schema like this::
+
+    >>> schema = {
+    ...             "properties": {
+    ...                 "bar": {
+    ...                     "mergeStrategy": "append"
+    ...                 }
+    ...             }
+    ...         }
+
+    >>> from jsonmerge import Merger
+    >>> merger = Merger(schema)
+    >>> result = merger.merge(base, head)
+
+    >>> pprint(result, width=40)
+    {'bar': ['one', 'two'],
+     'baz': 'Hello, world!',
+     'foo': 1}
+
+Another common example is when you need to keep a versioned list of values
+that appeared in the series of documents::
+
+    >>> schema = {
+    ...             "properties": {
+    ...                 "foo": {
+    ...                     "type": "object",
+    ...                     "mergeStrategy": "version",
+    ...                     "mergeOptions": { "limit": 5 }
+    ...                 }
+    ...             }
+    ...         }
+    >>> from jsonmerge import Merger
+    >>> merger = Merger(schema)
+
+    >>> v1 = {
+    ...     'foo': {
+    ...         'greeting': 'Hello, World!'
+    ...     }
+    ... }
+
+    >>> v2 = {
+    ...     'foo': {
+    ...         'greeting': 'Howdy, World!'
+    ...     }
+    ... }
+
+    >>> base = None
+    >>> base = merger.merge(base, v1, meta={'version': 1})
+    >>> base = merger.merge(base, v2, meta={'version': 2})
+
+    >>> pprint(base, width=55)
+    {'foo': [{'value': {'greeting': 'Hello, World!'},
+              'version': 1},
+             {'value': {'greeting': 'Howdy, World!'},
+              'version': 2}]}
+
+Note that we use the *mergeOptions* keyword to supply additional options to
+the merge strategy. In this case, we tell the *version* strategy to retain
+only 5 most recent versions of this field. We also used the *meta* argument
+to supply some document meta-data that is included for each version of the
+field. *meta* can contain an arbitrary JSON object.
+
+The example above also demonstrates how *jsonmerge* is typically used when
+merging more than two documents: you start with an empty *base*
+and then consecutively merge different *heads* into it.
+
+If you care about well-formedness of your documents, you might also want to
+obtain a schema for the documents that the *merge* method creates.
+*jsonmerge* provides a way to automatically generate it from a schema for
+the input document::
+
+    >>> result_schema = merger.get_schema()
+
+    >>> pprint(result_schema, width=80)
+    {'properties': {'foo': {'items': {'properties': {'value': {'type': 'object'}}},
+                            'maxItems': 5,
+                            'type': 'array'}}}
+
+Note that because of the *version* strategy, the type of the *foo* field
+changed from *object* to *array*.
+
+
+Merge strategies
+----------------
+
+These are the currently implemented merge strategies.
+
+overwrite
+  Overwrite the value in *base* with the value in *head*. Works with any
+  type.
+
+append
+  Append arrays. Works only with arrays.
+
+arrayMergeById
+  Merge arrays, identifying items to be merged by an ID field. Resulting
+  arrays have items from both *base* and *head* arrays.  Any items that
+  have an identical ID are merged based on the strategy specified further
+  down in the hierarchy.
+
+  By default, array items are expected to be objects, and the ID of the item
+  is obtained from the *id* property of the object.
+
+  You can specify an arbitrary *JSON pointer* to point to the ID of the
+  item using the *idRef* merge option. When resolving the pointer, document
+  root is placed at the root of the array item (e.g. by default, *idRef* is
+  '/id').
+
+  Array items in *head* for which the ID cannot be identified (e.g. *idRef*
+  pointer is invalid) are ignored.
+
+  You can specify an additional item ID to be ignored using the *ignoreId*
+  merge option.
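+
+  For instance, a minimal sketch, assuming a hypothetical *people* property
+  whose items are identified by a *name* field (the property and values are
+  illustrative, not part of the library)::
+
+    >>> schema = {
+    ...             "properties": {
+    ...                 "people": {
+    ...                     "mergeStrategy": "arrayMergeById",
+    ...                     "mergeOptions": { "idRef": "/name" }
+    ...                 }
+    ...             }
+    ...         }
+    >>> from jsonmerge import Merger
+    >>> merger = Merger(schema)
+    >>> base = {"people": [{"name": "Alice", "age": 30}]}
+    >>> head = {"people": [{"name": "Alice", "age": 31},
+    ...                    {"name": "Bob", "age": 25}]}
+    >>> pprint(merger.merge(base, head))
+    {'people': [{'age': 31, 'name': 'Alice'}, {'age': 25, 'name': 'Bob'}]}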
+
+objectMerge
+  Merge objects. Resulting objects have properties from both *base* and
+  *head*. Any properties that are present both in *base* and *head* are
+  merged based on the strategy specified further down in the hierarchy
+  (e.g. in *properties*, *patternProperties* or *additionalProperties*
+  schema keywords).
+
+version
+  Changes the type of the value to an array. New values are appended to the
+  array in the form of an object with a *value* property. This way all
+  values seen during the merge are preserved.
+
+  You can limit the length of the list using the *limit* option in the
+  *mergeOptions* keyword.
+
+  By default, if a *head* document contains the same value as the *base*
+  document, no new version will be appended. You can change this by setting
+  the *ignoreDups* option to *false*.
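+
+  For example, a minimal sketch of *ignoreDups* (the values are
+  illustrative)::
+
+    >>> merger = Merger({"mergeStrategy": "version",
+    ...                  "mergeOptions": {"ignoreDups": False}})
+    >>> base = merger.merge(None, "a")
+    >>> base = merger.merge(base, "a")
+    >>> pprint(base)
+    [{'value': 'a'}, {'value': 'a'}]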
+
+If a merge strategy is not specified in the schema, *objectMerge* is used
+for objects and *overwrite* for all other values.
+
+You can implement your own strategies by making subclasses of
+jsonmerge.strategies.Strategy and passing them to the Merger() constructor,
+as in the following sketch.
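+
+A minimal sketch of a custom strategy (the *discard* name and behavior are
+illustrative, not part of the library)::
+
+    >>> from jsonmerge import Merger, strategies
+    >>> class Discard(strategies.Strategy):
+    ...     def merge(self, walk, base, head, schema, meta, **kwargs):
+    ...         # Keep whatever is already in base and ignore head.
+    ...         return base
+    >>> merger = Merger(schema={"mergeStrategy": "discard"},
+    ...                 strategies={"discard": Discard()})
+    >>> merger.merge("a", "b")
+    'a'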
+
+
+Limitations
+-----------
+
+Merging of documents with schemas that do not have a well-defined type
+(e.g. schemas using *allOf*, *anyOf* and *oneOf*) will likely fail. Such
+documents could require merging of two values of different types. For
+example, *jsonmerge* does not know how to merge a string with an object.
+
+You can work around this limitation by defining your own strategy that
+specifies what to do in such cases. See the docstring documentation for the
+*Strategy* class on how to do that. Note, however, that get_schema()
+currently provides no support for such ambiguous schemas, as the sketch
+below shows.
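+
+A minimal sketch of this get_schema() limitation (the schema shape is
+illustrative)::
+
+    >>> from jsonmerge.exceptions import SchemaError
+    >>> merger = Merger({"mergeStrategy": "objectMerge",
+    ...                  "oneOf": [{"properties": {"a": {}}},
+    ...                            {"properties": {"b": {}}}]})
+    >>> try:
+    ...     merger.get_schema()
+    ... except SchemaError:
+    ...     print("ambiguous")
+    ambiguous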
+
+
+Requirements
+------------
+
+You need the *jsonschema* (https://pypi.python.org/pypi/jsonschema) module
+installed.
+
+
+Installation
+------------
+
+You install *jsonmerge*, as you would install any Python module, by running
+these commands::
+
+    python setup.py install
+    python setup.py test
+
+
+Source
+------
+
+The latest version is available on GitHub: https://github.com/avian2/jsonmerge
+
+
+License
+-------
+
+Copyright 2014, Tomaz Solc <tomaz.solc@tablix.org>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+.. _JSON schema: http://json-schema.org
+
+..
+    vim: tw=75 ts=4 sw=4 expandtab softtabstop=4
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/PKG-INFO
@@ -0,0 +1,274 @@
+Metadata-Version: 1.1
+Name: jsonmerge
+Version: 1.1.0
+Summary: Merge a series of JSON documents.
+Home-page: UNKNOWN
+Author: Tomaz Solc
+Author-email: tomaz.solc@tablix.org
+License: MIT
+Description: Merge a series of JSON documents
+        ================================
+        
+        This Python module allows you to merge a series of JSON documents into a
+        single one.
+        
+        This problem often occurs for example when different authors fill in
+        different parts of a common document and you need to construct a document
+        that includes contributions from all the authors. It also helps when
+        dealing with consecutive versions of a document where different fields get
+        updated over time.
+        
+        Consider a trivial example with two documents::
+        
+            >>> base = {
+            ...         "foo": 1,
+            ...         "bar": [ "one" ],
+            ...      }
+        
+            >>> head = {
+            ...         "bar": [ "two" ],
+            ...         "baz": "Hello, world!"
+            ...     }
+        
+        We call the document we are merging changes into *base* and the changed
+        document *head*. To merge these two documents using *jsonmerge*::
+        
+            >>> from pprint import pprint
+        
+            >>> from jsonmerge import merge
+            >>> result = merge(base, head)
+        
+            >>> pprint(result, width=40)
+            {'bar': ['two'],
+             'baz': 'Hello, world!',
+             'foo': 1}
+        
+        As you can see, when encountering a JSON object, *jsonmerge* by default
+        returns fields that appear in either *base* or *head* document. For other
+        JSON types, it simply replaces the older value. These principles are also
+        applied in case of multiple nested JSON objects.
+        
+        In a more realistic use case however, you might want to apply different
+        *merge strategies* to different parts of the document. You can tell
+        *jsonmerge* how to do that using a syntax based on `JSON schema`_.
+        
+        If you already have schemas for your document, you can simply expand them
+        with additional keywords recognized by *jsonmerge*.
+        
+        You use the *mergeStrategy* schema keyword to specify the strategy. The
+        default two strategies mentioned above are called *objectMerge* for objects
+        and *overwrite* for all other types.
+        
+        Let's say you want to specify that the merged *bar* field in the example
+        document above should contain elements from all documents, not just the
+        latest one. You can do this with a schema like this::
+        
+            >>> schema = {
+            ...             "properties": {
+            ...                 "bar": {
+            ...                     "mergeStrategy": "append"
+            ...                 }
+            ...             }
+            ...         }
+        
+            >>> from jsonmerge import Merger
+            >>> merger = Merger(schema)
+            >>> result = merger.merge(base, head)
+        
+            >>> pprint(result, width=40)
+            {'bar': ['one', 'two'],
+             'baz': 'Hello, world!',
+             'foo': 1}
+        
+        Another common example is when you need to keep a versioned list of values
+        that appeared in the series of documents::
+        
+            >>> schema = {
+            ...             "properties": {
+            ...                 "foo": {
+            ...                     "type": "object",
+            ...                     "mergeStrategy": "version",
+            ...                     "mergeOptions": { "limit": 5 }
+            ...                 }
+            ...             }
+            ...         }
+            >>> from jsonmerge import Merger
+            >>> merger = Merger(schema)
+        
+            >>> v1 = {
+            ...     'foo': {
+            ...         'greeting': 'Hello, World!'
+            ...     }
+            ... }
+        
+            >>> v2 = {
+            ...     'foo': {
+            ...         'greeting': 'Howdy, World!'
+            ...     }
+            ... }
+        
+            >>> base = None
+            >>> base = merger.merge(base, v1, meta={'version': 1})
+            >>> base = merger.merge(base, v2, meta={'version': 2})
+        
+            >>> pprint(base, width=55)
+            {'foo': [{'value': {'greeting': 'Hello, World!'},
+                      'version': 1},
+                     {'value': {'greeting': 'Howdy, World!'},
+                      'version': 2}]}
+        
+        Note that we use the *mergeOptions* keyword to supply additional options to
+        the merge strategy. In this case, we tell the *version* strategy to retain
+        only the 5 most recent versions of this field. We also use the *meta*
+        argument to supply document meta-data that is included with each version
+        of the field. *meta* can contain an arbitrary JSON object.
+        
+        The example above also demonstrates how *jsonmerge* is typically used
+        when merging more than two documents: you start with an empty *base*
+        and then consecutively merge different *heads* into it.
+        
+        If you care about well-formedness of your documents, you might also want to
+        obtain a schema for the documents that the *merge* method creates.
+        *jsonmerge* provides a way to automatically generate it from a schema for
+        the input document::
+        
+            >>> result_schema = merger.get_schema()
+        
+            >>> pprint(result_schema, width=80)
+            {'properties': {'foo': {'items': {'properties': {'value': {'type': 'object'}}},
+                                    'maxItems': 5,
+                                    'type': 'array'}}}
+        
+        Note that because of the *version* strategy, the type of the *foo* field
+        changed from *object* to *array*.
+        
+        
+        Merge strategies
+        ----------------
+        
+        These are the currently implemented merge strategies.
+        
+        overwrite
+          Overwrite the value in *base* with the value in *head*. Works with
+          any type.
+        
+        append
+          Append arrays. Works only with arrays.
+        
+        arrayMergeById
+          Merge arrays, identifying items to be merged by an ID field. Resulting
+          arrays have items from both *base* and *head* arrays.  Any items that
+          have an identical ID are merged based on the strategy specified further
+          down in the hierarchy.
+        
+          By default, array items are expected to be objects and the ID of the
+          item is obtained from the *id* property of the object.
+        
+          You can specify an arbitrary *JSON pointer* to point to the ID of the
+          item using the *idRef* merge option. When resolving the pointer, document
+          root is placed at the root of the array item (e.g. by default, *idRef* is
+          '/id').
+        
+          Array items in *head* for which the ID cannot be identified (e.g. *idRef*
+          pointer is invalid) are ignored.
+        
+          You can specify an additional item ID to be ignored using the *ignoreId*
+          merge option.
+        
+        objectMerge
+          Merge objects. Resulting objects have properties from both *base* and
+          *head*. Any properties that are present both in *base* and *head* are
+          merged based on the strategy specified further down in the hierarchy
+          (e.g. in *properties*, *patternProperties* or *additionalProperties*
+          schema keywords).
+        
+        version
+          Changes the type of the value to an array. New values are appended to the
+          array in the form of an object with a *value* property. This way all
+          values seen during the merge are preserved.
+        
+          You can limit the length of the list using the *limit* option in the
+          *mergeOptions* keyword.
+        
+          By default, if a *head* document contains the same value as the *base*
+          document, no new version will be appended. You can change this by
+          setting the *ignoreDups* option to *false*.
+        
+        If a merge strategy is not specified in the schema, *objectMerge* is used
+        for objects and *overwrite* for all other values.
+        
+        You can implement your own strategies by making subclasses of
+        jsonmerge.strategies.Strategy and passing them to the Merger() constructor.
+        
+        
+        Limitations
+        -----------
+        
+        Merging of documents with schemas that do not have a well-defined type
+        (e.g. schemas using *allOf*, *anyOf* and *oneOf*) will likely fail. Such
+        documents could require merging of two values of different types. For
+        example, *jsonmerge* does not know how to merge a string with an object.
+        
+        You can work around this limitation by defining your own strategy that
+        specifies what to do in such cases. See the docstring documentation for
+        the *Strategy* class on how to do that. Note, however, that get_schema()
+        currently provides no support for such ambiguous schemas.
+        
+        
+        Requirements
+        ------------
+        
+        You need the *jsonschema* (https://pypi.python.org/pypi/jsonschema)
+        module installed.
+        
+        
+        Installation
+        ------------
+        
+        You install *jsonmerge*, as you would install any Python module, by running
+        these commands::
+        
+            python setup.py install
+            python setup.py test
+        
+        
+        Source
+        ------
+        
+        The latest version is available on GitHub: https://github.com/avian2/jsonmerge
+        
+        
+        License
+        -------
+        
+        Copyright 2014, Tomaz Solc <tomaz.solc@tablix.org>
+        
+        Permission is hereby granted, free of charge, to any person obtaining a copy
+        of this software and associated documentation files (the "Software"), to deal
+        in the Software without restriction, including without limitation the rights
+        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+        copies of the Software, and to permit persons to whom the Software is
+        furnished to do so, subject to the following conditions:
+        
+        The above copyright notice and this permission notice shall be included in
+        all copies or substantial portions of the Software.
+        
+        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+        THE SOFTWARE.
+        
+        .. _JSON schema: http://json-schema.org
+        
+        ..
+            vim: tw=75 ts=4 sw=4 expandtab softtabstop=4
+        
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/SOURCES.txt
@@ -0,0 +1,15 @@
+ChangeLog
+MANIFEST.in
+README.rst
+setup.py
+jsonmerge/__init__.py
+jsonmerge/exceptions.py
+jsonmerge/strategies.py
+jsonmerge.egg-info/PKG-INFO
+jsonmerge.egg-info/SOURCES.txt
+jsonmerge.egg-info/dependency_links.txt
+jsonmerge.egg-info/requires.txt
+jsonmerge.egg-info/top_level.txt
+tests/__init__.py
+tests/test_jsonmerge.py
+tests/test_readme.py
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/dependency_links.txt
@@ -0,0 +1,1 @@
+
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/requires.txt
@@ -0,0 +1,1 @@
+jsonschema
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge.egg-info/top_level.txt
@@ -0,0 +1,1 @@
+jsonmerge
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge/__init__.py
@@ -0,0 +1,213 @@
+# vim:ts=4 sw=4 expandtab softtabstop=4
+from jsonmerge import strategies
+from jsonschema.validators import Draft4Validator
+
+class Walk(object):
+    def __init__(self, merger):
+        self.merger = merger
+        self.resolver = merger.validator.resolver
+
+    def is_type(self, instance, type):
+        """Check if instance if a specific JSON type."""
+        return self.merger.validator.is_type(instance, type)
+
+    def descend(self, schema, *args):
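+        # Resolve any $ref indirection, look up the merge strategy named by
+        # the schema's mergeStrategy keyword (falling back to a type-based
+        # default), and dispatch to it via work().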
+        if schema is not None:
+            ref = schema.get("$ref")
+            if ref is not None:
+                with self.resolver.resolving(ref) as resolved:
+                    return self.descend(resolved, *args)
+            else:
+                name = schema.get("mergeStrategy")
+                opts = schema.get("mergeOptions")
+                if opts is None:
+                    opts = {}
+        else:
+            name = None
+            opts = {}
+
+        if name is None:
+            name = self.default_strategy(schema, *args, **opts)
+
+        strategy = self.merger.strategies[name]
+
+        return self.work(strategy, schema, *args, **opts)
+
+class WalkInstance(Walk):
+
+    def add_meta(self, head, meta):
+        if meta is None:
+            rv = dict()
+        else:
+            rv = dict(meta)
+
+        rv['value'] = head
+        return rv
+
+    def default_strategy(self, schema, base, head, meta, **kwargs):
+        if self.is_type(head, "object"):
+            return "objectMerge"
+        else:
+            return "overwrite"
+
+    def work(self, strategy, schema, base, head, meta, **kwargs):
+        return strategy.merge(self, base, head, schema, meta, **kwargs)
+
+class WalkSchema(Walk):
+
+    def resolve_refs(self, schema, resolve_base=False):
+
+        if (not resolve_base) and self.resolver.base_uri == self.merger.schema.get('id', ''):
+            # no need to resolve refs in the context of the original schema - they 
+            # are still valid
+            return schema
+        elif self.is_type(schema, "array"):
+            return [ self.resolve_refs(v) for v in schema ]
+        elif self.is_type(schema, "object"):
+            ref = schema.get("$ref")
+            if ref is not None:
+                with self.resolver.resolving(ref) as resolved:
+                    return self.resolve_refs(resolved)
+            else:
+                return dict( ((k, self.resolve_refs(v)) for k, v in schema.items()) )
+        else:
+            return schema
+
+    def schema_is_object(self, schema):
+
+        objonly = (
+                'maxProperties',
+                'minProperties',
+                'required',
+                'additionalProperties',
+                'properties',
+                'patternProperties',
+                'dependencies')
+
+        for k in objonly:
+            if k in schema:
+                return True
+
+        if schema.get('type') == 'object':
+            return True
+
+        return False
+
+    def default_strategy(self, schema, meta, **kwargs):
+
+        if self.schema_is_object(schema):
+            return "objectMerge"
+        else:
+            return "overwrite"
+
+    def work(self, strategy, schema, meta, **kwargs):
+
+        schema = dict(schema)
+        schema.pop("mergeStrategy", None)
+        schema.pop("mergeOptions", None)
+
+        return strategy.get_schema(self, schema, meta, **kwargs)
+
+class Merger(object):
+
+    STRATEGIES = {
+        "overwrite": strategies.Overwrite(),
+        "version": strategies.Version(),
+        "append": strategies.Append(),
+        "objectMerge": strategies.ObjectMerge(),
+        "arrayMergeById": strategies.ArrayMergeById()
+    }
+
+    def __init__(self, schema, strategies=()):
+        """Create a new Merger object.
+
+        schema -- JSON schema to use when merging.
+        strategies -- Any additional merge strategies to use during merge.
+
+        strategies argument should be a dict mapping strategy names to
+        instances of Strategy subclasses.
+        """
+
+        self.schema = schema
+        self.validator = Draft4Validator(schema)
+
+        self.strategies = dict(self.STRATEGIES)
+        self.strategies.update(strategies)
+
+    def cache_schema(self, schema, uri=None):
+        """Cache an external schema reference.
+
+        schema -- JSON schema to cache
+        uri -- Optional URI for the schema
+
+        If the JSON schema for merging contains external references, they will
+        be fetched using HTTP from their respective URLs. Alternatively, this
+        method can be used to pre-populate the cache with any external schemas
+        that are already known.
+
+        If URI is omitted, it is obtained from the 'id' keyword of the schema.
+        """
+
+        if uri is None:
+            uri = schema.get('id', '')
+
+        self.validator.resolver.store.update(((uri, schema),))
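+
+        # A minimal usage sketch (the URIs and schema shapes below are
+        # illustrative only):
+        #
+        #   merger = Merger({'$ref': 'http://example.com/s.json#/definitions/foo'})
+        #   merger.cache_schema({'id': 'http://example.com/s.json',
+        #                        'definitions': {'foo': {'mergeStrategy': 'append'}}})
+        #   merger.merge([1], [2])  # -> [1, 2]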
+
+    def merge(self, base, head, meta=None):
+        """Merge head into base.
+
+        base -- Old JSON document you are merging into.
+        head -- New JSON document for merging into base.
+        meta -- Optional dictionary with meta-data.
+
+        Any elements in the meta dictionary will be added to
+        the dictionaries appended by the version strategies.
+
+        Returns an updated base document.
+        """
+
+        walk = WalkInstance(self)
+        return walk.descend(self.schema, base, head, meta)
+
+    def get_schema(self, meta=None):
+        """Get JSON schema for the merged document.
+
+        meta -- Optional JSON schema for the meta-data.
+
+        Returns a JSON schema for documents returned by the
+        merge() method.
+        """
+
+        if meta is not None:
+
+            # This is kind of ugly - schema for meta data
+            # can again contain references to external schemas.
+            #
+            # Since we already have in place all the machinery
+            # to resolve these references in the merge schema,
+            # we (ab)use it here to do the same for meta data
+            # schema.
+            m = Merger(meta)
+            m.validator.resolver.store.update(self.validator.resolver.store)
+
+            w = WalkSchema(m)
+            meta = w.resolve_refs(meta, resolve_base=True)
+
+        walk = WalkSchema(self)
+        return walk.descend(self.schema, meta)
+
+def merge(base, head, schema={}):
+    """Merge two JSON documents using strategies defined in schema.
+
+    base -- Old JSON document you are merging into.
+    head -- New JSON document for merging into base.
+    schema -- JSON schema to use when merging.
+
+    Merge strategy for each value can be specified in the schema
+    using the "mergeStrategy" keyword. If not specified, default
+    strategy is to use "objectMerge" for objects and "overwrite"
+    for all other types.
+    """
+
+    merger = Merger(schema)
+    return merger.merge(base, head)
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge/exceptions.py
@@ -0,0 +1,5 @@
+class JSONMergeError(TypeError): pass
+
+class BaseInstanceError(JSONMergeError): pass
+class HeadInstanceError(JSONMergeError): pass
+class SchemaError(JSONMergeError): pass
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/jsonmerge/strategies.py
@@ -0,0 +1,248 @@
+# vim:ts=4 sw=4 expandtab softtabstop=4
+from jsonmerge.exceptions import HeadInstanceError, \
+                                 BaseInstanceError, \
+                                 SchemaError
+import jsonschema
+import re
+
+class Strategy(object):
+    """Base class for merge strategies.
+    """
+
+    def merge(self, walk, base, head, schema, meta, **kwargs):
+        """Merge head instance into base.
+
+        walk -- WalkInstance object for the current context.
+        base -- Value being merged into.
+        head -- Value being merged.
+        schema -- Schema used for merging.
+        meta -- Meta data, as passed to the Merger.merge() method.
+        kwargs -- Dict with any extra options given in the 'mergeOptions'
+        keyword.
+
+        Specific merge strategies should override this method to implement
+        their behavior.
+
+        The function should return the object resulting from the merge.
+
+        Recursion into the next level, if necessary, is achieved by calling
+        walk.descend() method.
+        """
+        raise NotImplementedError
+
+    def get_schema(self, walk, schema, meta, **kwargs):
+        """Return the schema for the merged document.
+
+        walk -- WalkSchema object for the current context.
+        schema -- Original document schema.
+        meta -- Schema for the meta data, as passed to the Merger.get_schema()
+        method.
+        kwargs -- Dict with any extra options given in the 'mergeOptions'
+        keyword.
+
+        Specific merge strategies should override this method to modify the
+        document schema depending on the behavior of the merge() method.
+
+        The function should return the schema for the object resulting from the
+        merge.
+
+        Recursion into the next level, if necessary, is achieved by calling
+        walk.descend() method.
+
+        Implementations should take care that all external schema references
+        are resolved in the returned schema. This can be achieved by calling
+        walk.resolve_refs() method.
+        """
+        raise NotImplementedError
+
+class Overwrite(Strategy):
+    def merge(self, walk, base, head, schema, meta, **kwargs):
+        return head
+
+    def get_schema(self, walk, schema, meta, **kwargs):
+        return walk.resolve_refs(schema)
+
+class Version(Strategy):
+    def merge(self, walk, base, head, schema, meta, limit=None, unique=None, ignoreDups=True, **kwargs):
+
+        # backwards compatibility
+        if unique is False:
+            ignoreDups = False
+
+        if base is None:
+            base = []
+        else:
+            base = list(base)
+
+        if not ignoreDups or not base or base[-1]['value'] != head:
+            base.append(walk.add_meta(head, meta))
+            if limit is not None:
+                base = base[-limit:]
+
+        return base
+
+    def get_schema(self, walk, schema, meta, limit=None, **kwargs):
+
+        if meta is not None:
+            item = dict(meta)
+        else:
+            item = {}
+
+        if 'properties' not in item:
+            item['properties'] = {}
+
+        item['properties']['value'] = walk.resolve_refs(schema)
+
+        rv = {  "type": "array",
+                "items": item }
+
+        if limit is not None:
+            rv['maxItems'] = limit
+
+        return rv
+
+class Append(Strategy):
+    def merge(self, walk, base, head, schema, meta, **kwargs):
+        if not walk.is_type(head, "array"):
+            raise HeadInstanceError("Head for an 'append' merge strategy is not an array")
+
+        if base is None:
+            base = []
+        else:
+            if not walk.is_type(base, "array"):
+                raise BaseInstanceError("Base for an 'append' merge strategy is not an array")
+
+            base = list(base)
+
+        base += head
+        return base
+
+    def get_schema(self, walk, schema, meta, **kwargs):
+        schema.pop('maxItems', None)
+        schema.pop('uniqueItems', None)
+
+        return walk.resolve_refs(schema)
+
+
+class ArrayMergeById(Strategy):
+    def merge(self, walk, base, head, schema, meta, idRef="id", ignoreId=None, **kwargs):
+        if not walk.is_type(head, "array"):
+            raise HeadInstanceError("Head for an 'arrayMergeById' merge strategy is not an array")  # nopep8
+
+        if base is None:
+            base = []
+        else:
+            if not walk.is_type(base, "array"):
+                raise BaseInstanceError("Base for an 'arrayMergeById' merge strategy is not an array")  # nopep8
+            base = list(base)
+
+        subschema = None
+
+        if schema:
+            subschema = schema.get('items')
+
+        if walk.is_type(subschema, "array"):
+            raise SchemaError("'arrayMergeById' not supported when 'items' is an array")
+
+        for head_item in head:
+
+            try:
+                head_key = walk.resolver.resolve_fragment(head_item, idRef)
+            except jsonschema.RefResolutionError:
+                # Do nothing if idRef field cannot be found.
+                continue
+
+            if head_key == ignoreId:
+                continue
+
+            key_count = 0
+            for i, base_item in enumerate(base):
+                base_key = walk.resolver.resolve_fragment(base_item, idRef)
+                if base_key == head_key:
+                    key_count += 1
+                    # If there was a match, we replace with a merged item
+                    base[i] = walk.descend(subschema, base_item, head_item, meta)
+            if key_count == 0:
+                # If there wasn't a match, we append a new object
+                base.append(walk.descend(subschema, None, head_item, meta))
+            if key_count > 1:
+                raise BaseInstanceError("Id was not unique")
+
+        return base
+
+    def get_schema(self, walk, schema, meta, **kwargs):
+        subschema = None
+        if schema:
+            subschema = schema.get('items')
+
+        # Note we're discarding the walk.descend() result here. This is because
+        # it would de-reference the $ref if the subschema is a reference - i.e.
+        # in the result it would replace the reference with the copy of the
+        # target.
+        #
+        # But we want to keep the $ref and do the walk.descend() only on the target of the reference.
+        #
+        # This seems to work, but is an ugly workaround. walk.descend() should
+        # be fixed instead to not dereference $refs when not necessary.
+        walk.descend(subschema, meta)
+        return schema
+
+
+class ObjectMerge(Strategy):
+    def merge(self, walk, base, head, schema, meta, **kwargs):
+        if not walk.is_type(head, "object"):
+            raise HeadInstanceError("Head for an 'object' merge strategy is not an object")
+
+        if base is None:
+            base = {}
+        else:
+            if not walk.is_type(base, "object"):
+                raise BaseInstanceError("Base for an 'object' merge strategy is not an object")
+
+            base = dict(base)
+
+        for k, v in head.items():
+
+            subschema = None
+
+            # get subschema for this element
+            if schema is not None:
+                p = schema.get('properties')
+                if p is not None:
+                    subschema = p.get(k)
+
+                if subschema is None:
+                    p = schema.get('patternProperties')
+                    if p is not None:
+                        for pattern, s in p.items():
+                            if re.search(pattern, k):
+                                subschema = s
+
+                if subschema is None:
+                    p = schema.get('additionalProperties')
+                    if p is not None:
+                        subschema = p.get(k)
+
+            base[k] = walk.descend(subschema, base.get(k), v, meta)
+
+        return base
+
+    def get_schema(self, walk, schema, meta, **kwargs):
+
+        for forbidden in ("oneOf", "allOf", "anyOf"):
+            if forbidden in schema:
+                raise SchemaError("Type ambiguous schema")
+
+        schema2 = dict(schema)
+
+        def descend_keyword(keyword):
+            p = schema.get(keyword)
+            if p is not None:
+                for k, v in p.items():
+                    schema2[keyword][k] = walk.descend(v, meta)
+
+        descend_keyword("properties")
+        descend_keyword("patternProperties")
+        descend_keyword("additionalProperties")
+
+        return schema2
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/setup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+# vim:ts=4 sw=4 expandtab softtabstop=4
+
+from setuptools import setup
+
+setup(name='jsonmerge',
+    version='1.1.0',
+    description='Merge a series of JSON documents.',
+    license='MIT',
+    long_description=open("README.rst").read(),
+    author='Tomaz Solc',
+    author_email='tomaz.solc@tablix.org',
+    packages = [ 'jsonmerge' ],
+    install_requires = [ 'jsonschema' ],
+    test_suite = 'tests',
+    classifiers = [
+        "License :: OSI Approved :: MIT License",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2",
+        "Programming Language :: Python :: 3",
+        "Intended Audience :: Developers",
+    ],
+)
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/tests/test_jsonmerge.py
@@ -0,0 +1,1344 @@
+# vim:ts=4 sw=4 expandtab softtabstop=4
+import unittest
+import jsonmerge
+import jsonmerge.strategies
+from jsonmerge.exceptions import (
+    HeadInstanceError,
+    BaseInstanceError,
+    SchemaError
+)
+
+import jsonschema
+
+
+class TestMerge(unittest.TestCase):
+
+    def test_default(self):
+
+        schema = {}
+
+        base = None
+        base = jsonmerge.merge(base, "a", schema)
+        base = jsonmerge.merge(base, "b", schema)
+
+        self.assertEqual(base, "b")
+
+    def test_overwrite(self):
+
+        schema = {'mergeStrategy': 'overwrite'}
+
+        base = None
+        base = jsonmerge.merge(base, "a", schema)
+        base = jsonmerge.merge(base, "b", schema)
+
+        self.assertEqual(base, "b")
+
+    def test_version(self):
+
+        schema = {'mergeStrategy': 'version'}
+
+        base = None
+        base = jsonmerge.merge(base, "a", schema)
+        base = jsonmerge.merge(base, "b", schema)
+
+        self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
+
+    def test_version_does_not_duplicate(self):
+        # Don't record change if it didn't change
+
+        schema = {'mergeStrategy': 'version'}
+
+        base = None
+        base = jsonmerge.merge(base, "a", schema)
+        base = jsonmerge.merge(base, "b", schema)
+        base = jsonmerge.merge(base, "b", schema)
+
+        self.assertEqual(base, [{'value': "a"}, {'value': "b"}])
+
+    def test_version_meta(self):
+
+        schema = {'mergeStrategy': 'version'}
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, "a", meta={'uri': 'http://example.com/a'})
+        base = merger.merge(base, "b", meta={'uri': 'http://example.com/b'})
+
+        self.assertEqual(base, [
+            {'value': "a",
+             'uri': 'http://example.com/a'},
+            {'value': "b",
+             'uri': 'http://example.com/b'}])
+
+    def test_version_ignoredups_false(self):
+
+        schema = {'mergeStrategy': 'version',
+                  'mergeOptions': {'ignoreDups': False}}
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, "a")
+        base = merger.merge(base, "a")
+
+        self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
+
+    def test_version_unique_false(self):
+
+        schema = {'mergeStrategy': 'version',
+                  'mergeOptions': {'unique': False}}
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, "a")
+        base = merger.merge(base, "a")
+
+        self.assertEqual(base, [{'value': "a"}, {'value': "a"}])
+
+    def test_version_ignoredups_true(self):
+
+        schema = {'mergeStrategy': 'version'}
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, "a")
+        base = merger.merge(base, "a")
+
+        self.assertEqual(base, [{'value': "a"}])
+
+    def test_version_last(self):
+
+        schema = {'mergeStrategy': 'version',
+                  'mergeOptions': {'limit': 1}}
+
+        base = None
+        base = jsonmerge.merge(base, "a", schema)
+        base = jsonmerge.merge(base, "b", schema)
+
+        self.assertEqual(base, [{'value': "b"}])
+
+    def test_append(self):
+        schema = {'mergeStrategy': 'append'}
+
+        base = None
+        base = jsonmerge.merge(base, ["a"], schema)
+        base = jsonmerge.merge(base, ["b"], schema)
+
+        self.assertEqual(base, ["a", "b"])
+
+    def test_append_type_error(self):
+
+        schema = {'mergeStrategy': 'append'}
+
+        base = None
+        self.assertRaises(HeadInstanceError,
+                          jsonmerge.merge, base, "a", schema)
+
+    def test_append_type_error_base(self):
+
+        schema = {'mergeStrategy': 'append'}
+
+        base = "ab"
+        self.assertRaises(BaseInstanceError,
+                          jsonmerge.merge, base, ["a"], schema)
+
+    def test_merge_default(self):
+        schema = {}
+        base = None
+        base = jsonmerge.merge(base, {'a': "a"}, schema)
+        base = jsonmerge.merge(base, {'b': "b"}, schema)
+
+        self.assertEqual(base, {'a': "a", 'b': "b"})
+
+    def test_merge_empty_schema(self):
+
+        schema = {}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': {'b': 'c'}}, schema)
+
+        self.assertEqual(base, {'a': {'b': 'c'}})
+
+    def test_merge_trivial(self):
+
+        schema = {'mergeStrategy': 'objectMerge'}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': "a"}, schema)
+        base = jsonmerge.merge(base, {'b': "b"}, schema)
+
+        self.assertEqual(base, {'a': "a", 'b': "b"})
+
+    def test_merge_type_error(self):
+
+        schema = {'mergeStrategy': 'objectMerge'}
+
+        base = None
+        self.assertRaises(HeadInstanceError,
+                          jsonmerge.merge, base, "a", schema)
+
+    def test_merge_type_error_base(self):
+
+        schema = {'mergeStrategy': 'objectMerge'}
+
+        base = "ab"
+        self.assertRaises(BaseInstanceError,
+                          jsonmerge.merge, base, {'foo': 1}, schema)
+
+    def test_merge_overwrite(self):
+
+        schema = {'mergeStrategy': 'objectMerge'}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': "a"}, schema)
+        base = jsonmerge.merge(base, {'a': "b"}, schema)
+
+        self.assertEqual(base, {'a': "b"})
+
+    def test_merge_append(self):
+
+        schema = {'mergeStrategy': 'objectMerge',
+                  'properties': {
+                      'a': {'mergeStrategy': 'append'}
+                  }}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': ["a"]}, schema)
+        base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
+
+        self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
+
+    def test_merge_append_pattern(self):
+
+        schema = {'mergeStrategy': 'objectMerge',
+                  'patternProperties': {
+                      'a': {'mergeStrategy': 'append'}
+                  }}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': ["a"]}, schema)
+        base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
+
+        self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
+
+    def test_merge_append_additional(self):
+
+        schema = {'mergeStrategy': 'objectMerge',
+                  'additionalProperties': {
+                      'a': {'mergeStrategy': 'append'}
+                  }}
+
+        base = None
+        base = jsonmerge.merge(base, {'a': ["a"]}, schema)
+        base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
+
+        self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'})
+
+    def test_example(self):
+
+        head1 = {
+            'buyer': {
+                'id': {
+                    'name': "Test old",
+                },
+                'uri': 'Test uri old',
+            }
+        }
+
+        head2 = {
+            'buyer': {
+                'id': {
+                    'name': "Test new"
+                },
+                'uri': 'Test uri new',
+            },
+
+            'award': "Award"
+        }
+
+        base_expect = {
+            'buyer': {
+                'id': {
+                    'name': [
+                        {'value': "Test old"},
+                        {'value': "Test new"},
+                    ]
+                },
+                'uri': 'Test uri new',
+            },
+
+            'award': "Award"
+        }
+
+        schema = {
+            'mergeStrategy': 'objectMerge',
+            'properties': {
+                'buyer': {
+                    'properties': {
+                        'id': {
+                            'properties': {
+                                'name': {
+                                    'mergeStrategy': 'version',
+                                }
+                            }
+                        },
+                        'uri': {
+                            'mergeStrategy': 'overwrite',
+                        }
+                    },
+                },
+                'award': {
+                    'mergeStrategy': 'overwrite',
+                }
+            },
+        }
+
+        base = None
+        base = jsonmerge.merge(base, head1, schema)
+        base = jsonmerge.merge(base, head2, schema)
+
+        self.assertEqual(base, base_expect)
+
+    def test_refs(self):
+
+        schema = {
+            'properties': {
+                'a': {'$ref': "#/definitions/a"},
+            },
+            'definitions': {
+                "a": {
+                    "properties": {
+                        "b": {'mergeStrategy': 'version'},
+                    }
+                },
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, {"a": {"b": "c"}})
+        base = merger.merge(base, {"a": {"b": "d"}})
+
+        self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}})
+
+    def test_oneof(self):
+
+        schema = {
+            'oneOf': [
+                {'properties': {'a': {}}},
+                {'properties': {'b': {}}}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, {'a': 1})
+        base = merger.merge(base, {'b': 2})
+
+        self.assertEqual(base, {'a': 1, 'b': 2})
+
+    def test_custom_strategy(self):
+
+        schema = {'mergeStrategy': 'myStrategy'}
+
+        class MyStrategy(jsonmerge.strategies.Strategy):
+            def merge(self, walk, base, head, schema, meta, **kwargs):
+                return "foo"
+
+        merger = jsonmerge.Merger(schema=schema,
+                                  strategies={'myStrategy': MyStrategy()})
+
+        base = None
+        base = merger.merge(base, {'a': 1})
+
+        self.assertEqual(base, "foo")
+
+    def test_merge_by_id(self):
+        schema = {
+            "properties": {
+                "awards": {
+                    "type": "array",
+                    "mergeStrategy": "arrayMergeById",
+                    "items": {
+                        "properties": {
+                            "id": {"type": "string"},
+                            "field": {"type": "number"},
+                        }
+                    }
+                }
+            }
+        }
+
+        a = {
+            "awards": [
+                {"id": "A", "field": 1},
+                {"id": "B", "field": 2}
+            ]
+        }
+
+        b = {
+            "awards": [
+                {"id": "B", "field": 3},
+                {"id": "C", "field": 4}
+            ]
+        }
+
+        expected = {
+            "awards": [
+                {"id": "A", "field": 1},
+                {"id": "B", "field": 3},
+                {"id": "C", "field": 4}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_when_key_is_empty_should_do_nothing(self):
+        schema = {
+            "properties": {
+                "awards": {
+                    "type": "array",
+                    "mergeStrategy": "arrayMergeById",
+                    "mergeOptions": {"ignoreId": ""},
+                    "items": {
+                        "properties": {
+                            "id": {"type": "string"},
+                            "field": {"type": "number"},
+                        }
+                    }
+                }
+            }
+        }
+
+        a = {
+            "awards": [
+                {"id": "A", "field": 1},
+                {"id": "", "field": ""}
+            ]
+        }
+
+        b = {
+            "awards": [
+                {"id": "B", "field": 3},
+                {"id": "C", "field": 4}
+            ]
+        }
+
+        expected = {
+            "awards": [
+                {"id": "A", "field": 1},
+                {"id": "B", "field": 3},
+                {"id": "C", "field": 4}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_no_items(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+            "mergeOptions": {"idRef": "id"},
+        }
+
+        a = [
+            {"id": "A", "field": 1},
+        ]
+
+        b = [
+            {"id": "A", "field": 2},
+        ]
+
+        # Matching items are object-merged, so the integer "field" value
+        # falls back to the "overwrite" strategy.
+        expected = [
+            {"id": "A", "field": 2},
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_simple_ref(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+            "mergeOptions": {"idRef": "key"}
+        }
+
+        a = [
+            {"key": "A", "field": 1},
+        ]
+
+        b = [
+            {"key": "A", "field": 2},
+        ]
+
+        expected = [
+            {"key": "A", "field": 2},
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_no_key(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+        }
+
+        a = [
+            {"id": "A", "field": 1},
+        ]
+
+        b = [
+            {'field': 2}
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        # it should ignore array elements that do not have the id
+        self.assertEqual(base, a)
+
+    def test_merge_by_id_complex_ref(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+            "mergeOptions": {"idRef": "/foo/bar"},
+        }
+
+        a = [
+            {'foo': {'bar': 1}, 'baz': 1}
+        ]
+
+        b = [
+            {'foo': {'bar': 2}}
+        ]
+
+        c = [
+            {'foo': {'bar': 1}, 'baz': 2}
+        ]
+
+        # Items with a matching ID are object-merged, so 'baz' falls back to
+        # the "overwrite" strategy.
+        expected = [
+            {'foo': {'bar': 1}, 'baz': 2},
+            {'foo': {'bar': 2}}
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+        base = merger.merge(base, c)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_with_complex_array(self):
+        schema = {
+            "properties": {
+                "awards": {
+                    "type": "array",
+                    "mergeStrategy": "arrayMergeById",
+                    "items": {
+                        "properties": {
+                            "id": {"type": "string"},
+                            "field": {
+                                "type": "array",
+                                "items": {
+                                    "properties": {
+                                        "xx": {
+                                            "type": "string"
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        a = {
+            "awards": [
+                {"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
+                {"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA4"}]}
+            ]
+        }
+
+        b = {
+            "awards": [
+                {"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
+                {"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
+            ]
+        }
+
+        expected = {
+            "awards": [
+                {"id": "A", "field": [{"xx": "testA1"}, {"xx": "testA2"}]},
+                {"id": "B", "field": [{"xx": "testA3"}, {"xx": "testA6"}]},
+                {"id": "C", "field": [{"xx": "testA7"}, {"xx": "testA8"}]}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_with_subschema(self):
+        schema = {
+            "properties": {
+                "awards": {
+                    "type": "array",
+                    "mergeStrategy": "arrayMergeById",
+                    "items": {
+                        "type": "object",
+                        "properties": {
+                            "id": {
+                                "type": "string"
+                            },
+                            "field": {
+                                "type": "number",
+                                "mergeStrategy": "version"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        a = {
+            "awards": [
+                {"id": "A", "field": 1},
+                {"id": "B", "field": 2}
+            ]
+        }
+
+        b = {
+            "awards": [
+                {"id": "B", "field": 3},
+                {"id": "C", "field": 4}
+            ]
+        }
+
+        expected = {
+            "awards": [
+                {"id": "A", "field": [{"value": 1}]},
+                {"id": "B", "field": [{"value": 2}, {"value": 3}]},
+                {"id": "C", "field": [{"value": 4}]}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        base = merger.merge(base, a)
+        base = merger.merge(base, b)
+
+        self.assertEqual(base, expected)
+
+    def test_merge_by_id_items_array(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+            "items": [
+                {},
+                {},
+            ]
+        }
+
+        head = [
+            {'id': 'A'},
+            {'id': 'B'}
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        base = None
+        self.assertRaises(SchemaError, merger.merge, base, head)
+
+    def test_merge_by_id_bad_head_type(self):
+        schema = {
+            'mergeStrategy': 'arrayMergeById'
+        }
+
+        head = {'foo': 'bar'}
+        base = []
+
+        merger = jsonmerge.Merger(schema)
+        self.assertRaises(HeadInstanceError, merger.merge, base, head)
+
+    def test_merge_by_id_bad_base_type(self):
+        schema = {
+            'mergeStrategy': 'arrayMergeById'
+        }
+
+        head = []
+        base = {'foo': 'bar'}
+
+        merger = jsonmerge.Merger(schema)
+        self.assertRaises(BaseInstanceError, merger.merge, base, head)
+
+    def test_merge_by_id_non_unique_base(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+        }
+
+        base = [
+            {'id': 'a'},
+            {'id': 'a'}
+        ]
+
+        head = [
+            {'id': 'a',
+             'foo': 1}
+        ]
+
+        merger = jsonmerge.Merger(schema)
+
+        self.assertRaises(BaseInstanceError, merger.merge, base, head)
+
+    def test_merge_by_id_non_unique_head(self):
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+        }
+
+        base = [
+            {'id': 'a',
+             'foo': 1},
+        ]
+
+        head = [
+            {'id': 'a',
+             'foo': 2},
+            {'id': 'a',
+             'foo': 3}
+        ]
+
+        merger = jsonmerge.Merger(schema)
+        base = merger.merge(base, head)
+
+        self.assertEqual(base, [{'id': 'a', 'foo': 3}])
+
+    def test_append_with_maxitems(self):
+
+        schema = {
+            "mergeStrategy": "append",
+            "maxItems": 2,
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        head = ["a"]
+        base = None
+
+        base = merger.merge(base, head)
+        base = merger.merge(base, head)
+        base = merger.merge(base, head)
+
+        schema2 = merger.get_schema()
+
+        jsonschema.validate(head, schema2)
+        jsonschema.validate(base, schema2)
+
+    def test_append_with_unique(self):
+
+        schema = {
+            "mergeStrategy": "append",
+            "uniqueItems": True,
+        }
+
+        merger = jsonmerge.Merger(schema)
+
+        head = ["a"]
+        base = None
+
+        base = merger.merge(base, head)
+        base = merger.merge(base, head)
+
+        schema2 = merger.get_schema()
+
+        jsonschema.validate(head, schema2)
+        jsonschema.validate(base, schema2)
+
+
+class TestGetSchema(unittest.TestCase):
+
+    def test_default_overwrite(self):
+        schema = {'description': 'test'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, {'description': 'test'})
+
+    def test_default_object_merge_trivial(self):
+        schema = {'type': 'object'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, {'type': 'object'})
+
+    def test_default_object_merge(self):
+        schema = {
+            'properties': {
+                'foo': {
+                    'mergeStrategy': 'version',
+                }
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2,
+                         {
+                             'properties': {
+                                 'foo': {
+                                     'type': 'array',
+                                     'items': {
+                                         'properties': {
+                                             'value': {},
+                                         }
+                                     }
+                                 }
+                             }
+                         })
+
+    def test_overwrite(self):
+        schema = {'mergeStrategy': 'overwrite'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, {})
+
+    def test_append(self):
+        schema = {'type': 'array',
+                  'mergeStrategy': 'append'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, {'type': 'array'})
+
+    def test_version(self):
+        schema = {'mergeStrategy': 'version'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2,
+                         {
+                             'type': 'array',
+                             'items': {
+                                 'properties': {
+                                     'value': {}
+                                 }
+                             }
+                         })
+
+    def test_version_meta(self):
+        schema = {'type': 'object',
+                  'mergeStrategy': 'version'}
+
+        meta = {
+            'properties': {
+                'date': {},
+                'version': {}
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema(meta)
+
+        self.assertEqual(schema2,
+                         {
+                             'type': 'array',
+                             'items': {
+                                 'properties': {
+                                     'value': {'type': 'object'},
+                                     'date': {},
+                                     'version': {}
+                                 }
+                             }
+                         })
+
+    def test_version_limit(self):
+        schema = {'mergeStrategy': 'version',
+                  'mergeOptions': {'limit': 5}}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2,
+                         {
+                             'type': 'array',
+                             'items': {
+                                 'properties': {
+                                     'value': {}
+                                 }
+                             },
+                             'maxItems': 5
+                         })
+
+    def test_object_merge_simple(self):
+        schema = {'mergeStrategy': 'objectMerge'}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, {})
+
+    def test_object_merge_nested(self):
+        schema = {'mergeStrategy': 'objectMerge',
+                  'properties': {
+                      'foo': {'mergeStrategy': 'version'}
+                  }}
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2,
+                         {
+                             'properties': {
+                                 'foo': {
+                                     'type': 'array',
+                                     'items': {
+                                         'properties': {
+                                             'value': {}
+                                         }
+                                     }
+                                 }
+                             }
+                         })
+
+    def test_oneof(self):
+
+        schema = {
+            'mergeStrategy': 'objectMerge',
+            'oneOf': [
+                {'properties': {'a': {}}},
+                {'properties': {'b': {}}}
+            ]
+        }
+
+        merger = jsonmerge.Merger(schema)
+        self.assertRaises(SchemaError, merger.get_schema)
+
+    def test_resolve_refs(self):
+
+        schema_1 = {
+            'id': 'http://example.com/schema_1.json',
+            '$ref': 'schema_2.json#/definitions/foo'
+        }
+
+        schema_2 = {
+            'id': 'http://example.com/schema_2.json',
+            'definitions': {
+                'foo': {
+                    'mergeStrategy': 'overwrite',
+                    'properties': {
+                        'bar': {
+                            '$ref': '#/definitions/baz'
+                        },
+                        'b': {}
+                    },
+                },
+                'baz': {
+                    'mergeStrategy': 'append'
+                }
+            }
+        }
+
+        merger = jsonmerge.Merger(schema_1)
+        merger.cache_schema(schema_2)
+
+        mschema = merger.get_schema()
+
+        d = {'bar': []}
+        jsonschema.validate(d, mschema)
+
+    def test_dont_resolve_refs(self):
+
+        schema = {
+            'id': 'http://example.com/schema_1.json',
+            'mergeStrategy': 'overwrite',
+            'properties': {
+                'foo': {
+                    '$ref': '#/definitions/bar'
+                }
+            },
+            'definitions': {
+                'bar': {
+                    'properties': {
+                        'baz': {}
+                    }
+                }
+            }
+        }
+
+        mschema_correct = dict(schema)
+        del mschema_correct['mergeStrategy']
+
+        merger = jsonmerge.Merger(schema)
+
+        mschema = merger.get_schema()
+
+        self.assertEqual(mschema_correct, mschema)
+
+    def test_reference_in_meta(self):
+
+        schema = {'mergeStrategy': 'version'}
+
+        meta_schema = {
+            'id': 'http://example.com/schema_1.json',
+            '$ref': 'schema_2.json#/definitions/meta'
+        }
+
+        schema_2 = {
+            'id': 'http://example.com/schema_2.json',
+            'definitions': {
+                'meta': {
+                    'properties': {
+                        'foo': {
+                            'type': 'string'
+                        }
+                    }
+                }
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        merger.cache_schema(schema_2)
+
+        mschema = merger.get_schema(meta=meta_schema)
+
+        self.assertEqual(mschema,
+                         {
+                             'type': 'array',
+                             'items': {
+                                 'properties': {
+                                     'value': {},
+                                     'foo': {'type': 'string'}
+                                 }
+                             }
+                         })
+
+    def test_array_in_schema(self):
+
+        schema_1 = {
+            'id': 'http://example.com/schema_1.json',
+            '$ref': 'schema_2.json#/definitions/foo'
+        }
+
+        schema_2 = {
+            'id': 'http://example.com/schema_2.json',
+            'definitions': {
+                'foo': {
+                    'mergeStrategy': 'overwrite',
+                    'enum': [
+                        "foo",
+                        "bar",
+                    ]
+                },
+            }
+        }
+
+        merger = jsonmerge.Merger(schema_1)
+        merger.cache_schema(schema_2)
+
+        mschema = merger.get_schema()
+
+        d = {
+            'enum': [
+                "foo",
+                "bar",
+            ]
+        }
+
+        self.assertEqual(d, mschema)
+
+    def test_version_adds_array_type(self):
+        schema = {
+            "type": "object",
+            "properties": {
+                "buyer": {
+                    "properties": {
+                        "id": {
+                            "type": "object",
+                            "properties": {
+                                "name": {
+                                    "type": "string",
+                                    "mergeStrategy": "version"
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        expected = {
+            "type": "object",
+            "properties": {
+                "buyer": {
+                    "properties": {
+                        "id": {
+                            "type": "object",
+                            "properties": {
+                                "name": {
+                                    "type": "array",
+                                    "items": {
+                                        "properties": {
+                                            "value": {
+                                                "type": "string"
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, expected)
+
+    def test_merge_by_id(self):
+
+        schema = {
+            "mergeStrategy": "arrayMergeById",
+            "items": {
+                'type': 'object'
+            }
+        }
+
+        expected = {
+            "items": {
+                'type': 'object'
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, expected)
+
+    def test_merge_by_id_with_depth(self):
+
+        schema = {
+            "properties": {
+                "test": {
+                    "mergeStrategy": "arrayMergeById",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                }
+            },
+            "definitions": {
+                "refitem": {
+                    "type": "object",
+                    "properties": {
+                        "field1": {
+                            "type": "string",
+                            "mergeStrategy": "version"
+                        }
+                    }
+                }
+            }
+        }
+
+        expected = {
+            "properties": {
+                "test": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                }
+            },
+            "definitions": {
+                "refitem": {
+                    "type": "object",
+                    "properties": {
+                        "field1": {
+                            "type": "array",
+                            "items": {
+                                "properties": {
+                                    "value": {
+                                        "type": "string"
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, expected)
+
+    def test_merge_by_id_with_depth_twice(self):
+
+        # Here we have a $ref that get_schema() should descend into twice.
+        #
+        # The way ArrayMergeById.get_schema() is currently implemented, it
+        # expands any subschemas in #/definitions/refitem twice. By chance this
+        # currently results in the correct output, but it's not clear whether
+        # this is always the case. I can't currently find an example that
+        # breaks.
+
+        schema = {
+            "properties": {
+                "test": {
+                    "mergeStrategy": "arrayMergeById",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                },
+                "test2": {
+                    "mergeStrategy": "arrayMergeById",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                }
+            },
+            "definitions": {
+                "refitem": {
+                    "type": "object",
+                    "properties": {
+                        "field1": {
+                            "type": "string",
+                            "mergeStrategy": "version"
+                        }
+                    }
+                }
+            }
+        }
+
+        expected = {
+            "properties": {
+                "test": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                },
+                "test2": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/refitem"
+                    }
+                }
+            },
+            "definitions": {
+                "refitem": {
+                    "type": "object",
+                    "properties": {
+                        "field1": {
+                            "type": "array",
+                            "items": {
+                                "properties": {
+                                    "value": {
+                                        "type": "string"
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        self.maxDiff = None
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, expected)
+
+    def test_merge_by_id_with_depth_no_ref(self):
+        schema = {
+            "properties": {
+                "test": {
+                    "mergeStrategy": "arrayMergeById",
+                    "type": "array",
+                    "items": {
+                        "type": "object",
+                        "properties": {
+                            "field1": {
+                                "type": "string",
+                                "mergeStrategy": "version"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        expected = {
+            "properties": {
+                "test": {
+                    "type": "array",
+                    "items": {
+                        "type": "object",
+                        "properties": {
+                            "field1": {
+                                "type": "array",
+                                "items": {
+                                    "properties": {
+                                        "value": {
+                                            "type": "string"
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            },
+        }
+
+        merger = jsonmerge.Merger(schema)
+        schema2 = merger.get_schema()
+
+        self.assertEqual(schema2, expected)
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonmerge-1.1.0/tests/test_readme.py
@@ -0,0 +1,4 @@
+import doctest
+
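+# setuptools' test loader looks for a module-level additional_tests() hook;
+# returning a DocFileSuite here runs the examples in README.rst as doctests.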
+def additional_tests():
+    return doctest.DocFileSuite("../README.rst")
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/CHANGELOG.rst
@@ -0,0 +1,149 @@
+v2.5.0
+------
+
+* Improved performance on CPython by adding caching around ref resolution
+  (#203)
+
+v2.4.0
+------
+
+* Added a CLI (#134)
+* Added absolute path and absolute schema path to errors (#120)
+* Added ``relevance``
+* Meta-schemas are now loaded via ``pkgutil``
+
+v2.3.0
+------
+
+* Added ``by_relevance`` and ``best_match`` (#91)
+* Fixed ``format`` to allow adding formats for non-strings (#125)
+* Fixed the ``uri`` format to reject URI references (#131)
+
+v2.2.0
+------
+
+* Compile the host name regex (#127)
+* Allow arbitrary objects to be types (#129)
+
+v2.1.0
+------
+
+* Support RFC 3339 datetimes in conformance with the spec
+* Fixed error paths for additionalItems + items (#122)
+* Fixed wording for min / maxProperties (#117)
+
+
+v2.0.0
+------
+
+* Added ``create`` and ``extend`` to ``jsonschema.validators``
+* Removed ``ValidatorMixin``
+* Fixed array indices ref resolution (#95)
+* Fixed unknown scheme defragmenting and handling (#102)
+
+
+v1.3.0
+------
+
+* Better error tracebacks (#83)
+* Raise exceptions in ``ErrorTree``\s for keys not in the instance (#92)
+* __cause__ (#93)
+
+
+v1.2.0
+------
+
+* More attributes for ValidationError (#86)
+* Added ``ValidatorMixin.descend``
+* Fixed bad ``RefResolutionError`` message (#82)
+
+
+v1.1.0
+------
+
+* Canonicalize URIs (#70)
+* Allow attaching exceptions to ``format`` errors (#77)
+
+
+v1.0.0
+------
+
+* Support for Draft 4
+* Support for format
+* Longs are ints too!
+* Fixed a number of issues with ``$ref`` support (#66)
+* Draft4Validator is now the default
+* ``ValidationError.path`` is now in sequential order
+* Added ``ValidatorMixin``
+
+
+v0.8.0
+------
+
+* Full support for JSON References
+* ``validates`` for registering new validators
+* Documentation
+* Bugfixes
+
+    * uniqueItems not so unique (#34)
+    * Improper any (#47)
+
+
+v0.7
+----
+
+* Partial support for (JSON Pointer) ``$ref``
+* Deprecations
+
+  * ``Validator`` is replaced by ``Draft3Validator`` with a slightly different
+    interface
+  * ``validator(meta_validate=False)``
+
+
+v0.6
+----
+
+* Bugfixes
+
+  * Issue #30 - Wrong behavior for the dependencies property validation
+  * Fix a miswritten test
+
+
+v0.5
+----
+
+* Bugfixes
+
+  * Issue #17 - require path for error objects
+  * Issue #18 - multiple type validation for non-objects
+
+
+v0.4
+----
+
+* Preliminary support for programmatic access to error details (Issue #5).
+  There are certainly some corner cases that don't do the right thing yet, but
+  this mostly works.
+
+    In order to make this happen (and also to clean things up a bit), a number
+    of deprecations are necessary:
+
+        * ``stop_on_error`` is deprecated in ``Validator.__init__``. Use 
+          ``Validator.iter_errors()`` instead.
+        * ``number_types`` and ``string_types`` are deprecated there as well.
+          Use ``types={"number" : ..., "string" : ...}`` instead.
+        * ``meta_validate`` is also deprecated, and instead is now accepted as
+          an argument to ``validate``, ``iter_errors`` and ``is_valid``.
+
+* A bugfix or two
+
+
+v0.3
+----
+
+* Default for unknown types and properties is now to *not* error (consistent
+  with the schema).
+* Python 3 support
+* Removed dependency on SecureTypes now that the hash bug has been resolved.
+* "Numerous bug fixes" -- most notably, a divisibleBy error for floats and a
+  bunch of missing typechecks for irrelevant properties.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/MANIFEST.in
@@ -0,0 +1,5 @@
+include *.rst
+include COPYING
+include tox.ini
+include version.txt
+recursive-include json *
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/PKG-INFO
@@ -0,0 +1,126 @@
+Metadata-Version: 1.1
+Name: jsonschema
+Version: 2.5.1
+Summary: An implementation of JSON Schema validation for Python
+Home-page: http://github.com/Julian/jsonschema
+Author: Julian Berman
+Author-email: Julian@GrayVines.com
+License: MIT
+Description: .. image:: https://img.shields.io/pypi/v/jsonschema.svg
+            :target: https://pypi.python.org/pypi/jsonschema
+        .. image:: https://travis-ci.org/Julian/jsonschema.svg?branch=master
+            :target: https://travis-ci.org/Julian/jsonschema
+        .. image:: https://img.shields.io/pypi/l/jsonschema.svg
+            :target: https://pypi.python.org/pypi/jsonschema
+        
+        ==========
+        jsonschema
+        ==========
+        
+        ``jsonschema`` is an implementation of `JSON Schema <http://json-schema.org>`_
+        for Python (supporting 2.6+ including Python 3).
+        
+        .. code-block:: python
+        
+            >>> from jsonschema import validate
+        
+            >>> # A sample schema, like what we'd get from json.load()
+            >>> schema = {
+            ...     "type" : "object",
+            ...     "properties" : {
+            ...         "price" : {"type" : "number"},
+            ...         "name" : {"type" : "string"},
+            ...     },
+            ... }
+        
+            >>> # If no exception is raised by validate(), the instance is valid.
+            >>> validate({"name" : "Eggs", "price" : 34.99}, schema)
+        
+            >>> validate(
+            ...     {"name" : "Eggs", "price" : "Invalid"}, schema
+            ... )                                   # doctest: +IGNORE_EXCEPTION_DETAIL
+            Traceback (most recent call last):
+                ...
+            ValidationError: 'Invalid' is not of type 'number'
+        
+        
+        Features
+        --------
+        
+        * Full support for
+          `Draft 3 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft3Validator>`_
+          **and** `Draft 4 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft4Validator>`_
+          of the schema.
+        
+        * `Lazy validation <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
+          that can iteratively report *all* validation errors.
+        
+        * Small and extensible
+        
+        * `Programmatic querying <https://python-jsonschema.readthedocs.org/en/latest/errors/#module-jsonschema>`_
+          of which properties or items failed validation.
+        
+        
+        Release Notes
+        -------------
+        
+        Version 2.5.0 is mainly a performance release. The interface for `RefResolver`
+        was extended to add methods that improve performance on CPython.
+        
+        Support for custom `RefResolver` objects with the legacy interface should *not*
+        be affected. If you notice something amiss, please file an issue ticket.
+        
+        
+        Running the Test Suite
+        ----------------------
+        
+        If you have ``tox`` installed (perhaps via ``pip install tox`` or your
+        package manager), running ``tox`` in the directory of your source checkout will
+        run ``jsonschema``'s test suite on all of the versions of Python ``jsonschema``
+        supports. Note that you'll need to have all of those versions installed in
+        order to run the tests on each of them, otherwise ``tox`` will skip (and fail)
+        the tests on that version.
+        
+        Of course you're also free to just run the tests on a single version with your
+        favorite test runner. The tests live in the ``jsonschema.tests`` package.
+        
+        
+        Community
+        ---------
+        
+        There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
+        for this implementation on Google Groups.
+        
+        Please join, and feel free to send questions there.
+        
+        
+        Contributing
+        ------------
+        
+        I'm Julian Berman.
+        
+        ``jsonschema`` is on `GitHub <http://github.com/Julian/jsonschema>`_.
+        
+        Get in touch, via GitHub or otherwise, if you've got something to contribute,
+        it'd be most welcome!
+        
+        You can also generally find me on Freenode (nick: ``tos9``) in various
+        channels, including ``#python``.
+        
+        If you feel overwhelmingly grateful, you can woo me with beer money on
+        `Gittip <https://www.gittip.com/Julian/>`_ or via Google Wallet with the email
+        in my GitHub profile.
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/README.rst
@@ -0,0 +1,104 @@
+.. image:: https://img.shields.io/pypi/v/jsonschema.svg
+    :target: https://pypi.python.org/pypi/jsonschema
+.. image:: https://travis-ci.org/Julian/jsonschema.svg?branch=master
+    :target: https://travis-ci.org/Julian/jsonschema
+.. image:: https://img.shields.io/pypi/l/jsonschema.svg
+    :target: https://pypi.python.org/pypi/jsonschema
+
+==========
+jsonschema
+==========
+
+``jsonschema`` is an implementation of `JSON Schema <http://json-schema.org>`_
+for Python (supporting 2.6+ including Python 3).
+
+.. code-block:: python
+
+    >>> from jsonschema import validate
+
+    >>> # A sample schema, like what we'd get from json.load()
+    >>> schema = {
+    ...     "type" : "object",
+    ...     "properties" : {
+    ...         "price" : {"type" : "number"},
+    ...         "name" : {"type" : "string"},
+    ...     },
+    ... }
+
+    >>> # If no exception is raised by validate(), the instance is valid.
+    >>> validate({"name" : "Eggs", "price" : 34.99}, schema)
+
+    >>> validate(
+    ...     {"name" : "Eggs", "price" : "Invalid"}, schema
+    ... )                                   # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+        ...
+    ValidationError: 'Invalid' is not of type 'number'
+
+
+Features
+--------
+
+* Full support for
+  `Draft 3 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft3Validator>`_
+  **and** `Draft 4 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft4Validator>`_
+  of the schema.
+
+* `Lazy validation <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
+  that can iteratively report *all* validation errors.
+
+* Small and extensible
+
+* `Programmatic querying <https://python-jsonschema.readthedocs.org/en/latest/errors/#module-jsonschema>`_
+  of which properties or items failed validation.
+
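+As a quick sketch of the lazy validation mentioned above (the schema and
+instance here are only illustrative):
+
+.. code-block:: python
+
+    >>> from jsonschema import Draft4Validator
+
+    >>> validator = Draft4Validator({
+    ...     "properties" : {
+    ...         "price" : {"type" : "number"},
+    ...         "name" : {"type" : "string"},
+    ...     },
+    ... })
+
+    >>> # iter_errors() collects every failure instead of raising on the first.
+    >>> errors = list(validator.iter_errors({"name" : 5, "price" : "ten"}))
+    >>> len(errors)
+    2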
+
+Release Notes
+-------------
+
+Version 2.5.0 is mainly a performance release. The interface for `RefResolver`
+was extended to add methods that improve performance on CPython.
+
+Support for custom `RefResolver` objects with the legacy interface should *not*
+be affected. If you notice something amiss, please file an issue ticket.
+
+
+Running the Test Suite
+----------------------
+
+If you have ``tox`` installed (perhaps via ``pip install tox`` or your
+package manager), running ``tox`` in the directory of your source checkout will
+run ``jsonschema``'s test suite on all of the versions of Python ``jsonschema``
+supports. Note that you'll need to have all of those versions installed in
+order to run the tests on each of them, otherwise ``tox`` will skip (and fail)
+the tests on that version.
+
+Of course you're also free to just run the tests on a single version with your
+favorite test runner. The tests live in the ``jsonschema.tests`` package.
+
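+A minimal sketch with the standard library's runner (assuming you run it from
+the root of the source checkout):
+
+.. code-block:: python
+
+    import unittest
+
+    # Discover and run everything under the jsonschema.tests package.
+    suite = unittest.defaultTestLoader.discover("jsonschema/tests")
+    unittest.TextTestRunner(verbosity=2).run(suite)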
+
+Community
+---------
+
+There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
+for this implementation on Google Groups.
+
+Please join, and feel free to send questions there.
+
+
+Contributing
+------------
+
+I'm Julian Berman.
+
+``jsonschema`` is on `GitHub <http://github.com/Julian/jsonschema>`_.
+
+Get in touch, via GitHub or otherwise, if you've got something to contribute,
+it'd be most welcome!
+
+You can also generally find me on Freenode (nick: ``tos9``) in various
+channels, including ``#python``.
+
+If you feel overwhelmingly grateful, you can woo me with beer money on
+`Gittip <https://www.gittip.com/Julian/>`_ or via Google Wallet with the email
+in my GitHub profile.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/.gitignore
@@ -0,0 +1,1 @@
+TODO
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/.travis.yml
@@ -0,0 +1,4 @@
+language: python
+python: "2.7"
+install: pip install jsonschema
+script: bin/jsonschema_suite check
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2012 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/README.md
@@ -0,0 +1,138 @@
+JSON Schema Test Suite [![Build Status](https://travis-ci.org/json-schema/JSON-Schema-Test-Suite.png?branch=develop)](https://travis-ci.org/json-schema/JSON-Schema-Test-Suite)
+======================
+
+This repository contains a set of JSON objects that implementors of JSON Schema
+validation libraries can use to test their validators.
+
+It is meant to be language agnostic and should require only a JSON parser.
+
+The conversion of the JSON objects into tests within your test framework of
+choice is still the job of the validator implementor.
+
+Structure of a Test
+-------------------
+
+If you're going to use this suite, you need to know how tests are laid out. The
+tests are contained in the `tests` directory at the root of this repository.
+
+Inside that directory is a subdirectory for each draft or version of the
+schema. We'll use `draft3` as an example.
+
+If you look inside the draft directory, there are a number of `.json` files,
+which logically group a set of test cases together. Often the grouping is by
+property under test, but not always, especially within optional test files
+(discussed below).
+
+Inside each `.json` file is a single array containing objects. It's easiest to
+illustrate the structure of these with an example:
+
+```json
+    {
+        "description": "the description of the test case",
+        "schema": {"the schema that should" : "be validated against"},
+        "tests": [
+            {
+                "description": "a specific test of a valid instance",
+                "data": "the instance",
+                "valid": true
+            },
+            {
+                "description": "another specific test this time, invalid",
+                "data": 15,
+                "valid": false
+            }
+        ]
+    }
+```
+
+So each case has a description, a schema, and some tests, where `tests` is an
+array of one or more objects, each with a description, the data to validate,
+and a boolean indicating whether that data should be valid or invalid.
+
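+As a rough illustration, a Python implementor might turn one of these files
+into checks with the `jsonschema` library like so (the file path here is just
+an example):
+
+```python
+import json
+import jsonschema
+
+with open("tests/draft3/enum.json") as f:
+    for group in json.load(f):
+        validator = jsonschema.Draft3Validator(group["schema"])
+        for test in group["tests"]:
+            # "valid" says whether the data should pass the group's schema.
+            assert validator.is_valid(test["data"]) == test["valid"], \
+                test["description"]
+```
+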
+Coverage
+--------
+
+Draft 3 and 4 should have full coverage. If you see anything missing, or can
+think of a useful test that is absent, please send a pull request or open an
+issue.
+
+Who Uses the Test Suite
+-----------------------
+
+This suite is being used by:
+
+### Coffeescript ###
+
+* [jsck](https://github.com/pandastrike/jsck)
+
+### Dart ###
+
+* [json_schema](https://github.com/patefacio/json_schema) 
+
+### Erlang ###
+
+* [jesse](https://github.com/klarna/jesse)
+
+### Go ###
+
+* [gojsonschema](https://github.com/sigu-399/gojsonschema) 
+
+### Haskell ###
+
+* [aeson-schema](https://github.com/timjb/aeson-schema)
+* [hjsonschema](https://github.com/seagreen/hjsonschema)
+
+### Java ###
+
+* [json-schema-validator](https://github.com/fge/json-schema-validator)
+
+### Javascript ###
+
+* [json-schema-benchmark](https://github.com/Muscula/json-schema-benchmark)
+* [direct-schema](https://github.com/IreneKnapp/direct-schema)
+* [is-my-json-valid](https://github.com/mafintosh/is-my-json-valid)
+* [jassi](https://github.com/iclanzan/jassi)
+* [JaySchema](https://github.com/natesilva/jayschema)
+* [json-schema-valid](https://github.com/ericgj/json-schema-valid)
+* [Jsonary](https://github.com/jsonary-js/jsonary)
+* [jsonschema](https://github.com/tdegrunt/jsonschema)
+* [request-validator](https://github.com/bugventure/request-validator)
+* [skeemas](https://github.com/Prestaul/skeemas)
+* [tv4](https://github.com/geraintluff/tv4)
+* [z-schema](https://github.com/zaggino/z-schema)
+* [jsen](https://github.com/bugventure/jsen)
+
+### .NET ###
+
+* [Newtonsoft.Json.Schema](https://github.com/JamesNK/Newtonsoft.Json.Schema)
+
+### PHP ###
+
+* [json-schema](https://github.com/justinrainbow/json-schema)
+
+### Python ###
+
+* [jsonschema](https://github.com/Julian/jsonschema)
+
+### Ruby ###
+
+* [json-schema](https://github.com/hoxworth/json-schema)
+
+### Rust ###
+
+* [valico](https://github.com/rustless/valico)
+
+### Swift ###
+
+* [JSONSchema](https://github.com/kylef/JSONSchema.swift)
+
+If you use it as well, please fork and send a pull request adding yourself to
+the list :).
+
+Contributing
+------------
+
+If you see something missing or incorrect, a pull request is most welcome!
+
+There are some sanity checks in place for testing the test suite. You can run
+them with `bin/jsonschema_suite check`. They will be run automatically by
+[Travis CI](https://travis-ci.org/) as well.
new file mode 100755
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/bin/jsonschema_suite
@@ -0,0 +1,283 @@
+#! /usr/bin/env python
+from __future__ import print_function
+import sys
+import textwrap
+
+try:
+    import argparse
+except ImportError:
+    print(textwrap.dedent("""
+        The argparse library could not be imported. jsonschema_suite requires
+        either Python 2.7 or for you to install argparse. You can do so by
+        running `pip install argparse`, `easy_install argparse` or by
+        downloading argparse and running `python2.6 setup.py install`.
+
+        See https://pypi.python.org/pypi/argparse for details.
+    """.strip("\n")))
+    sys.exit(1)
+
+import errno
+import fnmatch
+import json
+import os
+import random
+import shutil
+import unittest
+import warnings
+
+if getattr(unittest, "skipIf", None) is None:
+    unittest.skipIf = lambda cond, msg : lambda fn : fn
+
+try:
+    import jsonschema
+except ImportError:
+    jsonschema = None
+else:
+    validators = getattr(
+        jsonschema.validators, "validators", jsonschema.validators
+    )
+
+
+ROOT_DIR = os.path.join(
+    os.path.dirname(__file__), os.pardir).rstrip("__pycache__")
+SUITE_ROOT_DIR = os.path.join(ROOT_DIR, "tests")
+
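+# Schemas for the remote ref tests; the `serve` command serves these over
+# HTTP and `dump_remotes` writes them out as a file tree.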
+REMOTES = {
+    "integer.json": {"type": "integer"},
+    "subSchemas.json": {
+        "integer": {"type": "integer"},
+        "refToInteger": {"$ref": "#/integer"},
+    },
+    "folder/folderInteger.json": {"type": "integer"}
+}
+REMOTES_DIR = os.path.join(ROOT_DIR, "remotes")
+
+TESTSUITE_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-03/schema#",
+    "type": "array",
+    "items": {
+        "type": "object",
+        "properties": {
+            "description": {"type": "string", "required": True},
+            "schema": {"required": True},
+            "tests": {
+                "type": "array",
+                "items": {
+                    "type": "object",
+                    "properties": {
+                        "description": {"type": "string", "required": True},
+                        "data": {"required": True},
+                        "valid": {"type": "boolean", "required": True}
+                    },
+                    "additionalProperties": False
+                },
+                "minItems": 1
+            }
+        },
+        "additionalProperties": False,
+        "minItems": 1
+    }
+}
+
+
+def files(paths):
+    for path in paths:
+        with open(path) as test_file:
+            yield json.load(test_file)
+
+
+def groups(paths):
+    for test_file in files(paths):
+        for group in test_file:
+            yield group
+
+
+def cases(paths):
+    for test_group in groups(paths):
+        for test in test_group["tests"]:
+            test["schema"] = test_group["schema"]
+            yield test
+
+
+def collect(root_dir):
+    for root, dirs, files in os.walk(root_dir):
+        for filename in fnmatch.filter(files, "*.json"):
+            yield os.path.join(root, filename)
+
+
+class SanityTests(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        print("Looking for tests in %s" % SUITE_ROOT_DIR)
+        cls.test_files = list(collect(SUITE_ROOT_DIR))
+        print("Found %s test files" % len(cls.test_files))
+        assert cls.test_files, "Didn't find the test files!"
+
+    def test_all_files_are_valid_json(self):
+        for path in self.test_files:
+            with open(path) as test_file:
+                try:
+                    json.load(test_file)
+                except ValueError as error:
+                    self.fail("%s contains invalid JSON (%s)" % (path, error))
+
+    def test_all_descriptions_have_reasonable_length(self):
+        for case in cases(self.test_files):
+            descript = case["description"]
+            self.assertLess(
+                len(descript),
+                60,
+                "%r is too long! (keep it to less than 60 chars)" % (descript,)
+            )
+
+    def test_all_descriptions_are_unique(self):
+        for group in groups(self.test_files):
+            descriptions = set(test["description"] for test in group["tests"])
+            self.assertEqual(
+                len(descriptions),
+                len(group["tests"]),
+                "%r contains a duplicate description" % (group,)
+            )
+
+    @unittest.skipIf(jsonschema is None, "Validation library not present!")
+    def test_all_schemas_are_valid(self):
+        for schema in os.listdir(SUITE_ROOT_DIR):
+            schema_validator = validators.get(schema)
+            if schema_validator is not None:
+                test_files = collect(os.path.join(SUITE_ROOT_DIR, schema))
+                for case in cases(test_files):
+                    try:
+                        schema_validator.check_schema(case["schema"])
+                    except jsonschema.SchemaError as error:
+                        self.fail("%s contains an invalid schema (%s)" %
+                                  (case, error))
+            else:
+                warnings.warn("No schema validator for %s" % schema)
+
+    @unittest.skipIf(jsonschema is None, "Validation library not present!")
+    def test_suites_are_valid(self):
+        validator = jsonschema.Draft3Validator(TESTSUITE_SCHEMA)
+        for tests in files(self.test_files):
+            try:
+                validator.validate(tests)
+            except jsonschema.ValidationError as error:
+                self.fail(str(error))
+
+    def test_remote_schemas_are_updated(self):
+        for url, schema in REMOTES.items():
+            filepath = os.path.join(REMOTES_DIR, url)
+            with open(filepath) as schema_file:
+                self.assertEqual(json.load(schema_file), schema)
+
+
+def main(arguments):
+    if arguments.command == "check":
+        suite = unittest.TestLoader().loadTestsFromTestCase(SanityTests)
+        result = unittest.TextTestRunner(verbosity=2).run(suite)
+        sys.exit(not result.wasSuccessful())
+    elif arguments.command == "flatten":
+        selected_cases = [case for case in cases(collect(arguments.version))]
+
+        if arguments.randomize:
+            random.shuffle(selected_cases)
+
+        json.dump(selected_cases, sys.stdout, indent=4, sort_keys=True)
+    elif arguments.command == "remotes":
+        json.dump(REMOTES, sys.stdout, indent=4, sort_keys=True)
+    elif arguments.command == "dump_remotes":
+        if arguments.update:
+            shutil.rmtree(arguments.out_dir, ignore_errors=True)
+
+        try:
+            os.makedirs(arguments.out_dir)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                print("%s already exists. Aborting." % arguments.out_dir)
+                sys.exit(1)
+            raise
+
+        for url, schema in REMOTES.items():
+            filepath = os.path.join(arguments.out_dir, url)
+
+            try:
+                os.makedirs(os.path.dirname(filepath))
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise
+
+            with open(filepath, "wb") as out_file:
+                json.dump(schema, out_file, indent=4, sort_keys=True)
+    elif arguments.command == "serve":
+        try:
+            from flask import Flask, jsonify
+        except ImportError:
+            print(textwrap.dedent("""
+                The Flask library is required to serve the remote schemas.
+
+                You can install it by running `pip install Flask`.
+
+                Alternatively, see the `jsonschema_suite remotes` or
+                `jsonschema_suite dump_remotes` commands to create static files
+                that can be served with your own web server.
+            """.strip("\n")))
+            sys.exit(1)
+
+        app = Flask(__name__)
+
+        @app.route("/<path:path>")
+        def serve_path(path):
+            if path in REMOTES:
+                return jsonify(REMOTES[path])
+            return "Document does not exist.", 404
+
+        app.run(port=1234)
+
+
+parser = argparse.ArgumentParser(
+    description="JSON Schema Test Suite utilities",
+)
+subparsers = parser.add_subparsers(help="utility commands", dest="command")
+
+check = subparsers.add_parser("check", help="Sanity check the test suite.")
+
+flatten = subparsers.add_parser(
+    "flatten",
+    help="Output a flattened file containing a selected version's test cases."
+)
+flatten.add_argument(
+    "--randomize",
+    action="store_true",
+    help="Randomize the order of the outputted cases.",
+)
+flatten.add_argument(
+    "version", help="The directory containing the version to output",
+)
+
+remotes = subparsers.add_parser(
+    "remotes",
+    help="Output the expected URLs and their associated schemas for remote "
+         "ref tests as a JSON object."
+)
+
+dump_remotes = subparsers.add_parser(
+    "dump_remotes", help="Dump the remote ref schemas into a file tree",
+)
+dump_remotes.add_argument(
+    "--update",
+    action="store_true",
+    help="Update the remotes in an existing directory.",
+)
+dump_remotes.add_argument(
+    "--out-dir",
+    default=REMOTES_DIR,
+    type=os.path.abspath,
+    help="The output directory to create as the root of the file tree",
+)
+
+serve = subparsers.add_parser(
+    "serve",
+    help="Start a webserver to serve schemas used by remote ref tests."
+)
+
+if __name__ == "__main__":
+    main(parser.parse_args())
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/remotes/folder/folderInteger.json
@@ -0,0 +1,3 @@
+{
+    "type": "integer"
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/remotes/integer.json
@@ -0,0 +1,3 @@
+{
+    "type": "integer"
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/remotes/subSchemas.json
@@ -0,0 +1,8 @@
+{
+    "integer": {
+        "type": "integer"
+    }, 
+    "refToInteger": {
+        "$ref": "#/integer"
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/additionalItems.json
@@ -0,0 +1,82 @@
+[
+    {
+        "description": "additionalItems as schema",
+        "schema": {
+            "items": [],
+            "additionalItems": {"type": "integer"}
+        },
+        "tests": [
+            {
+                "description": "additional items match schema",
+                "data": [ 1, 2, 3, 4 ],
+                "valid": true
+            },
+            {
+                "description": "additional items do not match schema",
+                "data": [ 1, 2, 3, "foo" ],
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "items is schema, no additionalItems",
+        "schema": {
+            "items": {},
+            "additionalItems": false
+        },
+        "tests": [
+            {
+                "description": "all items match schema",
+                "data": [ 1, 2, 3, 4, 5 ],
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "array of items with no additionalItems",
+        "schema": {
+            "items": [{}, {}, {}],
+            "additionalItems": false
+        },
+        "tests": [
+            {
+                "description": "no additional items present",
+                "data": [ 1, 2, 3 ],
+                "valid": true
+            },
+            {
+                "description": "additional items are not permitted",
+                "data": [ 1, 2, 3, 4 ],
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "additionalItems as false without items",
+        "schema": {"additionalItems": false},
+        "tests": [
+            {
+                "description":
+                    "items defaults to empty schema so everything is valid",
+                "data": [ 1, 2, 3, 4, 5 ],
+                "valid": true
+            },
+            {
+                "description": "ignores non-arrays",
+                "data": {"foo" : "bar"},
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "additionalItems are allowed by default",
+        "schema": {"items": []},
+        "tests": [
+            {
+                "description": "only the first items are validated",
+                "data": [1, "foo", false],
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/additionalProperties.json
@@ -0,0 +1,88 @@
+[
+    {
+        "description":
+            "additionalProperties being false does not allow other properties",
+        "schema": {
+            "properties": {"foo": {}, "bar": {}},
+            "patternProperties": { "^v": {} },
+            "additionalProperties": false
+        },
+        "tests": [
+            {
+                "description": "no additional properties is valid",
+                "data": {"foo": 1},
+                "valid": true
+            },
+            {
+                "description": "an additional property is invalid",
+                "data": {"foo" : 1, "bar" : 2, "quux" : "boom"},
+                "valid": false
+            },
+            {
+                "description": "ignores non-objects",
+                "data": [1, 2, 3],
+                "valid": true
+            },
+            {
+                "description": "patternProperties are not additional properties",
+                "data": {"foo":1, "vroom": 2},
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description":
+            "additionalProperties allows a schema which should validate",
+        "schema": {
+            "properties": {"foo": {}, "bar": {}},
+            "additionalProperties": {"type": "boolean"}
+        },
+        "tests": [
+            {
+                "description": "no additional properties is valid",
+                "data": {"foo": 1},
+                "valid": true
+            },
+            {
+                "description": "an additional valid property is valid",
+                "data": {"foo" : 1, "bar" : 2, "quux" : true},
+                "valid": true
+            },
+            {
+                "description": "an additional invalid property is invalid",
+                "data": {"foo" : 1, "bar" : 2, "quux" : 12},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description":
+            "additionalProperties can exist by itself",
+        "schema": {
+            "additionalProperties": {"type": "boolean"}
+        },
+        "tests": [
+            {
+                "description": "an additional valid property is valid",
+                "data": {"foo" : true},
+                "valid": true
+            },
+            {
+                "description": "an additional invalid property is invalid",
+                "data": {"foo" : 1},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "additionalProperties are allowed by default",
+        "schema": {"properties": {"foo": {}, "bar": {}}},
+        "tests": [
+            {
+                "description": "additional properties are allowed",
+                "data": {"foo": 1, "bar": 2, "quux": true},
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/default.json
@@ -0,0 +1,49 @@
+[
+    {
+        "description": "invalid type for default",
+        "schema": {
+            "properties": {
+                "foo": {
+                    "type": "integer",
+                    "default": []
+                }
+            }
+        },
+        "tests": [
+            {
+                "description": "valid when property is specified",
+                "data": {"foo": 13},
+                "valid": true
+            },
+            {
+                "description": "still valid when the invalid default is used",
+                "data": {},
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "invalid string value for default",
+        "schema": {
+            "properties": {
+                "bar": {
+                    "type": "string",
+                    "minLength": 4,
+                    "default": "bad"
+                }
+            }
+        },
+        "tests": [
+            {
+                "description": "valid when property is specified",
+                "data": {"bar": "good"},
+                "valid": true
+            },
+            {
+                "description": "still valid when the invalid default is used",
+                "data": {},
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/dependencies.json
@@ -0,0 +1,108 @@
+[
+    {
+        "description": "dependencies",
+        "schema": {
+            "dependencies": {"bar": "foo"}
+        },
+        "tests": [
+            {
+                "description": "neither",
+                "data": {},
+                "valid": true
+            },
+            {
+                "description": "nondependant",
+                "data": {"foo": 1},
+                "valid": true
+            },
+            {
+                "description": "with dependency",
+                "data": {"foo": 1, "bar": 2},
+                "valid": true
+            },
+            {
+                "description": "missing dependency",
+                "data": {"bar": 2},
+                "valid": false
+            },
+            {
+                "description": "ignores non-objects",
+                "data": "foo",
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "multiple dependencies",
+        "schema": {
+            "dependencies": {"quux": ["foo", "bar"]}
+        },
+        "tests": [
+            {
+                "description": "neither",
+                "data": {},
+                "valid": true
+            },
+            {
+                "description": "nondependants",
+                "data": {"foo": 1, "bar": 2},
+                "valid": true
+            },
+            {
+                "description": "with dependencies",
+                "data": {"foo": 1, "bar": 2, "quux": 3},
+                "valid": true
+            },
+            {
+                "description": "missing dependency",
+                "data": {"foo": 1, "quux": 2},
+                "valid": false
+            },
+            {
+                "description": "missing other dependency",
+                "data": {"bar": 1, "quux": 2},
+                "valid": false
+            },
+            {
+                "description": "missing both dependencies",
+                "data": {"quux": 1},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "multiple dependencies subschema",
+        "schema": {
+            "dependencies": {
+                "bar": {
+                    "properties": {
+                        "foo": {"type": "integer"},
+                        "bar": {"type": "integer"}
+                    }
+                }
+            }
+        },
+        "tests": [
+            {
+                "description": "valid",
+                "data": {"foo": 1, "bar": 2},
+                "valid": true
+            },
+            {
+                "description": "wrong type",
+                "data": {"foo": "quux", "bar": 2},
+                "valid": false
+            },
+            {
+                "description": "wrong type other",
+                "data": {"foo": 2, "bar": "quux"},
+                "valid": false
+            },
+            {
+                "description": "wrong type both",
+                "data": {"foo": "quux", "bar": "quux"},
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/disallow.json
@@ -0,0 +1,80 @@
+[
+    {
+        "description": "disallow",
+        "schema": {
+            "disallow": "integer"
+        },
+        "tests": [
+            {
+                "description": "allowed",
+                "data": "foo",
+                "valid": true
+            },
+            {
+                "description": "disallowed",
+                "data": 1,
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "multiple disallow",
+        "schema": {
+            "disallow": ["integer", "boolean"]
+        },
+        "tests": [
+            {
+                "description": "valid",
+                "data": "foo",
+                "valid": true
+            },
+            {
+                "description": "mismatch",
+                "data": 1,
+                "valid": false
+            },
+            {
+                "description": "other mismatch",
+                "data": true,
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "multiple disallow subschema",
+        "schema": {
+            "disallow":
+                ["string",
+                 {
+                    "type": "object",
+                    "properties": {
+                        "foo": {
+                            "type": "string"
+                        }
+                    }
+                 }]
+        },
+        "tests": [
+            {
+                "description": "match",
+                "data": 1,
+                "valid": true
+            },
+            {
+                "description": "other match",
+                "data": {"foo": 1},
+                "valid": true
+            },
+            {
+                "description": "mismatch",
+                "data": "foo",
+                "valid": false
+            },
+            {
+                "description": "other mismatch",
+                "data": {"foo": "bar"},
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/divisibleBy.json
@@ -0,0 +1,60 @@
+[
+    {
+        "description": "by int",
+        "schema": {"divisibleBy": 2},
+        "tests": [
+            {
+                "description": "int by int",
+                "data": 10,
+                "valid": true
+            },
+            {
+                "description": "int by int fail",
+                "data": 7,
+                "valid": false
+            },
+            {
+                "description": "ignores non-numbers",
+                "data": "foo",
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "by number",
+        "schema": {"divisibleBy": 1.5},
+        "tests": [
+            {
+                "description": "zero is divisible by anything (except 0)",
+                "data": 0,
+                "valid": true
+            },
+            {
+                "description": "4.5 is divisible by 1.5",
+                "data": 4.5,
+                "valid": true
+            },
+            {
+                "description": "35 is not divisible by 1.5",
+                "data": 35,
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "by small number",
+        "schema": {"divisibleBy": 0.0001},
+        "tests": [
+            {
+                "description": "0.0075 is divisible by 0.0001",
+                "data": 0.0075,
+                "valid": true
+            },
+            {
+                "description": "0.00751 is not divisible by 0.0001",
+                "data": 0.00751,
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/enum.json
@@ -0,0 +1,71 @@
+[
+    {
+        "description": "simple enum validation",
+        "schema": {"enum": [1, 2, 3]},
+        "tests": [
+            {
+                "description": "one of the enum is valid",
+                "data": 1,
+                "valid": true
+            },
+            {
+                "description": "something else is invalid",
+                "data": 4,
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "heterogeneous enum validation",
+        "schema": {"enum": [6, "foo", [], true, {"foo": 12}]},
+        "tests": [
+            {
+                "description": "one of the enum is valid",
+                "data": [],
+                "valid": true
+            },
+            {
+                "description": "something else is invalid",
+                "data": null,
+                "valid": false
+            },
+            {
+                "description": "objects are deep compared",
+                "data": {"foo": false},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "enums in properties",
+        "schema": {
+           "type":"object",
+		     "properties": {
+		        "foo": {"enum":["foo"]},
+		        "bar": {"enum":["bar"], "required":true}
+		     }
+		  },
+        "tests": [
+            {
+                "description": "both properties are valid",
+                "data": {"foo":"foo", "bar":"bar"},
+                "valid": true
+            },
+            {
+                "description": "missing optional property is valid",
+                "data": {"bar":"bar"},
+                "valid": true
+            },
+            {
+                "description": "missing required property is invalid",
+                "data": {"foo":"foo"},
+                "valid": false
+            },
+            {
+                "description": "missing all properties is invalid",
+                "data": {},
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/extends.json
@@ -0,0 +1,94 @@
+[
+    {
+        "description": "extends",
+        "schema": {
+            "properties": {"bar": {"type": "integer", "required": true}},
+            "extends": {
+                "properties": {
+                    "foo": {"type": "string", "required": true}
+                }
+            }
+        },
+        "tests": [
+            {
+                "description": "extends",
+                "data": {"foo": "baz", "bar": 2},
+                "valid": true
+            },
+            {
+                "description": "mismatch extends",
+                "data": {"foo": "baz"},
+                "valid": false
+            },
+            {
+                "description": "mismatch extended",
+                "data": {"bar": 2},
+                "valid": false
+            },
+            {
+                "description": "wrong type",
+                "data": {"foo": "baz", "bar": "quux"},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "multiple extends",
+        "schema": {
+            "properties": {"bar": {"type": "integer", "required": true}},
+            "extends" : [
+                {
+                    "properties": {
+                        "foo": {"type": "string", "required": true}
+                    }
+                },
+                {
+                    "properties": {
+                        "baz": {"type": "null", "required": true}
+                    }
+                }
+            ]
+        },
+        "tests": [
+            {
+                "description": "valid",
+                "data": {"foo": "quux", "bar": 2, "baz": null},
+                "valid": true
+            },
+            {
+                "description": "mismatch first extends",
+                "data": {"bar": 2, "baz": null},
+                "valid": false
+            },
+            {
+                "description": "mismatch second extends",
+                "data": {"foo": "quux", "bar": 2},
+                "valid": false
+            },
+            {
+                "description": "mismatch both",
+                "data": {"bar": 2},
+                "valid": false
+            }
+        ]
+    },
+    {
+        "description": "extends simple types",
+        "schema": {
+            "minimum": 20,
+            "extends": {"maximum": 30}
+        },
+        "tests": [
+            {
+                "description": "valid",
+                "data": 25,
+                "valid": true
+            },
+            {
+                "description": "mismatch extends",
+                "data": 35,
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/items.json
@@ -0,0 +1,46 @@
+[
+    {
+        "description": "a schema given for items",
+        "schema": {
+            "items": {"type": "integer"}
+        },
+        "tests": [
+            {
+                "description": "valid items",
+                "data": [ 1, 2, 3 ],
+                "valid": true
+            },
+            {
+                "description": "wrong type of items",
+                "data": [1, "x"],
+                "valid": false
+            },
+            {
+                "description": "ignores non-arrays",
+                "data": {"foo" : "bar"},
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "an array of schemas for items",
+        "schema": {
+            "items": [
+                {"type": "integer"},
+                {"type": "string"}
+            ]
+        },
+        "tests": [
+            {
+                "description": "correct types",
+                "data": [ 1, "foo" ],
+                "valid": true
+            },
+            {
+                "description": "wrong types",
+                "data": [ "foo", 1 ],
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maxItems.json
@@ -0,0 +1,28 @@
+[
+    {
+        "description": "maxItems validation",
+        "schema": {"maxItems": 2},
+        "tests": [
+            {
+                "description": "shorter is valid",
+                "data": [1],
+                "valid": true
+            },
+            {
+                "description": "exact length is valid",
+                "data": [1, 2],
+                "valid": true
+            },
+            {
+                "description": "too long is invalid",
+                "data": [1, 2, 3],
+                "valid": false
+            },
+            {
+                "description": "ignores non-arrays",
+                "data": "foobar",
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maxLength.json
@@ -0,0 +1,33 @@
+[
+    {
+        "description": "maxLength validation",
+        "schema": {"maxLength": 2},
+        "tests": [
+            {
+                "description": "shorter is valid",
+                "data": "f",
+                "valid": true
+            },
+            {
+                "description": "exact length is valid",
+                "data": "fo",
+                "valid": true
+            },
+            {
+                "description": "too long is invalid",
+                "data": "foo",
+                "valid": false
+            },
+            {
+                "description": "ignores non-strings",
+                "data": 10,
+                "valid": true
+            },
+            {
+                "description": "two supplementary Unicode code points is long enough",
+                "data": "\uD83D\uDCA9\uD83D\uDCA9",
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/maximum.json
@@ -0,0 +1,42 @@
+[
+    {
+        "description": "maximum validation",
+        "schema": {"maximum": 3.0},
+        "tests": [
+            {
+                "description": "below the maximum is valid",
+                "data": 2.6,
+                "valid": true
+            },
+            {
+                "description": "above the maximum is invalid",
+                "data": 3.5,
+                "valid": false
+            },
+            {
+                "description": "ignores non-numbers",
+                "data": "x",
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "exclusiveMaximum validation",
+        "schema": {
+            "maximum": 3.0,
+            "exclusiveMaximum": true
+        },
+        "tests": [
+            {
+                "description": "below the maximum is still valid",
+                "data": 2.2,
+                "valid": true
+            },
+            {
+                "description": "boundary point is invalid",
+                "data": 3.0,
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minItems.json
@@ -0,0 +1,28 @@
+[
+    {
+        "description": "minItems validation",
+        "schema": {"minItems": 1},
+        "tests": [
+            {
+                "description": "longer is valid",
+                "data": [1, 2],
+                "valid": true
+            },
+            {
+                "description": "exact length is valid",
+                "data": [1],
+                "valid": true
+            },
+            {
+                "description": "too short is invalid",
+                "data": [],
+                "valid": false
+            },
+            {
+                "description": "ignores non-arrays",
+                "data": "",
+                "valid": true
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minLength.json
@@ -0,0 +1,33 @@
+[
+    {
+        "description": "minLength validation",
+        "schema": {"minLength": 2},
+        "tests": [
+            {
+                "description": "longer is valid",
+                "data": "foo",
+                "valid": true
+            },
+            {
+                "description": "exact length is valid",
+                "data": "fo",
+                "valid": true
+            },
+            {
+                "description": "too short is invalid",
+                "data": "f",
+                "valid": false
+            },
+            {
+                "description": "ignores non-strings",
+                "data": 1,
+                "valid": true
+            },
+            {
+                "description": "one supplementary Unicode code point is not long enough",
+                "data": "\uD83D\uDCA9",
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/minimum.json
@@ -0,0 +1,42 @@
+[
+    {
+        "description": "minimum validation",
+        "schema": {"minimum": 1.1},
+        "tests": [
+            {
+                "description": "above the minimum is valid",
+                "data": 2.6,
+                "valid": true
+            },
+            {
+                "description": "below the minimum is invalid",
+                "data": 0.6,
+                "valid": false
+            },
+            {
+                "description": "ignores non-numbers",
+                "data": "x",
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "exclusiveMinimum validation",
+        "schema": {
+            "minimum": 1.1,
+            "exclusiveMinimum": true
+        },
+        "tests": [
+            {
+                "description": "above the minimum is still valid",
+                "data": 1.2,
+                "valid": true
+            },
+            {
+                "description": "boundary point is invalid",
+                "data": 1.1,
+                "valid": false
+            }
+        ]
+    }
+]
new file mode 100644
--- /dev/null
+++ b/lib/python/vendor/jsonschema-2.5.1/json/tests/draft3/optional/bignum.json
@@ -0,0 +1,107 @@
+[
+    {
+        "description": "integer",
+        "schema": {"type": "integer"},
+        "tests": [
+            {
+                "description": "a bignum is an integer",
+                "data": 12345678910111213141516171819202122232425262728293031,
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "number",
+        "schema": {"type": "number"},
+        "tests": [
+            {
+                "description": "a bignum is a number",
+                "data": 98249283749234923498293171823948729348710298301928331,
+                "valid": true
+            }
+        ]
+    },
+    {
+        "description": "integer",
+        "schema": {"type": "integer"},
+        "tests": [
+            {
+                "description": "a negative bignum is an integer",
+                "data": -12345678910111213141516171819202122232425262728293031,
+                "valid": true
+            }