Backout 7634808d94af (bug 703660) for Linux64 PGO build failures; a=khuey
author Ed Morley <bmo@edmorley.co.uk>
Tue, 22 Nov 2011 11:54:50 +0000
changeset 80635 30d495095e2b83e03355c09d724e403ce0258e2d
parent 80634 3a00a7c1227ffcabf49ef00c843fb0a95147b93b
child 80636 81048a4acdfdd994b31228441323692488405b75
push id 21515
push user bmo@edmorley.co.uk
push date Tue, 22 Nov 2011 15:53:31 +0000
treeherder mozilla-central@6f998cc964be
reviewers khuey
bugs 703660
milestone 11.0a1
backs out 7634808d94af205830767ea4ac5581b13b92c4b9
Backout 7634808d94af (bug 703660) for Linux64 PGO build failures; a=khuey
dom/indexedDB/IDBFactory.cpp
dom/indexedDB/IDBObjectStore.cpp
dom/indexedDB/IDBObjectStore.h
dom/indexedDB/IndexedDatabase.h
dom/indexedDB/OpenDatabaseHelper.cpp
dom/indexedDB/OpenDatabaseHelper.h
other-licenses/snappy/Makefile.in
other-licenses/snappy/README
other-licenses/snappy/snappy-stubs-public.h
other-licenses/snappy/src/AUTHORS
other-licenses/snappy/src/COPYING
other-licenses/snappy/src/ChangeLog
other-licenses/snappy/src/NEWS
other-licenses/snappy/src/README
other-licenses/snappy/src/format_description.txt
other-licenses/snappy/src/snappy-c.cc
other-licenses/snappy/src/snappy-c.h
other-licenses/snappy/src/snappy-internal.h
other-licenses/snappy/src/snappy-sinksource.cc
other-licenses/snappy/src/snappy-sinksource.h
other-licenses/snappy/src/snappy-stubs-internal.cc
other-licenses/snappy/src/snappy-stubs-internal.h
other-licenses/snappy/src/snappy-stubs-public.h.in
other-licenses/snappy/src/snappy-test.cc
other-licenses/snappy/src/snappy-test.h
other-licenses/snappy/src/snappy.cc
other-licenses/snappy/src/snappy.h
other-licenses/snappy/src/snappy_unittest.cc
toolkit/content/license.html
toolkit/library/Makefile.in
toolkit/toolkit-tiers.mk
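The diffs below back out the Snappy compression layer that bug 703660 added around IndexedDB's structured clone blobs (and remove the imported snappy sources). For context only, here is a minimal standalone sketch of the compress/uncompress round trip the backed-out code performed with the snappy API calls visible in these hunks (MaxCompressedLength, RawCompress, GetUncompressedLength, RawUncompress). The function names, std::string buffers, and simplified error handling are illustrative assumptions, not the tree's actual code.

    // Sketch (not tree code): round-tripping a blob through snappy before
    // binding it to / after reading it from the "data" column.
    #include <string>
    #include "snappy.h"

    static bool CompressCloneData(const char* aInput, size_t aInputLength,
                                  std::string& aCompressed)
    {
      // Allocate the worst-case output size, then shrink to the real length.
      size_t maxLength = snappy::MaxCompressedLength(aInputLength);
      aCompressed.resize(maxLength);

      size_t compressedLength;
      snappy::RawCompress(aInput, aInputLength, &aCompressed[0],
                          &compressedLength);
      aCompressed.resize(compressedLength);
      return true;
    }

    static bool UncompressCloneData(const char* aCompressed,
                                    size_t aCompressedLength,
                                    std::string& aUncompressed)
    {
      size_t uncompressedLength;
      if (!snappy::GetUncompressedLength(aCompressed, aCompressedLength,
                                         &uncompressedLength)) {
        return false;  // corrupt or non-snappy data
      }
      aUncompressed.resize(uncompressedLength);
      return snappy::RawUncompress(aCompressed, aCompressedLength,
                                   &aUncompressed[0]);
    }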
--- a/dom/indexedDB/IDBFactory.cpp
+++ b/dom/indexedDB/IDBFactory.cpp
@@ -137,16 +137,26 @@ IDBFactory::GetConnection(const nsAStrin
     do_GetService(MOZ_STORAGE_SERVICE_CONTRACTID);
   NS_ENSURE_TRUE(ss, nsnull);
 
   nsCOMPtr<mozIStorageConnection> connection;
   rv = ss->OpenDatabaseWithVFS(dbFile, NS_LITERAL_CSTRING("quota"),
                                getter_AddRefs(connection));
   NS_ENSURE_SUCCESS(rv, nsnull);
 
+#ifdef DEBUG
+  {
+    // Check to make sure that the database schema is correct again.
+    PRInt32 schemaVersion;
+    NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
+                 schemaVersion == DB_SCHEMA_VERSION,
+                 "Wrong schema!");
+  }
+#endif
+
   // Turn on foreign key constraints!
   rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
     "PRAGMA foreign_keys = ON;"
   ));
   NS_ENSURE_SUCCESS(rv, nsnull);
 
   return connection.forget();
 }
--- a/dom/indexedDB/IDBObjectStore.cpp
+++ b/dom/indexedDB/IDBObjectStore.cpp
@@ -45,17 +45,16 @@
 #include "mozilla/storage.h"
 #include "nsCharSeparatedTokenizer.h"
 #include "nsContentUtils.h"
 #include "nsDOMClassInfo.h"
 #include "nsEventDispatcher.h"
 #include "nsJSUtils.h"
 #include "nsServiceManagerUtils.h"
 #include "nsThreadUtils.h"
-#include "snappy/snappy.h"
 
 #include "AsyncConnectionHelper.h"
 #include "IDBCursor.h"
 #include "IDBEvents.h"
 #include "IDBIndex.h"
 #include "IDBKeyRange.h"
 #include "IDBTransaction.h"
 #include "DatabaseInfo.h"
@@ -568,30 +567,33 @@ IDBObjectStore::IsValidKeyPath(JSContext
     return false;
   }
 
   return true;
 }
 
 // static
 nsresult
-IDBObjectStore::GetKeyPathValueFromStructuredData(
-                                     const JSAutoStructuredCloneBuffer& aBuffer,
-                                     const nsAString& aKeyPath,
-                                     JSContext* aCx,
-                                     Key& aValue)
+IDBObjectStore::GetKeyPathValueFromStructuredData(const PRUint8* aData,
+                                                  PRUint32 aDataLength,
+                                                  const nsAString& aKeyPath,
+                                                  JSContext* aCx,
+                                                  Key& aValue)
 {
-  NS_ASSERTION(aBuffer.data(), "Null pointer!");
+  NS_ASSERTION(aData, "Null pointer!");
+  NS_ASSERTION(aDataLength, "Empty data!");
   NS_ASSERTION(!aKeyPath.IsEmpty(), "Empty keyPath!");
   NS_ASSERTION(aCx, "Null pointer!");
 
   JSAutoRequest ar(aCx);
 
   jsval clone;
-  if (!aBuffer.read(aCx, &clone, NULL, NULL)) {
+  if (!JS_ReadStructuredClone(aCx, reinterpret_cast<const uint64*>(aData),
+                              aDataLength, JS_STRUCTURED_CLONE_VERSION,
+                              &clone, NULL, NULL)) {
     return NS_ERROR_DOM_DATA_CLONE_ERR;
   }
 
   if (JSVAL_IS_PRIMITIVE(clone)) {
     // This isn't an object, so just leave the key unset.
     aValue.Unset();
     return NS_OK;
   }
@@ -774,41 +776,22 @@ IDBObjectStore::GetStructuredCloneDataFr
   {
     PRInt32 valueType;
     NS_ASSERTION(NS_SUCCEEDED(aStatement->GetTypeOfIndex(aIndex, &valueType)) &&
                  valueType == mozIStorageStatement::VALUE_TYPE_BLOB,
                  "Bad value type!");
   }
 #endif
 
-  const PRUint8* blobData;
-  PRUint32 blobDataLength;
-  nsresult rv = aStatement->GetSharedBlob(aIndex, &blobDataLength, &blobData);
+  const PRUint8* data;
+  PRUint32 dataLength;
+  nsresult rv = aStatement->GetSharedBlob(aIndex, &dataLength, &data);
   NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
-  const char* compressed = reinterpret_cast<const char*>(blobData);
-  size_t compressedLength = size_t(blobDataLength);
-
-  size_t uncompressedLength;
-  if (!snappy::GetUncompressedLength(compressed, compressedLength,
-                                     &uncompressedLength)) {
-    NS_WARNING("Snappy can't determine uncompressed length!");
-    return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-  }
-
-  nsAutoArrayPtr<char> uncompressed(new char[uncompressedLength]);
-
-  if (!snappy::RawUncompress(compressed, compressedLength,
-                             uncompressed.get())) {
-    NS_WARNING("Snappy can't determine uncompressed length!");
-    return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-  }
-
-  return aBuffer.copy(reinterpret_cast<const uint64_t *>(uncompressed.get()),
-                      uncompressedLength) ?
+  return aBuffer.copy(reinterpret_cast<const uint64_t *>(data), dataLength) ?
          NS_OK :
          NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
 }
 
 // static
 void
 IDBObjectStore::ClearStructuredCloneBuffer(JSAutoStructuredCloneBuffer& aBuffer)
 {
@@ -1659,51 +1642,21 @@ AddHelper::DoDatabaseWork(mozIStorageCon
 
   if (!autoIncrement || mayOverwrite) {
     NS_ASSERTION(!mKey.IsUnset(), "This shouldn't happen!");
 
     rv = mKey.BindToStatement(stmt, keyValue);
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
-  NS_NAMED_LITERAL_CSTRING(data, "data");
-
-  // This will hold our compressed data until the end of the method. The
-  // BindBlobByName function will copy it.
-  nsAutoArrayPtr<char> compressed;
-
-  // This points to the compressed buffer.
-  const PRUint8* dataBuffer = nsnull;
-  size_t dataBufferLength = 0;
-
-  // If we're going to modify the buffer later to add a key property on an
-  // autoIncrement objectStore then we will wait to compress our data until we
-  // have the appropriate key value.
-  if (autoIncrement && !mOverwrite && !keyPath.IsEmpty() && unsetKey) {
-    rv = stmt->BindInt32ByName(data, 0);
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
-  }
-  else {
-    // Compress the bytes before adding into the database.
-    const char* uncompressed =
-      reinterpret_cast<const char*>(mCloneBuffer.data());
-    size_t uncompressedLength = mCloneBuffer.nbytes();
-
-    size_t compressedLength = snappy::MaxCompressedLength(uncompressedLength);
-    compressed = new char[compressedLength];
-
-    snappy::RawCompress(uncompressed, uncompressedLength, compressed.get(),
-                        &compressedLength);
-
-    dataBuffer = reinterpret_cast<const PRUint8*>(compressed.get());
-    dataBufferLength = compressedLength;
-
-    rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
-  }
+  const PRUint8* buffer = reinterpret_cast<const PRUint8*>(mCloneBuffer.data());
+  size_t bufferLength = mCloneBuffer.nbytes();
+
+  rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer, bufferLength);
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
   rv = stmt->Execute();
   if (NS_FAILED(rv)) {
     if (mayOverwrite && rv == NS_ERROR_STORAGE_CONSTRAINT) {
       scoper.Abandon();
 
       rv = stmt->Reset();
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
@@ -1716,19 +1669,18 @@ AddHelper::DoDatabaseWork(mozIStorageCon
       rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("osid"), osid);
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
       NS_ASSERTION(!mKey.IsUnset(), "This shouldn't happen!");
 
       rv = mKey.BindToStatement(stmt, keyValue);
       NS_ENSURE_SUCCESS(rv, rv);
 
-      NS_ASSERTION(dataBuffer && dataBufferLength, "These should be set!");
-
-      rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
+      rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer,
+                                bufferLength);
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
       rv = stmt->Execute();
     }
 
     if (NS_FAILED(rv)) {
       return NS_ERROR_DOM_INDEXEDDB_CONSTRAINT_ERR;
     }
@@ -1772,33 +1724,21 @@ AddHelper::DoDatabaseWork(mozIStorageCon
       mozStorageStatementScoper scoper2(stmt);
 
       rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("osid"), osid);
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
       rv = mKey.BindToStatement(stmt, keyValue);
       NS_ENSURE_SUCCESS(rv, rv);
 
-      NS_ASSERTION(!dataBuffer && !dataBufferLength, "These should be unset!");
-
-      const char* uncompressed =
-        reinterpret_cast<const char*>(mCloneBuffer.data());
-      size_t uncompressedLength = mCloneBuffer.nbytes();
-
-      size_t compressedLength =
-        snappy::MaxCompressedLength(uncompressedLength);
-      compressed = new char[compressedLength];
-
-      snappy::RawCompress(uncompressed, uncompressedLength, compressed.get(),
-                          &compressedLength);
-
-      dataBuffer = reinterpret_cast<const PRUint8*>(compressed.get());
-      dataBufferLength = compressedLength;
-
-      rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
+      buffer = reinterpret_cast<const PRUint8*>(mCloneBuffer.data());
+      bufferLength = mCloneBuffer.nbytes();
+
+      rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer,
+                                bufferLength);
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
       rv = stmt->Execute();
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
     }
   }
 
   // Update our indexes if needed.
@@ -2321,34 +2261,35 @@ CreateIndexHelper::InsertDataFromObjectS
       rv = key.SetFromStatement(stmt, 2);
       NS_ENSURE_SUCCESS(rv, rv);
 
       rv =
         key.BindToStatement(insertStmt, NS_LITERAL_CSTRING("object_data_key"));
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
-    JSAutoStructuredCloneBuffer buffer;
-    rv = IDBObjectStore::GetStructuredCloneDataFromStatement(stmt, 1, buffer);
+    const PRUint8* data;
+    PRUint32 dataLength;
+    rv = stmt->GetSharedBlob(1, &dataLength, &data);
     NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
     NS_ENSURE_TRUE(sTLSIndex != BAD_TLS_INDEX, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
     ThreadLocalJSRuntime* tlsEntry =
       reinterpret_cast<ThreadLocalJSRuntime*>(PR_GetThreadPrivate(sTLSIndex));
 
     if (!tlsEntry) {
       tlsEntry = ThreadLocalJSRuntime::Create();
       NS_ENSURE_TRUE(tlsEntry, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
       PR_SetThreadPrivate(sTLSIndex, tlsEntry);
     }
 
     Key key;
-    rv = IDBObjectStore::GetKeyPathValueFromStructuredData(buffer,
+    rv = IDBObjectStore::GetKeyPathValueFromStructuredData(data, dataLength,
                                                            mIndex->KeyPath(),
                                                            tlsEntry->Context(),
                                                            key);
     NS_ENSURE_SUCCESS(rv, rv);
 
     if (key.IsUnset()) {
       continue;
     }
--- a/dom/indexedDB/IDBObjectStore.h
+++ b/dom/indexedDB/IDBObjectStore.h
@@ -71,17 +71,18 @@ public:
   static already_AddRefed<IDBObjectStore>
   Create(IDBTransaction* aTransaction,
          const ObjectStoreInfo* aInfo);
 
   static bool
   IsValidKeyPath(JSContext* aCx, const nsAString& aKeyPath);
 
   static nsresult
-  GetKeyPathValueFromStructuredData(const JSAutoStructuredCloneBuffer& aBuffer,
+  GetKeyPathValueFromStructuredData(const PRUint8* aData,
+                                    PRUint32 aDataLength,
                                     const nsAString& aKeyPath,
                                     JSContext* aCx,
                                     Key& aValue);
 
   static nsresult
   GetIndexUpdateInfo(ObjectStoreInfo* aObjectStoreInfo,
                      JSContext* aCx,
                      jsval aObject,
--- a/dom/indexedDB/IndexedDatabase.h
+++ b/dom/indexedDB/IndexedDatabase.h
@@ -46,16 +46,18 @@
 #include "jsapi.h"
 #include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsDebug.h"
 #include "nsDOMError.h"
 #include "nsStringGlue.h"
 #include "nsTArray.h"
 
+#define DB_SCHEMA_VERSION 6
+
 #define BEGIN_INDEXEDDB_NAMESPACE \
   namespace mozilla { namespace dom { namespace indexedDB {
 
 #define END_INDEXEDDB_NAMESPACE \
   } /* namespace indexedDB */ } /* namepsace dom */ } /* namespace mozilla */
 
 #define USING_INDEXEDDB_NAMESPACE \
   using namespace mozilla::dom::indexedDB;
--- a/dom/indexedDB/OpenDatabaseHelper.cpp
+++ b/dom/indexedDB/OpenDatabaseHelper.cpp
@@ -32,71 +32,31 @@
  * decision by deleting the provisions above and replace them with the notice
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "OpenDatabaseHelper.h"
+#include "IDBEvents.h"
+#include "IDBFactory.h"
+#include "IndexedDatabaseManager.h"
 
+#include "mozilla/storage.h"
 #include "nsIFile.h"
 
-#include "mozilla/storage.h"
 #include "nsContentUtils.h"
 #include "nsEscape.h"
 #include "nsThreadUtils.h"
-#include "snappy/snappy.h"
-
-#include "IDBEvents.h"
-#include "IDBFactory.h"
-#include "IndexedDatabaseManager.h"
 
 USING_INDEXEDDB_NAMESPACE
 
 namespace {
 
-inline
-PRInt64
-MakeDataVersion(PRUint32 aStructuredCloneVersion,
-                PRUint32 aInternalDataVersion)
-{
-  return (PRInt64(aInternalDataVersion) << 32) + aStructuredCloneVersion;
-}
-
-inline
-PRUint32 GetStructuredCloneVersion(PRInt64 aDataVersion)
-{
-  return PRUint32(aDataVersion & 0xFFFFFFFF);
-}
-
-inline
-PRUint32 GetInternalDataVersion(PRInt64 aDataVersion)
-{
-  return PRUint32(aDataVersion >> 32);
-}
-
-// Version corresponding to SQLite table structure.
-const PRInt32 kSQLiteSchemaVersion = 6;
-
-// Version corresponding to the JS engine's structured clone serialization
-// format.
-const PRUint32 kJSStructuredCloneVersion = 1;
-
-// If JS_STRUCTURED_CLONE_VERSION changes then we need to update here.
-PR_STATIC_ASSERT(kJSStructuredCloneVersion == JS_STRUCTURED_CLONE_VERSION);
-
-// Our own internal version flag.
-const PRUint32 kInternalDataVersion = 1;
-
-// The version stored inside SQLite databases encompassing both the structured
-// clone version and the internal version.
-const PRInt64 kDataVersion = MakeDataVersion(kJSStructuredCloneVersion,
-                                             kInternalDataVersion);
-
 nsresult
 GetDatabaseFile(const nsACString& aASCIIOrigin,
                 const nsAString& aName,
                 nsIFile** aDatabaseFile)
 {
   NS_ASSERTION(!aASCIIOrigin.IsEmpty() && !aName.IsEmpty(), "Bad arguments!");
 
   nsCOMPtr<nsIFile> dbFile;
@@ -303,17 +263,17 @@ CreateTables(mozIStorageConnection* aDBC
   // Need this to make cascading deletes from ai_object_data and object_store
   // fast.
   rv = aDBConn->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
     "CREATE INDEX ai_unique_index_data_ai_object_data_id_index "
     "ON ai_unique_index_data (ai_object_data_id);"
   ));
   NS_ENSURE_SUCCESS(rv, rv);
 
-  rv = aDBConn->SetSchemaVersion(kSQLiteSchemaVersion);
+  rv = aDBConn->SetSchemaVersion(DB_SCHEMA_VERSION);
   NS_ENSURE_SUCCESS(rv, rv);
 
   return NS_OK;
 }
 
 nsresult
 CreateMetaData(mozIStorageConnection* aConnection,
                const nsAString& aName)
@@ -326,75 +286,31 @@ CreateMetaData(mozIStorageConnection* aC
     "INSERT OR REPLACE INTO database (name, dataVersion) "
     "VALUES (:name, :dataVersion)"
   ), getter_AddRefs(stmt));
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = stmt->BindStringByName(NS_LITERAL_CSTRING("name"), aName);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("dataVersion"), kDataVersion);
+  rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("dataVersion"),
+                             JS_STRUCTURED_CLONE_VERSION);
   NS_ENSURE_SUCCESS(rv, rv);
 
   return stmt->Execute();
 }
 
 nsresult
-GetDataVersion(mozIStorageConnection* aConnection,
-               PRInt64* aDataVersion)
-{
-  nsCOMPtr<mozIStorageStatement> stmt;
-  nsresult rv = aConnection->CreateStatement(NS_LITERAL_CSTRING(
-    "SELECT dataVersion "
-    "FROM database"
-  ), getter_AddRefs(stmt));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  bool hasResult;
-  rv = stmt->ExecuteStep(&hasResult);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  if (!hasResult) {
-    NS_ERROR("Database has no dataVersion!");
-    return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-  }
-
-  PRInt64 dataVersion;
-  rv = stmt->GetInt64(0, &dataVersion);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  *aDataVersion = dataVersion;
-  return NS_OK;
-}
-
-nsresult
-SetDataVersion(mozIStorageConnection* aConnection,
-               PRInt64 aDataVersion)
-{
-  nsCOMPtr<mozIStorageStatement> stmt;
-  nsresult rv = aConnection->CreateStatement(NS_LITERAL_CSTRING(
-    "UPDATE database "
-    "SET dataVersion = :version"
-  ), getter_AddRefs(stmt));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("version"), aDataVersion);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = stmt->Execute();
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  return NS_OK;
-}
-
-nsresult
 UpgradeSchemaFrom4To5(mozIStorageConnection* aConnection)
 {
   nsresult rv;
 
+  mozStorageTransaction transaction(aConnection, false,
+                                 mozIStorageConnection::TRANSACTION_IMMEDIATE);
+
   // All we changed is the type of the version column, so lets try to
   // convert that to an integer, and if we fail, set it to 0.
   nsCOMPtr<mozIStorageStatement> stmt;
   rv = aConnection->CreateStatement(NS_LITERAL_CSTRING(
     "SELECT name, version, dataVersion "
     "FROM database"
   ), getter_AddRefs(stmt));
   NS_ENSURE_SUCCESS(rv, rv);
@@ -461,22 +377,28 @@ UpgradeSchemaFrom4To5(mozIStorageConnect
 
     rv = stmt->Execute();
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
   rv = aConnection->SetSchemaVersion(5);
   NS_ENSURE_SUCCESS(rv, rv);
 
+  rv = transaction.Commit();
+  NS_ENSURE_SUCCESS(rv, rv);
+
   return NS_OK;
 }
 
 nsresult
 UpgradeSchemaFrom5To6(mozIStorageConnection* aConnection)
 {
+  mozStorageTransaction transaction(aConnection, false,
+                                 mozIStorageConnection::TRANSACTION_IMMEDIATE);
+
   // Turn off foreign key constraints before we do anything here.
   nsresult rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
     "PRAGMA foreign_keys = OFF;"
   ));
   NS_ENSURE_SUCCESS(rv, rv);
 
   // First, drop all the indexes we're no longer going to use.
   rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
@@ -820,100 +742,17 @@ UpgradeSchemaFrom5To6(mozIStorageConnect
     "CREATE INDEX ai_unique_index_data_ai_object_data_id_index "
     "ON ai_unique_index_data (ai_object_data_id);"
   ));
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = aConnection->SetSchemaVersion(6);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  return NS_OK;
-}
-
-class CompressDataBlobsFunction : public mozIStorageFunction
-{
-public:
-  NS_DECL_ISUPPORTS
-
-  NS_IMETHOD
-  OnFunctionCall(mozIStorageValueArray* aArguments,
-                 nsIVariant** aResult)
-  {
-    PRUint32 argc;
-    nsresult rv = aArguments->GetNumEntries(&argc);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    if (argc != 1) {
-      NS_WARNING("Don't call me with the wrong number of arguments!");
-      return NS_ERROR_UNEXPECTED;
-    }
-
-    PRInt32 type;
-    rv = aArguments->GetTypeOfIndex(0, &type);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    if (type != mozIStorageStatement::VALUE_TYPE_BLOB) {
-      NS_WARNING("Don't call me with the wrong type of arguments!");
-      return NS_ERROR_UNEXPECTED;
-    }
-
-    const PRUint8* uncompressed;
-    PRUint32 uncompressedLength;
-    rv = aArguments->GetSharedBlob(0, &uncompressedLength, &uncompressed);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    size_t compressedLength = snappy::MaxCompressedLength(uncompressedLength);
-    nsAutoArrayPtr<char> compressed(new char[compressedLength]);
-
-    snappy::RawCompress(reinterpret_cast<const char*>(uncompressed),
-                        uncompressedLength, compressed.get(),
-                        &compressedLength);
-
-    std::pair<const void *, int> data(static_cast<void*>(compressed.get()),
-                                      int(compressedLength));
-
-    // XXX This copies the buffer again... There doesn't appear to be any way to
-    //     preallocate space and write directly to a BlobVariant at the moment.
-    nsCOMPtr<nsIVariant> result = new mozilla::storage::BlobVariant(data);
-
-    result.forget(aResult);
-    return NS_OK;
-  }
-};
-
-NS_IMPL_ISUPPORTS1(CompressDataBlobsFunction, mozIStorageFunction)
-
-nsresult
-UpgradeInternalDataFrom0To1(mozIStorageConnection* aConnection,
-                            PRUint32 aStructuredCloneVersion)
-{
-  // This change is all about adding snappy compression to a previously existing
-  // database.
-  NS_NAMED_LITERAL_CSTRING(compressorName, "compress");
-
-  nsCOMPtr<mozIStorageFunction> compressor = new CompressDataBlobsFunction();
-
-  nsresult rv = aConnection->CreateFunction(compressorName, 1, compressor);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  // Turn off foreign key constraints before we do anything here.
-  rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
-    "UPDATE object_data SET data = compress(data);"
-  ));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
-    "UPDATE ai_object_data SET data = compress(data);"
-  ));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = aConnection->RemoveFunction(compressorName);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = SetDataVersion(aConnection, MakeDataVersion(aStructuredCloneVersion, 1));
+  rv = transaction.Commit();
   NS_ENSURE_SUCCESS(rv, rv);
 
   return NS_OK;
 }
 
 nsresult
 CreateDatabaseConnection(const nsAString& aName,
                          nsIFile* aDBFile,
@@ -940,60 +779,58 @@ CreateDatabaseConnection(const nsAString
   }
   NS_ENSURE_SUCCESS(rv, rv);
 
   // Check to make sure that the database schema is correct.
   PRInt32 schemaVersion;
   rv = connection->GetSchemaVersion(&schemaVersion);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  if (schemaVersion != kSQLiteSchemaVersion) {
+  if (!schemaVersion) {
+    // Brand new file, initialize our tables.
     mozStorageTransaction transaction(connection, false,
                                   mozIStorageConnection::TRANSACTION_IMMEDIATE);
 
-    if (!schemaVersion) {
-      // Brand new file, initialize our tables.
-      rv = CreateTables(connection);
-      NS_ENSURE_SUCCESS(rv, rv);
+    rv = CreateTables(connection);
+    NS_ENSURE_SUCCESS(rv, rv);
 
-      rv = CreateMetaData(connection, aName);
-      NS_ENSURE_SUCCESS(rv, rv);
+    rv = CreateMetaData(connection, aName);
+    NS_ENSURE_SUCCESS(rv, rv);
 
-      NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
-                   schemaVersion == kSQLiteSchemaVersion,
-                   "CreateTables set a bad schema version!");
-    }
-    else {
-      // This logic needs to change next time we change the schema!
-      PR_STATIC_ASSERT(kSQLiteSchemaVersion == 6);
+    rv = transaction.Commit();
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
+                 schemaVersion == DB_SCHEMA_VERSION,
+                 "CreateTables set a bad schema version!");
+  }
+  else if (schemaVersion != DB_SCHEMA_VERSION) {
+    // This logic needs to change next time we change the schema!
+    PR_STATIC_ASSERT(DB_SCHEMA_VERSION == 6);
 
 #define UPGRADE_SCHEMA_CASE(_from, _to)                                        \
   if (schemaVersion == _from) {                                                \
     rv = UpgradeSchemaFrom##_from##To##_to (connection);                       \
     NS_ENSURE_SUCCESS(rv, rv);                                                 \
                                                                                \
     rv = connection->GetSchemaVersion(&schemaVersion);                         \
     NS_ENSURE_SUCCESS(rv, rv);                                                 \
                                                                                \
     NS_ASSERTION(schemaVersion == _to, "Bad upgrade function!");               \
   }
 
-      UPGRADE_SCHEMA_CASE(4, 5)
-      UPGRADE_SCHEMA_CASE(5, 6)
+    UPGRADE_SCHEMA_CASE(4, 5)
+    UPGRADE_SCHEMA_CASE(5, 6)
 
 #undef UPGRADE_SCHEMA_CASE
 
-      if (schemaVersion != kSQLiteSchemaVersion) {
-        NS_WARNING("Unable to open IndexedDB database, schema doesn't match");
-        return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-      }
+    if (schemaVersion != DB_SCHEMA_VERSION) {
+      NS_WARNING("Unable to open IndexedDB database, schema doesn't match");
+      return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
     }
-
-    rv = transaction.Commit();
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
   }
 
   // Turn on foreign key constraints.
   rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
     "PRAGMA foreign_keys = ON;"
   ));
   NS_ENSURE_SUCCESS(rv, rv);
 
@@ -1284,111 +1121,65 @@ OpenDatabaseHelper::DoDatabaseWork()
 
   rv = mgr->EnsureQuotaManagementForDirectory(dbDirectory);
   NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
   nsCOMPtr<mozIStorageConnection> connection;
   rv = CreateDatabaseConnection(mName, dbFile, getter_AddRefs(connection));
   NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
-  if (!mForDeletion) {
-    // Get the data version, and maybe vacuum if we upgrade (hopefully because
-    // we figured out a way to save some disk space).
-    PRInt64 dataVersion;
-    rv = GetDataVersion(connection, &dataVersion);
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
-
-    // This logic needs to change next time we change the data version!
-    PR_STATIC_ASSERT(kJSStructuredCloneVersion == 1);
-    PR_STATIC_ASSERT(kInternalDataVersion == 1);
-
-    bool vacuumNeeded = false;
-
-    if (dataVersion != kDataVersion && !mForDeletion) {
-      mozStorageTransaction transaction(connection, false,
-                                  mozIStorageConnection::TRANSACTION_IMMEDIATE);
-
-      PRUint32 structuredCloneVersion = GetStructuredCloneVersion(dataVersion);
-      PRUint32 internalDataVersion = GetInternalDataVersion(dataVersion);
+  // Get the data version.
+  nsCOMPtr<mozIStorageStatement> stmt;
+  rv = connection->CreateStatement(NS_LITERAL_CSTRING(
+    "SELECT dataVersion "
+    "FROM database"
+  ), getter_AddRefs(stmt));
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
-      if (structuredCloneVersion != kJSStructuredCloneVersion) {
-        // We've never changed this value before so nothing we can upgrade here.
-        NS_WARNING("Bad structured clone version, newer or corrupt database?");
-        return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-      }
-
-      NS_ASSERTION(structuredCloneVersion == kJSStructuredCloneVersion,
-                    "Should have upgraded!");
+  bool hasResult;
+  rv = stmt->ExecuteStep(&hasResult);
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
-      if (internalDataVersion > kInternalDataVersion) {
-        NS_WARNING("Bad internal data version, newer or corrupt database?");
-        return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-      }
-
-#define UPGRADE_INTERNAL_DATA_VERSION_CASE(_from, _to)                         \
-  if (internalDataVersion == _from) {                                          \
-    rv = UpgradeInternalDataFrom##_from##To##_to (connection,                  \
-                                                  structuredCloneVersion);     \
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);                 \
-                                                                               \
-    rv = GetDataVersion(connection, &dataVersion);                             \
-    NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);                 \
-                                                                               \
-    internalDataVersion = GetInternalDataVersion(dataVersion);                 \
-    vacuumNeeded = true;                                                       \
+  if (!hasResult) {
+    NS_ERROR("Database has no dataVersion!");
+    return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
   }
 
-      UPGRADE_INTERNAL_DATA_VERSION_CASE(0, 1)
-
-#undef UPGRADE_INTERNAL_DATA_VERSION_CASE
-
-      NS_ASSERTION(structuredCloneVersion == kJSStructuredCloneVersion,
-                    "Should have upgraded structuredCloneVersion!");
-      NS_ASSERTION(internalDataVersion == kInternalDataVersion,
-                    "Should have upgraded internalDataVersion!");
-
-      dataVersion = MakeDataVersion(structuredCloneVersion,
-                                    internalDataVersion);
-      NS_ASSERTION(dataVersion == kDataVersion, "Didn't upgrade correctly!");
+  PRInt64 dataVersion;
+  rv = stmt->GetInt64(0, &dataVersion);
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
-      if (dataVersion != kDataVersion) {
-        NS_WARNING("Bad data version, newer or corrupt database?");
-        return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
-      }
-
-      rv = transaction.Commit();
-      NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
-    }
-
-    if (vacuumNeeded) {
-      rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
-        "VACUUM;"
-      ));
-      NS_ENSURE_SUCCESS(rv, rv);
-    }
+  if (dataVersion > JS_STRUCTURED_CLONE_VERSION) {
+    NS_ERROR("Bad data version!");
+    return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
   }
 
-  rv = IDBFactory::LoadDatabaseInformation(connection, mDatabaseId,
-                                           &mCurrentVersion, mObjectStores);
-  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
+  if (dataVersion < JS_STRUCTURED_CLONE_VERSION) {
+    // Need to upgrade the database, here, before returning to the main thread.
+    NS_NOTYETIMPLEMENTED("Implement me!");
+  }
 
-  if (mForDeletion) {
-    mState = eDeletePending;
-    return NS_OK;
-  }
+  rv = IDBFactory::LoadDatabaseInformation(connection, mDatabaseId, &mCurrentVersion,
+                                           mObjectStores);
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
 
   for (PRUint32 i = 0; i < mObjectStores.Length(); i++) {
     nsAutoPtr<ObjectStoreInfo>& objectStoreInfo = mObjectStores[i];
     for (PRUint32 j = 0; j < objectStoreInfo->indexes.Length(); j++) {
       IndexInfo& indexInfo = objectStoreInfo->indexes[j];
       mLastIndexId = NS_MAX(indexInfo.id, mLastIndexId);
     }
     mLastObjectStoreId = NS_MAX(objectStoreInfo->id, mLastObjectStoreId);
   }
 
+  if (mForDeletion) {
+    mState = eDeletePending;
+    return NS_OK;
+  }
+
   // See if we need to do a VERSION_CHANGE transaction
 
   // Optional version semantics.
   if (!mRequestedVersion) {
     // If the requested version was not specified and the database was created,
     // treat it as if version 1 were requested.
     if (mCurrentVersion == 0) {
       mRequestedVersion = 1;
--- a/dom/indexedDB/OpenDatabaseHelper.h
+++ b/dom/indexedDB/OpenDatabaseHelper.h
@@ -55,19 +55,19 @@ class OpenDatabaseHelper : public Helper
 public:
   OpenDatabaseHelper(IDBOpenDBRequest* aRequest,
                      const nsAString& aName,
                      const nsACString& aASCIIOrigin,
                      PRUint64 aRequestedVersion,
                      bool aForDeletion)
     : HelperBase(aRequest), mOpenDBRequest(aRequest), mName(aName),
       mASCIIOrigin(aASCIIOrigin), mRequestedVersion(aRequestedVersion),
-      mForDeletion(aForDeletion), mCurrentVersion(0), mDatabaseId(0),
-      mLastObjectStoreId(0), mLastIndexId(0), mState(eCreated),
-      mResultCode(NS_OK)
+      mForDeletion(aForDeletion), mCurrentVersion(0),
+      mDataVersion(DB_SCHEMA_VERSION), mDatabaseId(0), mLastObjectStoreId(0),
+      mLastIndexId(0), mState(eCreated), mResultCode(NS_OK)
   {
     NS_ASSERTION(!aForDeletion || !aRequestedVersion,
                  "Can't be for deletion and request a version!");
   }
 
   NS_DECL_ISUPPORTS
   NS_DECL_NSIRUNNABLE
 
@@ -123,16 +123,17 @@ private:
   nsCString mASCIIOrigin;
   PRUint64 mRequestedVersion;
   bool mForDeletion;
   nsCOMPtr<nsIAtom> mDatabaseId;
 
   // Out-params.
   nsTArray<nsAutoPtr<ObjectStoreInfo> > mObjectStores;
   PRUint64 mCurrentVersion;
+  PRUint32 mDataVersion;
   nsString mDatabaseFilePath;
   PRInt64 mLastObjectStoreId;
   PRInt64 mLastIndexId;
   nsRefPtr<IDBDatabase> mDatabase;
 
   // State variables
   enum OpenDatabaseState {
     eCreated = 0, // Not yet dispatched to the DB thread
deleted file mode 100644
--- a/other-licenses/snappy/Makefile.in
+++ /dev/null
@@ -1,73 +0,0 @@
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1/GPL 2.0/LGPL 2.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-#
-# The Original Code is Mozilla Firefox
-#
-# The Initial Developer of the Original Code is
-#   The Mozilla Foundation.
-# Portions created by the Initial Developer are Copyright (C) 2011
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Ben Turner <bent.mozilla@gmail.com>
-#
-# Alternatively, the contents of this file may be used under the terms of
-# either the GNU General Public License Version 2 or later (the "GPL"), or
-# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-# in which case the provisions of the GPL or the LGPL are applicable instead
-# of those above. If you wish to allow use of your version of this file only
-# under the terms of either the GPL or the LGPL, and not to allow others to
-# use your version of this file under the terms of the MPL, indicate your
-# decision by deleting the provisions above and replace them with the notice
-# and other provisions required by the GPL or the LGPL. If you do not delete
-# the provisions above, a recipient may use your version of this file under
-# the terms of any one of the MPL, the GPL or the LGPL.
-#
-# ***** END LICENSE BLOCK *****
-
-DEPTH     = ../..
-topsrcdir = @top_srcdir@
-srcdir    = @srcdir@
-
-VPATH = \
-  @srcdir@ \
-  $(topsrcdir)/other-licenses/snappy/src
-  $(NULL)
-
-include $(DEPTH)/config/autoconf.mk
-
-LIBRARY_NAME = snappy_s
-
-FORCE_STATIC_LIB = 1
-LIBXUL_LIBRARY = 1
-EXPORT_LIBRARY = 1
-
-CPPSRCS = \
-  snappy.cc \
-  snappy-sinksource.cc \
-  snappy-stubs-internal.cc \
-  $(NULL)
-
-CSRCS = \
-  snappy-c.cc \
-  $(NULL)
-
-EXPORTS_NAMESPACES = snappy
-
-EXPORTS_snappy = \
-  snappy.h \
-  snappy-c.h \
-  snappy-stubs-public.h \
-  $(NULL)
-
-include $(topsrcdir)/config/rules.mk
deleted file mode 100644
--- a/other-licenses/snappy/README
+++ /dev/null
@@ -1,22 +0,0 @@
-See src/README for the README that ships with snappy.
-
-Mozilla does not modify the actual snappy source with the exception of the
-'snappy-stubs-public.h' header. We have replaced its build system with our own.
-
-Snappy comes from:
-  http://code.google.com/p/snappy/
-
-To upgrade to a newer version:
-  1. Check out the new code using subversion.
-  2. Update 'snappy-stubs-public.h' in this directory with any changes that were
-     made to 'snappy-stubs-public.h.in' in the new source.
-  3. Copy the major/minor/patch versions from 'configure.ac' into
-     'snappy-stubs-public.h'.
-  4. Copy all source files from the new version into the src subdirectory. The
-     following files are not needed:
-       - 'autom4te.cache' subdirectory
-       - 'm4' subdirectory
-       - 'testdata' subdirectory
-       - 'autogen.sh'
-       - 'configure.ac'
-       - 'Makefile.am'
deleted file mode 100644
--- a/other-licenses/snappy/snappy-stubs-public.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-// Author: sesse@google.com (Steinar H. Gunderson)
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various type stubs for the open-source version of Snappy.
-//
-// This file cannot include config.h, as it is included from snappy.h,
-// which is a public header. Instead, snappy-stubs-public.h is generated by
-// from snappy-stubs-public.h.in at configure time.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-
-#include "prtypes.h"
-
-#define SNAPPY_MAJOR 1
-#define SNAPPY_MINOR 0
-#define SNAPPY_PATCHLEVEL 4
-#define SNAPPY_VERSION \
-    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
-
-#include <string>
-
-namespace snappy {
-
-typedef PRInt8 int8;
-typedef PRUint8 uint8;
-typedef PRInt16 int16;
-typedef PRUint16 uint16;
-typedef PRInt32 int32;
-typedef PRUint32 uint32;
-typedef PRInt64 int64;
-typedef PRUint64 uint64;
-
-typedef std::string string;
-
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);               \
-  void operator=(const TypeName&)
-
-}  // namespace snappy
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
deleted file mode 100644
--- a/other-licenses/snappy/src/AUTHORS
+++ /dev/null
@@ -1,1 +0,0 @@
-opensource@google.com
deleted file mode 100644
--- a/other-licenses/snappy/src/COPYING
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2011, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
deleted file mode 100644
--- a/other-licenses/snappy/src/ChangeLog
+++ /dev/null
@@ -1,801 +0,0 @@
-------------------------------------------------------------------------
-r49 | snappy.mirrorbot@gmail.com | 2011-09-15 11:50:05 +0200 (Thu, 15 Sep 2011) | 5 lines
-
-Fix public issue #50: Include generic byteswap macros.
-Also include Solaris 10 and FreeBSD versions.
-
-R=csilvers
-
-------------------------------------------------------------------------
-r48 | snappy.mirrorbot@gmail.com | 2011-08-10 20:57:27 +0200 (Wed, 10 Aug 2011) | 5 lines
-
-Partially fix public issue 50: Remove an extra comma from the end of some
-enum declarations, as it seems the Sun compiler does not like it.
-
-Based on patch by Travis Vitek.
-
-------------------------------------------------------------------------
-r47 | snappy.mirrorbot@gmail.com | 2011-08-10 20:44:16 +0200 (Wed, 10 Aug 2011) | 4 lines
-
-Use the right #ifdef test for sys/mman.h.
-
-Based on patch by Travis Vitek.
-
-------------------------------------------------------------------------
-r46 | snappy.mirrorbot@gmail.com | 2011-08-10 03:22:09 +0200 (Wed, 10 Aug 2011) | 6 lines
-
-Fix public issue #47: Small comment cleanups in the unit test.
-
-Originally based on a patch by Patrick Pelletier.
-
-R=sanjay
-
-------------------------------------------------------------------------
-r45 | snappy.mirrorbot@gmail.com | 2011-08-10 03:14:43 +0200 (Wed, 10 Aug 2011) | 8 lines
-
-Fix public issue #46: Format description said "3-byte offset"
-instead of "4-byte offset" for the longest copies.
-
-Also fix an inconsistency in the heading for section 2.2.3.
-Both patches by Patrick Pelletier.
-
-R=csilvers
-
-------------------------------------------------------------------------
-r44 | snappy.mirrorbot@gmail.com | 2011-06-28 13:40:25 +0200 (Tue, 28 Jun 2011) | 8 lines
-
-Fix public issue #44: Make the definition and declaration of CompressFragment
-identical, even regarding cv-qualifiers.
-
-This is required to work around a bug in the Solaris Studio C++ compiler
-(it does not properly disregard cv-qualifiers when doing name mangling).
-
-R=sanjay
-
-------------------------------------------------------------------------
-r43 | snappy.mirrorbot@gmail.com | 2011-06-04 12:19:05 +0200 (Sat, 04 Jun 2011) | 7 lines
-
-Correct an inaccuracy in the Snappy format description. 
-(I stumbled into this when changing the way we decompress literals.) 
-
-R=csilvers
-
-Revision created by MOE tool push_codebase.
-
-------------------------------------------------------------------------
-r42 | snappy.mirrorbot@gmail.com | 2011-06-03 22:53:06 +0200 (Fri, 03 Jun 2011) | 50 lines
-
-Speed up decompression by removing a fast-path attempt.
-
-Whenever we try to enter a copy fast-path, there is a certain cost in checking
-that all the preconditions are in place, but it's normally offset by the fact
-that we can usually take the cheaper path. However, in a certain path we've
-already established that "avail < literal_length", which usually means that
-either the available space is small, or the literal is big. Both will disqualify
-us from taking the fast path, and thus we take the hit from the precondition
-checking without gaining much from having a fast path. Thus, simply don't try
-the fast path in this situation -- we're already on a slow path anyway
-(one where we need to refill more data from the reader).
-
-I'm a bit surprised at how much this gained; it could be that this path is
-more common than I thought, or that the simpler structure somehow makes the
-compiler happier. I haven't looked at the assembler, but it's a win across
-the board on both Core 2, Core i7 and Opteron, at least for the cases we
-typically care about. The gains seem to be the largest on Core i7, though.
-Results from my Core i7 workstation:
-
-
-  Benchmark            Time(ns)    CPU(ns) Iterations
-  ---------------------------------------------------
-  BM_UFlat/0              73337      73091     190996 1.3GB/s  html      [ +1.7%]
-  BM_UFlat/1             696379     693501      20173 965.5MB/s  urls    [ +2.7%]
-  BM_UFlat/2               9765       9734    1472135 12.1GB/s  jpg      [ +0.7%]
-  BM_UFlat/3              29720      29621     472973 3.0GB/s  pdf       [ +1.8%]
-  BM_UFlat/4             294636     293834      47782 1.3GB/s  html4     [ +2.3%]
-  BM_UFlat/5              28399      28320     494700 828.5MB/s  cp      [ +3.5%]
-  BM_UFlat/6              12795      12760    1000000 833.3MB/s  c       [ +1.2%]
-  BM_UFlat/7               3984       3973    3526448 893.2MB/s  lsp     [ +5.7%]
-  BM_UFlat/8             991996     989322      14141 992.6MB/s  xls     [ +3.3%]
-  BM_UFlat/9             228620     227835      61404 636.6MB/s  txt1    [ +4.0%]
-  BM_UFlat/10            197114     196494      72165 607.5MB/s  txt2    [ +3.5%]
-  BM_UFlat/11            605240     603437      23217 674.4MB/s  txt3    [ +3.7%]
-  BM_UFlat/12            804157     802016      17456 573.0MB/s  txt4    [ +3.9%]
-  BM_UFlat/13            347860     346998      40346 1.4GB/s  bin       [ +1.2%]
-  BM_UFlat/14             44684      44559     315315 818.4MB/s  sum     [ +2.3%]
-  BM_UFlat/15              5120       5106    2739726 789.4MB/s  man     [ +3.3%]
-  BM_UFlat/16             76591      76355     183486 1.4GB/s  pb        [ +2.8%]
-  BM_UFlat/17            238564     237828      58824 739.1MB/s  gaviota [ +1.6%]
-  BM_UValidate/0          42194      42060     333333 2.3GB/s  html      [ -0.1%]
-  BM_UValidate/1         433182     432005      32407 1.5GB/s  urls      [ -0.1%]
-  BM_UValidate/2            197        196   71428571 603.3GB/s  jpg     [ +0.5%]
-  BM_UValidate/3          14494      14462     972222 6.1GB/s  pdf       [ +0.5%]
-  BM_UValidate/4         168444     167836      83832 2.3GB/s  html4     [ +0.1%]
-	
-R=jeff
-
-Revision created by MOE tool push_codebase.
-
-------------------------------------------------------------------------
-r41 | snappy.mirrorbot@gmail.com | 2011-06-03 22:47:14 +0200 (Fri, 03 Jun 2011) | 43 lines
-
-Speed up decompression by not needing a lookup table for literal items.
-
-Looking up into and decoding the values from char_table has long shown up as a
-hotspot in the decompressor. While it turns out that it's hard to make a more
-efficient decoder for the copy ops, the literals are simple enough that we can
-decode them without needing a table lookup. (This means that 1/4 of the table
-is now unused, although that in itself doesn't buy us anything.)
-
-The gains are small, but definitely present; some tests win as much as 10%,
-but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
-Core 2 and Opteron show similar results. (I've run with more iterations
-than unusual to make sure the smaller gains don't drown entirely in noise.)
-
-  Benchmark            Time(ns)    CPU(ns) Iterations
-  ---------------------------------------------------
-  BM_UFlat/0              74665      74428     182055 1.3GB/s  html      [ +3.1%]
-  BM_UFlat/1             714106     711997      19663 940.4MB/s  urls    [ +4.4%]
-  BM_UFlat/2               9820       9789    1427115 12.1GB/s  jpg      [ -1.2%]
-  BM_UFlat/3              30461      30380     465116 2.9GB/s  pdf       [ +0.8%]
-  BM_UFlat/4             301445     300568      46512 1.3GB/s  html4     [ +2.2%]
-  BM_UFlat/5              29338      29263     479452 801.8MB/s  cp      [ +1.6%]
-  BM_UFlat/6              13004      12970    1000000 819.9MB/s  c       [ +2.1%]
-  BM_UFlat/7               4180       4168    3349282 851.4MB/s  lsp     [ +1.3%]
-  BM_UFlat/8            1026149    1024000      10000 959.0MB/s  xls     [+10.7%]
-  BM_UFlat/9             237441     236830      59072 612.4MB/s  txt1    [ +0.3%]
-  BM_UFlat/10            203966     203298      69307 587.2MB/s  txt2    [ +0.8%]
-  BM_UFlat/11            627230     625000      22400 651.2MB/s  txt3    [ +0.7%]
-  BM_UFlat/12            836188     833979      16787 551.0MB/s  txt4    [ +1.3%]
-  BM_UFlat/13            351904     350750      39886 1.4GB/s  bin       [ +3.8%]
-  BM_UFlat/14             45685      45562     308370 800.4MB/s  sum     [ +5.9%]
-  BM_UFlat/15              5286       5270    2656546 764.9MB/s  man     [ +1.5%]
-  BM_UFlat/16             78774      78544     178117 1.4GB/s  pb        [ +4.3%]
-  BM_UFlat/17            242270     241345      58091 728.3MB/s  gaviota [ +1.2%]
-  BM_UValidate/0          42149      42000     333333 2.3GB/s  html      [ -3.0%]
-  BM_UValidate/1         432741     431303      32483 1.5GB/s  urls      [ +7.8%]
-  BM_UValidate/2            198        197   71428571 600.7GB/s  jpg     [+16.8%]
-  BM_UValidate/3          14560      14521     965517 6.1GB/s  pdf       [ -4.1%]
-  BM_UValidate/4         169065     168671      83832 2.3GB/s  html4     [ -2.9%]
-
-R=jeff
-
-Revision created by MOE tool push_codebase.
-
-------------------------------------------------------------------------
-r40 | snappy.mirrorbot@gmail.com | 2011-06-03 00:57:41 +0200 (Fri, 03 Jun 2011) | 2 lines
-
-Release Snappy 1.0.3.
-
-------------------------------------------------------------------------
-r39 | snappy.mirrorbot@gmail.com | 2011-06-02 20:06:54 +0200 (Thu, 02 Jun 2011) | 11 lines
-
-Remove an unneeded goto in the decompressor; it turns out that the
-state of ip_ after decompression (or attempted decompresion) is
-completely irrelevant, so we don't need the trailer.
-
-Performance is, as expected, mostly flat -- there's a curious ~3–5%
-loss in the “lsp” test, but that test case is so short it is hard to say
-anything definitive about why (most likely, it's some sort of
-unrelated effect).
-
-R=jeff
-
-------------------------------------------------------------------------
-r38 | snappy.mirrorbot@gmail.com | 2011-06-02 19:59:40 +0200 (Thu, 02 Jun 2011) | 52 lines
-
-Speed up decompression by caching ip_.
-
-It is seemingly hard for the compiler to understand that ip_, the current input
-pointer into the compressed data stream, can not alias on anything else, and
-thus using it directly will incur memory traffic as it cannot be kept in a
-register. The code already knew about this and cached it into a local
-variable, but since Step() only decoded one tag, it had to move ip_ back into
-place between every tag. This seems to have cost us a significant amount of
-performance, so changing Step() into a function that decodes as much as it can
-before it saves ip_ back and returns. (Note that Step() was already inlined,
-so it is not the manual inlining that buys the performance here.)
-
-The wins are about 3–6% for Core 2, 6–13% on Core i7 and 5–12% on Opteron
-(for plain array-to-array decompression, in 64-bit opt mode).
-
-There is a tiny difference in the behavior here; if an invalid literal is
-encountered (ie., the writer refuses the Append() operation), ip_ will now
-point to the byte past the tag byte, instead of where the literal was
-originally thought to end. However, we don't use ip_ for anything after
-DecompressAllTags() has returned, so this should not change external behavior
-in any way.
-
-Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
-
-Benchmark            Time(ns)    CPU(ns) Iterations
----------------------------------------------------
-BM_UFlat/0              79134      79110       8835 1.2GB/s  html      [ +6.2%]
-BM_UFlat/1             786126     786096        891 851.8MB/s  urls    [+10.0%]
-BM_UFlat/2               9948       9948      69125 11.9GB/s  jpg      [ -1.3%]
-BM_UFlat/3              31999      31998      21898 2.7GB/s  pdf       [ +6.5%]
-BM_UFlat/4             318909     318829       2204 1.2GB/s  html4     [ +6.5%]
-BM_UFlat/5              31384      31390      22363 747.5MB/s  cp      [ +9.2%]
-BM_UFlat/6              14037      14034      49858 757.7MB/s  c       [+10.6%]
-BM_UFlat/7               4612       4612     151395 769.5MB/s  lsp     [ +9.5%]
-BM_UFlat/8            1203174    1203007        582 816.3MB/s  xls     [+19.3%]
-BM_UFlat/9             253869     253955       2757 571.1MB/s  txt1    [+11.4%]
-BM_UFlat/10            219292     219290       3194 544.4MB/s  txt2    [+12.1%]
-BM_UFlat/11            672135     672131       1000 605.5MB/s  txt3    [+11.2%]
-BM_UFlat/12            902512     902492        776 509.2MB/s  txt4    [+12.5%]
-BM_UFlat/13            372110     371998       1881 1.3GB/s  bin       [ +5.8%]
-BM_UFlat/14             50407      50407      10000 723.5MB/s  sum     [+13.5%]
-BM_UFlat/15              5699       5701     100000 707.2MB/s  man     [+12.4%]
-BM_UFlat/16             83448      83424       8383 1.3GB/s  pb        [ +5.7%]
-BM_UFlat/17            256958     256963       2723 684.1MB/s  gaviota [ +7.9%]
-BM_UValidate/0          42795      42796      16351 2.2GB/s  html      [+25.8%]
-BM_UValidate/1         490672     490622       1427 1.3GB/s  urls      [+22.7%]
-BM_UValidate/2            237        237    2950297 499.0GB/s  jpg     [+24.9%]
-BM_UValidate/3          14610      14611      47901 6.0GB/s  pdf       [+26.8%]
-BM_UValidate/4         171973     171990       4071 2.2GB/s  html4     [+25.7%]
-
-
-
-------------------------------------------------------------------------
-r37 | snappy.mirrorbot@gmail.com | 2011-05-17 10:48:25 +0200 (Tue, 17 May 2011) | 10 lines
-
-
-Fix the numbering of the headlines in the Snappy format description.
-
-R=csilvers
-DELTA=4  (0 added, 0 deleted, 4 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1906
-
-------------------------------------------------------------------------
-r36 | snappy.mirrorbot@gmail.com | 2011-05-16 10:59:18 +0200 (Mon, 16 May 2011) | 12 lines
-
-
-Fix public issue #32: Add compressed format documentation for Snappy.
-This text is new, but an earlier version from Zeev Tarantov was used
-as reference.
-
-R=csilvers
-DELTA=112  (111 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1867
-
-------------------------------------------------------------------------
-r35 | snappy.mirrorbot@gmail.com | 2011-05-09 23:29:02 +0200 (Mon, 09 May 2011) | 12 lines
-
-
-Fix public issue #39: Pick out the median runs based on CPU time,
-not real time. Also, use nth_element instead of sort, since we
-only need one element.
-
-R=csilvers
-DELTA=5  (3 added, 0 deleted, 2 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1799
-
-------------------------------------------------------------------------
-r34 | snappy.mirrorbot@gmail.com | 2011-05-09 23:28:45 +0200 (Mon, 09 May 2011) | 19 lines
-
-
-Fix public issue #38: Make the microbenchmark framework properly handle
-cases where gettimeofday() can return the same result twice
-(as sometimes on GNU/Hurd) or go backwards
-(as when the user adjusts the clock). We avoid a division-by-zero,
-and put a lower bound on the number of iterations -- the same
-amount as we use to calibrate.
-
-We should probably use CLOCK_MONOTONIC for platforms that support
-it, to be robust against clock adjustments; we already use Windows'
-monotonic timers. However, that's for a later changelist.
-
-R=csilvers
-DELTA=7  (5 added, 0 deleted, 2 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1798
-
-------------------------------------------------------------------------
-r33 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:52 +0200 (Wed, 04 May 2011) | 11 lines
-
-
-Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
-libraries, not libsnappy.so (which doesn't need any such dependency).
-
-R=csilvers
-DELTA=20  (14 added, 0 deleted, 6 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1710
-
-------------------------------------------------------------------------
-r32 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:33 +0200 (Wed, 04 May 2011) | 11 lines
-
-
-Release Snappy 1.0.2, to get the license change and various other fixes into
-a release.
-
-R=csilvers
-DELTA=239  (236 added, 0 deleted, 3 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1709
-
-------------------------------------------------------------------------
-r31 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:55 +0200 (Tue, 26 Apr 2011) | 15 lines
-
-
-Fix public issue #30: Stop using gettimeofday() altogether on Win32,
-as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
-which is monotonic and probably reasonably high-resolution.
-(Some machines have traditionally had bugs in QPC, but they should
-be relatively rare these days, and there's really not a much better
-alternative that I know of.)
-
-R=csilvers
-DELTA=74  (55 added, 19 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1556
-
-------------------------------------------------------------------------
-r30 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:37 +0200 (Tue, 26 Apr 2011) | 11 lines
-
-
-Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
-we need for our own build system internally.
-
-R=csilvers
-DELTA=16  (13 added, 1 deleted, 2 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1555
-
-------------------------------------------------------------------------
-r29 | snappy.mirrorbot@gmail.com | 2011-04-16 00:55:56 +0200 (Sat, 16 Apr 2011) | 12 lines
-
-
-When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
-so we won't pull in macro definitions of things like min() and max(),
-which can conflict with <algorithm>.
-
-R=csilvers
-DELTA=1  (1 added, 0 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1485
-
-------------------------------------------------------------------------
-r28 | snappy.mirrorbot@gmail.com | 2011-04-11 11:07:01 +0200 (Mon, 11 Apr 2011) | 15 lines
-
-
-Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
-instead of getrusage().
-
-I thought I'd already committed this patch, so that the 1.0.1 release already
-would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
-instead, so this is a reconstruction.
-
-R=csilvers
-DELTA=43  (39 added, 3 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1295
-
-------------------------------------------------------------------------
-r27 | snappy.mirrorbot@gmail.com | 2011-04-08 11:51:53 +0200 (Fri, 08 Apr 2011) | 22 lines
-
-
-Include C bindings of Snappy, contributed by Martin Gieseking.
-
-I've made a few changes since Martin's version; mostly style nits, but also
-a semantic change -- most functions that return bool in the C++ version now
-return an enum, to better match typical C (and zlib) semantics.
-
-I've kept the copyright notice, since Martin is obviously the author here;
-he has signed the contributor license agreement, though, so this should not
-hinder Google's use in the future.
-
-We'll need to update the libtool version number to match the added interface,
-but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
-I'm going to wait until public release.
-
-R=csilvers
-DELTA=238  (233 added, 0 deleted, 5 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1294
-
-------------------------------------------------------------------------
-r26 | snappy.mirrorbot@gmail.com | 2011-04-07 18:36:43 +0200 (Thu, 07 Apr 2011) | 13 lines
-
-
-Replace geo.protodata with a newer version.
-
-The data compresses/decompresses slightly faster than the old data, and has
-similar density.
-
-R=lookingbill
-DELTA=1  (0 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1288
-
-------------------------------------------------------------------------
-r25 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:53 +0200 (Wed, 30 Mar 2011) | 12 lines
-
-
-Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
-inclusion in snappy-stubs-internal.h, which eases compiling outside the
-automake/autoconf framework.
-
-R=csilvers
-DELTA=5  (4 added, 1 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1152
-
-------------------------------------------------------------------------
-r24 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:39 +0200 (Wed, 30 Mar 2011) | 13 lines
-
-
-Fix public issue #26: Take memory allocation and reallocation entirely out of the
-Measure() loop. This gives all algorithms a small speed boost, except Snappy which
-already didn't do reallocation (so the measurements were slightly biased in its
-favor).
-
-R=csilvers
-DELTA=92  (69 added, 9 deleted, 14 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1151
-
-------------------------------------------------------------------------
-r23 | snappy.mirrorbot@gmail.com | 2011-03-30 22:25:09 +0200 (Wed, 30 Mar 2011) | 18 lines
-
-
-Renamed "namespace zippy" to "namespace snappy" to reduce
-the differences from the opensource code.  Will make it easier
-in the future to mix-and-match third-party code that uses
-snappy with google code.
-
-Currently, csearch shows that the only external user of
-"namespace zippy" is some bigtable code that accesses
-a TEST variable, which is temporarily kept in the zippy
-namespace.
-
-R=sesse
-DELTA=123  (18 added, 3 deleted, 102 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1150
-
-------------------------------------------------------------------------
-r22 | snappy.mirrorbot@gmail.com | 2011-03-29 00:17:04 +0200 (Tue, 29 Mar 2011) | 11 lines
-
-
-Put back the final few lines of what was truncated during the
-license header change.
-
-R=csilvers
-DELTA=5  (4 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1094
-
-------------------------------------------------------------------------
-r21 | snappy.mirrorbot@gmail.com | 2011-03-26 03:34:34 +0100 (Sat, 26 Mar 2011) | 20 lines
-
-
-Change on 2011-03-25 19:18:00-07:00 by sesse
-
-	Replace the Apache 2.0 license header by the BSD-type license header;
-	somehow a lot of the files were missed in the last round.
-
-	R=dannyb,csilvers
-	DELTA=147  (74 added, 2 deleted, 71 changed)
-
-Change on 2011-03-25 19:25:07-07:00 by sesse
-
-	Unbreak the build; the relicensing removed a bit too much (only comments
-	were intended, but I also accidentally removed some of the top lines of
-	the actual source).
-
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1072
-
-------------------------------------------------------------------------
-r20 | snappy.mirrorbot@gmail.com | 2011-03-25 17:14:41 +0100 (Fri, 25 Mar 2011) | 10 lines
-
-
-Change Snappy from the Apache 2.0 to a BSD-type license.
-
-R=dannyb
-DELTA=328  (80 added, 184 deleted, 64 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1061
-
-------------------------------------------------------------------------
-r19 | snappy.mirrorbot@gmail.com | 2011-03-25 01:39:01 +0100 (Fri, 25 Mar 2011) | 11 lines
-
-
-Release Snappy 1.0.1, to sweep up all the various small changes
-that have been made since release.
-
-R=csilvers
-DELTA=266  (260 added, 0 deleted, 6 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1057
-
-------------------------------------------------------------------------
-r18 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:54 +0100 (Thu, 24 Mar 2011) | 11 lines
-
-
-Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
-supported on Windows, and %I64d is recommended instead.
-
-R=csilvers
-DELTA=6  (5 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1034
-
-------------------------------------------------------------------------
-r17 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:27 +0100 (Thu, 24 Mar 2011) | 13 lines
-
-
-Fix public issue #19: Fix unit test when Google Test is installed but the
-gflags package isn't (Google Test is not properly initialized).
-
-Patch by Martin Gieseking.
-
-R=csilvers
-DELTA=2  (1 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1033
-
-------------------------------------------------------------------------
-r16 | snappy.mirrorbot@gmail.com | 2011-03-24 20:13:57 +0100 (Thu, 24 Mar 2011) | 15 lines
-
-
-Make the unit test work on systems without mmap(). This is required for,
-among others, Windows support. For Windows specifically, we could have used
-CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
-to compiling, and is of course also relevant for embedded systems with no MMU.
-
-(Part 2/2)
-
-R=csilvers
-DELTA=15  (12 added, 3 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1032
-
-------------------------------------------------------------------------
-r15 | snappy.mirrorbot@gmail.com | 2011-03-24 20:12:27 +0100 (Thu, 24 Mar 2011) | 15 lines
-
-
-Make the unit test work on systems without mmap(). This is required for,
-among others, Windows support. For Windows specifically, we could have used
-CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
-to compiling, and is of course also relevant for embedded systems with no MMU.
-
-(Part 1/2)
-
-R=csilvers
-DELTA=9  (8 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1031
-
-------------------------------------------------------------------------
-r14 | snappy.mirrorbot@gmail.com | 2011-03-24 00:17:36 +0100 (Thu, 24 Mar 2011) | 14 lines
-
-
-Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
-it causes problems with others sending patches etc.
-
-We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
-so we can just as well go cleanly in the other direction.
-
-R=csilvers
-DELTA=21038  (0 added, 21036 deleted, 2 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1012
-
-------------------------------------------------------------------------
-r13 | snappy.mirrorbot@gmail.com | 2011-03-23 18:50:49 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
-to rebuild libtool in Makefile.am won't work.
-
-R=csilvers
-DELTA=1  (1 added, 0 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=997
-
-------------------------------------------------------------------------
-r12 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:39 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
-it's not needed (CPPFLAGS are always included when compiling).
-
-R=csilvers
-DELTA=1  (0 added, 1 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=994
-
-------------------------------------------------------------------------
-r11 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:18 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Fix public issue #9: Add -Wall -Werror to automake flags.
-(This concerns automake itself, not the C++ compiler.)
-
-R=csilvers
-DELTA=4  (3 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=993
-
-------------------------------------------------------------------------
-r10 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:37 +0100 (Wed, 23 Mar 2011) | 10 lines
-
-
-Fix a typo in the Snappy README file.
-
-R=csilvers
-DELTA=1  (0 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=992
-
-------------------------------------------------------------------------
-r9 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:13 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Fix public issue #6: Add a --with-gflags for disabling gflags autodetection
-and using a manually given setting (use/don't use) instead.
-
-R=csilvers
-DELTA=16  (13 added, 0 deleted, 3 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=991
-
-------------------------------------------------------------------------
-r8 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:44 +0100 (Wed, 23 Mar 2011) | 12 lines
-
-
-Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
-slightly more standard, that also doesn't leak libtool command-line into
-configure.ac.
-
-R=csilvers
-DELTA=7  (0 added, 4 deleted, 3 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=990
-
-------------------------------------------------------------------------
-r7 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:22 +0100 (Wed, 23 Mar 2011) | 10 lines
-
-
-Fix public issue #4: Properly quote all macro arguments in configure.ac.
-
-R=csilvers
-DELTA=16  (0 added, 0 deleted, 16 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=989
-
-------------------------------------------------------------------------
-r6 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:54 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Fix public issue #7: Don't use internal variables named ac_*, as those belong
-to autoconf's namespace.
-
-R=csilvers
-DELTA=6  (0 added, 0 deleted, 6 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=988
-
-------------------------------------------------------------------------
-r5 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:09 +0100 (Wed, 23 Mar 2011) | 10 lines
-
-
-Add missing licensing headers to a few files. (Part 2/2.)
-
-R=csilvers
-DELTA=12  (12 added, 0 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=987
-
-------------------------------------------------------------------------
-r4 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:39 +0100 (Wed, 23 Mar 2011) | 10 lines
-
-
-Add missing licensing headers to a few files. (Part 1/2.)
-
-R=csilvers
-DELTA=24  (24 added, 0 deleted, 0 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=986
-
-------------------------------------------------------------------------
-r3 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:04 +0100 (Wed, 23 Mar 2011) | 11 lines
-
-
-Use the correct license file for the Apache 2.0 license;
-spotted by Florian Weimer.
-
-R=csilvers
-DELTA=202  (174 added, 0 deleted, 28 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=985
-
-------------------------------------------------------------------------
-r2 | snappy.mirrorbot@gmail.com | 2011-03-18 18:14:15 +0100 (Fri, 18 Mar 2011) | 6 lines
-
-
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=
-
-------------------------------------------------------------------------
-r1 | sesse@google.com | 2011-03-18 18:13:52 +0100 (Fri, 18 Mar 2011) | 2 lines
-
-Create trunk directory.
-
-------------------------------------------------------------------------
deleted file mode 100644
--- a/other-licenses/snappy/src/NEWS
+++ /dev/null
@@ -1,58 +0,0 @@
-Snappy v1.0.4, September 15th 2011:
-
-  * Speeded up the decompressor somewhat; typically about 2–8%
-    for Core i7, in 64-bit mode (comparable for Opteron).
-    Somewhat more for some tests, almost no gain for others.
-  
-  * Make Snappy compile on certain platforms it didn't before
-    (Solaris with SunPro C++, HP-UX, AIX).
-
-  * Correct some minor errors in the format description.
-
-
-Snappy v1.0.3, June 2nd 2011:
-
-  * Speeded up the decompressor somewhat; about 3-6% for Core 2,
-    6-13% for Core i7, and 5-12% for Opteron (all in 64-bit mode).
-
-  * Added compressed format documentation. This text is new,
-    but an earlier version from Zeev Tarantov was used as reference.
-
-  * Only link snappy_unittest against -lz and other autodetected
-    libraries, not libsnappy.so (which doesn't need any such dependency).
-
-  * Fixed some display issues in the microbenchmarks, one of which would
-    frequently make the test crash on GNU/Hurd.
-
-
-Snappy v1.0.2, April 29th 2011:
-
-  * Relicense to a BSD-type license.
-
-  * Added C bindings, contributed by Martin Gieseking.
-
-  * More Win32 fixes, in particular for MSVC.
-
-  * Replace geo.protodata with a newer version.
-
-  * Fix timing inaccuracies in the unit test when comparing Snappy
-    to other algorithms.
-
-
-Snappy v1.0.1, March 25th 2011:
-
-This is a maintenance release, mostly containing minor fixes.
-There is no new functionality. The most important fixes include:
-
-  * The COPYING file and all licensing headers now correctly state that
-    Snappy is licensed under the Apache 2.0 license.
-
-  * snappy_unittest should now compile natively under Windows,
-    as well as on embedded systems with no mmap().
-
-  * Various autotools nits have been fixed.
-
-
-Snappy v1.0, March 17th 2011:
-
-  * Initial version.
deleted file mode 100644
--- a/other-licenses/snappy/src/README
+++ /dev/null
@@ -1,135 +0,0 @@
-Snappy, a fast compressor/decompressor.
-
-
-Introduction
-============
-
-Snappy is a compression/decompression library. It does not aim for maximum
-compression, or compatibility with any other compression library; instead,
-it aims for very high speeds and reasonable compression. For instance,
-compared to the fastest mode of zlib, Snappy is an order of magnitude faster
-for most inputs, but the resulting compressed files are anywhere from 20% to
-100% bigger. (For more information, see "Performance", below.)
-
-Snappy has the following properties:
-
- * Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
-   See "Performance" below.
- * Stable: Over the last few years, Snappy has compressed and decompressed
-   petabytes of data in Google's production environment. The Snappy bitstream
-   format is stable and will not change between versions.
- * Robust: The Snappy decompressor is designed not to crash in the face of
-   corrupted or malicious input.
- * Free and open source software: Snappy is licensed under a BSD-type license.
-   For more information, see the included COPYING file.
-
-Snappy has previously been called "Zippy" in some Google presentations
-and the like.
-
-
-Performance
-===========
- 
-Snappy is intended to be fast. On a single core of a Core i7 processor
-in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
-about 500 MB/sec or more. (These numbers are for the slowest inputs in our
-benchmark suite; others are much faster.) In our tests, Snappy usually
-is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
-etc.) while achieving comparable compression ratios.
-
-Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
-for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
-other already-compressed data. Similar numbers for zlib in its fastest mode
-are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
-capable of achieving yet higher compression rates, although usually at the
-expense of speed. Of course, compression ratio will vary significantly with
-the input.
-
-Although Snappy should be fairly portable, it is primarily optimized
-for 64-bit x86-compatible processors, and may run slower in other environments.
-In particular:
-
- - Snappy uses 64-bit operations in several places to process more data at
-   once than would otherwise be possible.
- - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
-   On some platforms, these must be emulated with single-byte loads 
-   and stores, which is much slower.
- - Snappy assumes little-endian throughout, and needs to byte-swap data in
-   several places if running on a big-endian platform.
-
-Experience has shown that even heavily tuned code can be improved.
-Performance optimizations, whether for 64-bit x86 or other platforms,
-are of course most welcome; see "Contact", below.
-
-
-Usage
-=====
-
-Note that Snappy, both the implementation and the main interface,
-is written in C++. However, several third-party bindings to other languages
-are available; see the Google Code page at http://code.google.com/p/snappy/
-for more information. Also, if you want to use Snappy from C code, you can
-use the included C bindings in snappy-c.h.
-
-To use Snappy from your own C++ program, include the file "snappy.h" from
-your calling file, and link against the compiled library.
-
-There are many ways to call Snappy, but the simplest possible is
-
-  snappy::Compress(input.data(), input.size(), &output);
-
-and similarly
-
-  snappy::Uncompress(input.data(), input.size(), &output);
-
-where "input" and "output" are both instances of std::string.
-
-There are other interfaces that are more flexible in various ways, including
-support for custom (non-array) input sources. See the header file for more
-information.
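-
-As a minimal round-trip sketch (illustrative only -- the example data, error
-handling and build setup here are assumptions, not part of this README):
-
-  #include <iostream>
-  #include <string>
-  #include "snappy.h"
-
-  int main() {
-    std::string input(1000, 'a');   // some compressible example data
-    std::string compressed, restored;
-
-    // Compress "input" into "compressed".
-    snappy::Compress(input.data(), input.size(), &compressed);
-
-    // Uncompress returns false if the compressed data is corrupted.
-    if (!snappy::Uncompress(compressed.data(), compressed.size(), &restored)) {
-      std::cerr << "corrupted input" << std::endl;
-      return 1;
-    }
-    std::cout << input.size() << " -> " << compressed.size() << " bytes"
-              << std::endl;
-    return restored == input ? 0 : 1;
-  }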
-
-
-Tests and benchmarks
-====================
-
-When you compile Snappy, snappy_unittest is compiled in addition to the
-library itself. You do not need it to use the compressor from your own library,
-but it contains several useful components for Snappy development.
-
-First of all, it contains unit tests, verifying correctness on your machine in
-various scenarios. If you want to change or optimize Snappy, please run the
-tests to verify you have not broken anything. Note that if you have the
-Google Test library installed, unit test behavior (especially failures) will be
-significantly more user-friendly. You can find Google Test at
-
-  http://code.google.com/p/googletest/
-
-You probably also want the gflags library for handling of command-line flags;
-you can find it at
-
-  http://code.google.com/p/google-gflags/
-
-In addition to the unit tests, snappy contains microbenchmarks used to
-tune compression and decompression performance. These are automatically run
-before the unit tests, but you can disable them using the flag
---run_microbenchmarks=false if you have gflags installed (otherwise you will
-need to edit the source).
-
-Finally, snappy can benchmark Snappy against a few other compression libraries
-(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
-To benchmark using a given file, give the compression algorithm you want to test
-Snappy against (e.g. --zlib) and then a list of one or more file names on the
-command line. The testdata/ directory contains the files used by the
-microbenchmark, which should provide a reasonably balanced starting point for
-benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
-are used to verify correctness in the presence of corrupted data in the unit
-test.)
-
-
-Contact
-=======
-
-Snappy is distributed through Google Code. For the latest version, a bug tracker,
-and other information, see
-
-  http://code.google.com/p/snappy/
deleted file mode 100644
--- a/other-licenses/snappy/src/format_description.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-Snappy compressed format description
-Last revised: 2011-10-05
-
-
-This is not a formal specification, but should suffice to explain most
-relevant parts of how the Snappy format works. It is originally based on
-text by Zeev Tarantov.
-
-Snappy is a LZ77-type compressor with a fixed, byte-oriented encoding.
-There is no entropy encoder backend nor framing layer -- the latter is
-assumed to be handled by other parts of the system.
-
-This document only describes the format, not how the Snappy compressor nor
-decompressor actually works. The correctness of the decompressor should not
-depend on implementation details of the compressor, and vice versa.
-
-
-1. Preamble
-
-The stream starts with the uncompressed length (up to a maximum of 2^32 - 1),
-stored as a little-endian varint. Varints consist of a series of bytes,
-where the lower 7 bits are data and the upper bit is set iff there are
-more bytes to be read. In other words, an uncompressed length of 64 would
-be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
-would be stored as 0xFE 0xFF 0x7F.
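-
-(The following decoder sketch is illustrative only and is not part of the
-format definition; the 5-byte cap simply reflects the 2^32 - 1 maximum above.)
-
-  // Decode the little-endian varint length described above.
-  // Returns the number of bytes consumed, or 0 on truncated/overlong input.
-  static size_t DecodeLength(const unsigned char* p, size_t n,
-                             unsigned long long* result) {
-    unsigned long long value = 0;
-    for (size_t i = 0; i < n && i < 5; ++i) {
-      value |= static_cast<unsigned long long>(p[i] & 0x7f) << (7 * i);
-      if ((p[i] & 0x80) == 0) {   // high bit clear: this was the last byte
-        *result = value;
-        return i + 1;
-      }
-    }
-    return 0;
-  }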
-
-
-2. The compressed stream itself
-
-There are two types of elements in a Snappy stream: Literals and
-copies (backreferences). There is no restriction on the order of elements,
-except that the stream naturally cannot start with a copy. (Having
-two literals in a row is never optimal from a compression point of
-view, but nevertheless fully permitted.) Each element starts with a tag byte,
-and the lower two bits of this tag byte signal what type of element will
-follow:
-
-  00: Literal
-  01: Copy with 1-byte offset
-  10: Copy with 2-byte offset
-  11: Copy with 4-byte offset
-
-The interpretation of the upper six bits is element-dependent.
-
-
-2.1. Literals (00)
-
-Literals are uncompressed data stored directly in the byte stream.
-The literal length is stored differently depending on the length
-of the literal:
-
- - For literals up to and including 60 bytes in length, the upper
-   six bits of the tag byte contain (len-1). The literal follows
-   immediately thereafter in the bytestream.
- - For longer literals, the (len-1) value is stored after the tag byte,
-   little-endian. The upper six bits of the tag byte describe how
-   many bytes are used for the length; 60, 61, 62 or 63 for
-   1-4 bytes, respectively. The literal itself follows after the
-   length.
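-
-(Illustrative only; not part of the format definition.) The two cases above
-can be decoded as follows, with bounds checking omitted:
-
-  // Parse a literal element starting at tag byte *p. On return, *len holds
-  // the literal length and the returned pointer is the first literal byte.
-  static const unsigned char* ParseLiteral(const unsigned char* p,
-                                           unsigned int* len) {
-    unsigned int upper = *p++ >> 2;        // upper six bits of the tag byte
-    if (upper < 60) {                      // inline length: (len-1) in the tag
-      *len = upper + 1;
-    } else {
-      unsigned int extra = upper - 59;     // 60..63 => 1..4 extra length bytes
-      unsigned int l = 0;
-      for (unsigned int i = 0; i < extra; ++i)
-        l |= static_cast<unsigned int>(p[i]) << (8 * i);  // little-endian (len-1)
-      *len = l + 1;
-      p += extra;
-    }
-    return p;
-  }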
-
-
-2.2. Copies
-
-Copies are references back into previous decompressed data, telling
-the decompressor to reuse data it has previously decoded.
-They encode two values: The _offset_, saying how many bytes back
-from the current position to read, and the _length_, how many bytes
-to copy. Offsets of zero can be encoded, but are not legal;
-similarly, it is possible to encode backreferences that would
-go past the end of the block (offset > current decompressed position),
-which is also nonsensical and thus not allowed.
-
-As in most LZ77-based compressors, the length can be larger than the offset,
-yielding a form of run-length encoding (RLE). For instance,
-"xababab" could be encoded as
-
-  <literal: "xab"> <copy: offset=2 length=4>
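-
-(Illustrative only.) Because the length may exceed the offset, a decoder has
-to copy byte by byte (or otherwise handle the overlap) rather than use a
-plain memcpy:
-
-  // Copy "length" bytes from "offset" bytes back in the output, handling
-  // the overlapping case that produces the RLE effect described above.
-  static void IncrementalCopy(unsigned char* dst, unsigned int offset,
-                              unsigned int length) {
-    const unsigned char* src = dst - offset;
-    while (length-- > 0)
-      *dst++ = *src++;
-  }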
-
-Note that since the current Snappy compressor works in 32 kB
-blocks and does not do matching across blocks, it will never produce
-a bitstream with offsets larger than about 32768. However, the
-decompressor should not rely on this, as it may change in the future.
-
-There are several different kinds of copy elements, depending on
-the amount of bytes to be copied (length), and how far back the
-data to be copied is (offset).
-
-
-2.2.1. Copy with 1-byte offset (01)
-
-These elements can encode lengths between [4..11] bytes and offsets
-between [0..2047] bytes. (len-4) occupies three bits and is stored
-in bits [2..4] of the tag byte. The offset occupies 11 bits, of which the
-upper three are stored in the upper three bits ([5..7]) of the tag byte,
-and the lower eight are stored in a byte following the tag byte.
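-
-(Illustrative only.) Decoding such an element amounts to:
-
-  // Parse a "copy with 1-byte offset" element starting at tag byte *p.
-  static const unsigned char* ParseCopy1(const unsigned char* p,
-                                         unsigned int* length,
-                                         unsigned int* offset) {
-    unsigned char tag = *p++;
-    *length = ((tag >> 2) & 0x7) + 4;   // bits [2..4] of the tag hold (len-4)
-    // Upper three offset bits live in bits [5..7] of the tag; the low eight
-    // follow in the next byte.
-    *offset = (static_cast<unsigned int>(tag >> 5) << 8) | *p++;
-    return p;
-  }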
-
-
-2.2.2. Copy with 2-byte offset (10)
-
-These elements can encode lengths between [1..64] and offsets from
-[0..65535]. (len-1) occupies six bits and is stored in the upper
-six bits ([2..7]) of the tag byte. The offset is stored as a
-little-endian 16-bit integer in the two bytes following the tag byte.
-
-
-2.2.3. Copy with 4-byte offset (11)
-
-These are like the copies with 2-byte offsets (see previous subsection),
-except that the offset is stored as a 32-bit integer instead of a
-16-bit integer (and thus will occupy four bytes).
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-c.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "snappy.h"
-#include "snappy-c.h"
-
-extern "C" {
-
-snappy_status snappy_compress(const char* input,
-                              size_t input_length,
-                              char* compressed,
-                              size_t *compressed_length) {
-  if (*compressed_length < snappy_max_compressed_length(input_length)) {
-    return SNAPPY_BUFFER_TOO_SMALL;
-  }
-  snappy::RawCompress(input, input_length, compressed, compressed_length);
-  return SNAPPY_OK;
-}
-
-snappy_status snappy_uncompress(const char* compressed,
-                                size_t compressed_length,
-                                char* uncompressed,
-                                size_t* uncompressed_length) {
-  size_t real_uncompressed_length;
-  if (!snappy::GetUncompressedLength(compressed,
-                                     compressed_length,
-                                     &real_uncompressed_length)) {
-    return SNAPPY_INVALID_INPUT;
-  }
-  if (*uncompressed_length < real_uncompressed_length) {
-    return SNAPPY_BUFFER_TOO_SMALL;
-  }
-  if (!snappy::RawUncompress(compressed, compressed_length, uncompressed)) {
-    return SNAPPY_INVALID_INPUT;
-  }
-  *uncompressed_length = real_uncompressed_length;
-  return SNAPPY_OK;
-}
-
-size_t snappy_max_compressed_length(size_t source_length) {
-  return snappy::MaxCompressedLength(source_length);
-}
-
-snappy_status snappy_uncompressed_length(const char *compressed,
-                                         size_t compressed_length,
-                                         size_t *result) {
-  if (snappy::GetUncompressedLength(compressed,
-                                    compressed_length,
-                                    result)) {
-    return SNAPPY_OK;
-  } else {
-    return SNAPPY_INVALID_INPUT;
-  }
-}
-
-snappy_status snappy_validate_compressed_buffer(const char *compressed,
-                                                size_t compressed_length) {
-  if (snappy::IsValidCompressedBuffer(compressed, compressed_length)) {
-    return SNAPPY_OK;
-  } else {
-    return SNAPPY_INVALID_INPUT;
-  }
-}
-
-}  // extern "C"
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-c.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Plain C interface (a wrapper around the C++ implementation).
- */
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-
-/*
- * Return values; see the documentation for each function to know
- * what each can return.
- */
-typedef enum {
-  SNAPPY_OK = 0,
-  SNAPPY_INVALID_INPUT = 1,
-  SNAPPY_BUFFER_TOO_SMALL = 2,
-} snappy_status;
-
-/*
- * Takes the data stored in "input[0..input_length-1]" and stores
- * it in the array pointed to by "compressed".
- *
- * <compressed_length> signals the space available in "compressed".
- * If it is not at least equal to "snappy_max_compressed_length(input_length)",
- * SNAPPY_BUFFER_TOO_SMALL is returned. After successful compression,
- * <compressed_length> contains the true length of the compressed output,
- * and SNAPPY_OK is returned.
- *
- * Example:
- *   size_t output_length = snappy_max_compressed_length(input_length);
- *   char* output = (char*)malloc(output_length);
- *   if (snappy_compress(input, input_length, output, &output_length)
- *       == SNAPPY_OK) {
- *     ... Process(output, output_length) ...
- *   }
- *   free(output);
- */
-snappy_status snappy_compress(const char* input,
-                              size_t input_length,
-                              char* compressed,
-                              size_t* compressed_length);
-
-/*
- * Given data in "compressed[0..compressed_length-1]" generated by
- * calling the snappy_compress routine, this routine stores
- * the uncompressed data to
- *   uncompressed[0..uncompressed_length-1].
- * Returns failure (a value not equal to SNAPPY_OK) if the message
- * is corrupted and could not be decompressed.
- *
- * <uncompressed_length> signals the space available in "uncompressed".
- * If it is not at least equal to the value returned by
- * snappy_uncompressed_length for this stream, SNAPPY_BUFFER_TOO_SMALL
- * is returned. After successful decompression, <uncompressed_length>
- * contains the true length of the decompressed output.
- *
- * Example:
- *   size_t output_length;
- *   if (snappy_uncompressed_length(input, input_length, &output_length)
- *       != SNAPPY_OK) {
- *     ... fail ...
- *   }
- *   char* output = (char*)malloc(output_length);
- *   if (snappy_uncompress(input, input_length, output, &output_length)
- *       == SNAPPY_OK) {
- *     ... Process(output, output_length) ...
- *   }
- *   free(output);
- */
-snappy_status snappy_uncompress(const char* compressed,
-                                size_t compressed_length,
-                                char* uncompressed,
-                                size_t* uncompressed_length);
-
-/*
- * Returns the maximal size of the compressed representation of
- * input data that is "source_length" bytes in length.
- */
-size_t snappy_max_compressed_length(size_t source_length);
-
-/*
- * REQUIRES: "compressed[]" was produced by snappy_compress()
- * Returns SNAPPY_OK and stores the length of the uncompressed data in
- * *result normally. Returns SNAPPY_INVALID_INPUT on parsing error.
- * This operation takes O(1) time.
- */
-snappy_status snappy_uncompressed_length(const char* compressed,
-                                         size_t compressed_length,
-                                         size_t* result);
-
-/*
- * Check if the contents of "compressed[]" can be uncompressed successfully.
- * Does not return the uncompressed data; returns SNAPPY_OK if the buffer
- * is valid, or SNAPPY_INVALID_INPUT if it is not.
- * Takes time proportional to compressed_length, but is usually at least a
- * factor of four faster than actual decompression.
- */
-snappy_status snappy_validate_compressed_buffer(const char* compressed,
-                                                size_t compressed_length);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  /* UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */
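-
-/*
- * Illustrative sketch only (not part of this header): a typical decode path
- * combining the calls documented above. Buffer handling is an assumption.
- */
-#include <stdlib.h>
-#include "snappy-c.h"
-
-static char* decode_or_null(const char* comp, size_t comp_len, size_t* out_len) {
-  char* out;
-  /* Cheap validity check before allocating anything. */
-  if (snappy_validate_compressed_buffer(comp, comp_len) != SNAPPY_OK)
-    return NULL;
-  if (snappy_uncompressed_length(comp, comp_len, out_len) != SNAPPY_OK)
-    return NULL;
-  out = (char*)malloc(*out_len);
-  if (out != NULL &&
-      snappy_uncompress(comp, comp_len, out, out_len) != SNAPPY_OK) {
-    free(out);
-    return NULL;
-  }
-  return out;
-}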
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-internal.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2008 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Internals shared between the Snappy implementation and its unittest.
-
-#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-
-#include "snappy-stubs-internal.h"
-
-namespace snappy {
-namespace internal {
-
-class WorkingMemory {
- public:
-  WorkingMemory() : large_table_(NULL) { }
-  ~WorkingMemory() { delete[] large_table_; }
-
-  // Allocates and clears a hash table using memory in "*this",
-  // stores the number of buckets in "*table_size" and returns a pointer to
-  // the base of the hash table.
-  uint16* GetHashTable(size_t input_size, int* table_size);
-
- private:
-  uint16 small_table_[1<<10];    // 2KB
-  uint16* large_table_;          // Allocated only when needed
-
-  DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
-};
-
-// Flat array compression that does not emit the "uncompressed length"
-// prefix. Compresses "input" string to the "*op" buffer.
-//
-// REQUIRES: "input_length <= kBlockSize"
-// REQUIRES: "op" points to an array of memory that is at least
-// "MaxCompressedLength(input_length)" in size.
-// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-// REQUIRES: "table_size" is a power of two
-//
-// Returns an "end" pointer into "op" buffer.
-// "end - op" is the compressed size of "input".
-char* CompressFragment(const char* input,
-                       size_t input_length,
-                       char* op,
-                       uint16* table,
-                       const int table_size);
-
-// Return the largest n such that
-//
-//   s1[0,n-1] == s2[0,n-1]
-//   and n <= (s2_limit - s2).
-//
-// Does not read *s2_limit or beyond.
-// Does not read *(s1 + (s2_limit - s2)) or beyond.
-// Requires that s2_limit >= s2.
-//
-// Separate implementation for x86_64, for speed.  Uses the fact that
-// x86_64 is little endian.
-#if defined(ARCH_K8)
-static inline int FindMatchLength(const char* s1,
-                                  const char* s2,
-                                  const char* s2_limit) {
-  DCHECK_GE(s2_limit, s2);
-  int matched = 0;
-
-  // Find out how long the match is. We loop over the data 64 bits at a
-  // time until we find a 64-bit block that doesn't match; then we find
-  // the first non-matching bit and use that to calculate the total
-  // length of the match.
-  while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
-    if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
-      s2 += 8;
-      matched += 8;
-    } else {
-      // On current (mid-2008) Opteron models there is a 3% more
-      // efficient code sequence to find the first non-matching byte.
-      // However, what follows is ~10% better on Intel Core 2 and newer,
-      // and we expect AMD's bsf instruction to improve.
-      uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
-      int matching_bits = Bits::FindLSBSetNonZero64(x);
-      matched += matching_bits >> 3;
-      return matched;
-    }
-  }
-  while (PREDICT_TRUE(s2 < s2_limit)) {
-    if (PREDICT_TRUE(s1[matched] == *s2)) {
-      ++s2;
-      ++matched;
-    } else {
-      return matched;
-    }
-  }
-  return matched;
-}
-#else
-static inline int FindMatchLength(const char* s1,
-                                  const char* s2,
-                                  const char* s2_limit) {
-  // Implementation based on the x86-64 version, above.
-  DCHECK_GE(s2_limit, s2);
-  int matched = 0;
-
-  while (s2 <= s2_limit - 4 &&
-         UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
-    s2 += 4;
-    matched += 4;
-  }
-  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
-    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
-    int matching_bits = Bits::FindLSBSetNonZero(x);
-    matched += matching_bits >> 3;
-  } else {
-    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
-      ++s2;
-      ++matched;
-    }
-  }
-  return matched;
-}
-#endif
-
-}  // end namespace internal
-}  // end namespace snappy
-
-#endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-sinksource.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <string.h>
-
-#include "snappy-sinksource.h"
-
-namespace snappy {
-
-Source::~Source() { }
-
-Sink::~Sink() { }
-
-char* Sink::GetAppendBuffer(size_t length, char* scratch) {
-  return scratch;
-}
-
-ByteArraySource::~ByteArraySource() { }
-
-size_t ByteArraySource::Available() const { return left_; }
-
-const char* ByteArraySource::Peek(size_t* len) {
-  *len = left_;
-  return ptr_;
-}
-
-void ByteArraySource::Skip(size_t n) {
-  left_ -= n;
-  ptr_ += n;
-}
-
-UncheckedByteArraySink::~UncheckedByteArraySink() { }
-
-void UncheckedByteArraySink::Append(const char* data, size_t n) {
-  // Do no copying if the caller filled in the result of GetAppendBuffer()
-  if (data != dest_) {
-    memcpy(dest_, data, n);
-  }
-  dest_ += n;
-}
-
-char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
-  return dest_;
-}
-
-
-}
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-sinksource.h
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-
-#include <stddef.h>
-
-
-namespace snappy {
-
-// A Sink is an interface that consumes a sequence of bytes.
-class Sink {
- public:
-  Sink() { }
-  virtual ~Sink();
-
-  // Append "bytes[0,n-1]" to this.
-  virtual void Append(const char* bytes, size_t n) = 0;
-
-  // Returns a writable buffer of the specified length for appending.
-  // May return a pointer to the caller-owned scratch buffer which
-  // must have at least the indicated length.  The returned buffer is
-  // only valid until the next operation on this Sink.
-  //
-  // After writing at most "length" bytes, call Append() with the
-  // pointer returned from this function and the number of bytes
-  // written.  Many Append() implementations will avoid copying
-  // bytes if this function returned an internal buffer.
-  //
-  // If a non-scratch buffer is returned, the caller may only pass a
-  // prefix of it to Append().  That is, it is not correct to pass an
-  // interior pointer of the returned array to Append().
-  //
-  // The default implementation always returns the scratch buffer.
-  virtual char* GetAppendBuffer(size_t length, char* scratch);
-
- private:
-  // No copying
-  Sink(const Sink&);
-  void operator=(const Sink&);
-};
-
-// A Source is an interface that yields a sequence of bytes
-class Source {
- public:
-  Source() { }
-  virtual ~Source();
-
-  // Return the number of bytes left to read from the source
-  virtual size_t Available() const = 0;
-
-  // Peek at the next flat region of the source.  Does not reposition
-  // the source.  The returned region is empty iff Available()==0.
-  //
-  // Returns a pointer to the beginning of the region and stores its
-  // length in *len.
-  //
-  // The returned region is valid until the next call to Skip() or
-  // until this object is destroyed, whichever occurs first.
-  //
-  // The returned region may be larger than Available() (for example
-  // if this ByteSource is a view on a substring of a larger source).
-  // The caller is responsible for ensuring that it only reads the
-  // Available() bytes.
-  virtual const char* Peek(size_t* len) = 0;
-
-  // Skip the next n bytes.  Invalidates any buffer returned by
-  // a previous call to Peek().
-  // REQUIRES: Available() >= n
-  virtual void Skip(size_t n) = 0;
-
- private:
-  // No copying
-  Source(const Source&);
-  void operator=(const Source&);
-};
-
-// A Source implementation that yields the contents of a flat array
-class ByteArraySource : public Source {
- public:
-  ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
-  virtual ~ByteArraySource();
-  virtual size_t Available() const;
-  virtual const char* Peek(size_t* len);
-  virtual void Skip(size_t n);
- private:
-  const char* ptr_;
-  size_t left_;
-};
-
-// A Sink implementation that writes to a flat array without any bound checks.
-class UncheckedByteArraySink : public Sink {
- public:
-  explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
-  virtual ~UncheckedByteArraySink();
-  virtual void Append(const char* data, size_t n);
-  virtual char* GetAppendBuffer(size_t len, char* scratch);
-
-  // Return the current output pointer so that a caller can see how
-  // many bytes were produced.
-  // Note: this is not a Sink method.
-  char* CurrentDestination() const { return dest_; }
- private:
-  char* dest_;
-};
-
-
-}
-
-#endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
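-
-// Illustrative sketch only (not part of this header): a minimal Sink that
-// appends everything to a std::string, relying only on the interface above.
-#include <string>
-#include "snappy-sinksource.h"
-
-class StringSink : public snappy::Sink {
- public:
-  explicit StringSink(std::string* dest) : dest_(dest) { }
-  virtual ~StringSink() { }
-  // Always copies; the default GetAppendBuffer() scratch path is fine here.
-  virtual void Append(const char* bytes, size_t n) {
-    dest_->append(bytes, n);
-  }
- private:
-  std::string* dest_;
-};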
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-stubs-internal.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <algorithm>
-#include <string>
-
-#include "snappy-stubs-internal.h"
-
-namespace snappy {
-
-void Varint::Append32(string* s, uint32 value) {
-  char buf[Varint::kMax32];
-  const char* p = Varint::Encode32(buf, value);
-  s->append(buf, p - buf);
-}
-
-}  // namespace snappy
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-stubs-internal.h
+++ /dev/null
@@ -1,515 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various stubs for the open-source version of Snappy.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <iostream>
-#include <string>
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#include "snappy-stubs-public.h"
-
-#if defined(__x86_64__)
-
-// Enable 64-bit optimized versions of some routines.
-#define ARCH_K8 1
-
-#endif
-
-// Needed by OS X, among others.
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// Pull in std::min, std::ostream, and the likes. This is safe because this
-// header file is never used from any public header files.
-using namespace std;
-
-// The size of an array, if known at compile-time.
-// Will give unexpected results if used on a pointer.
-// We undefine it first, since some compilers already have a definition.
-#ifdef ARRAYSIZE
-#undef ARRAYSIZE
-#endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
-
-// Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
-#else
-#define PREDICT_FALSE(x) x
-#define PREDICT_TRUE(x) x
-#endif
-
-// This is only used for recomputing the tag byte table used during
-// decompression; for simplicity we just remove it from the open-source
-// version (anyone who wants to regenerate it can just do the call
-// themselves within main()).
-#define DEFINE_bool(flag_name, default_value, description) \
-  bool FLAGS_ ## flag_name = default_value;
-#define DECLARE_bool(flag_name) \
-  extern bool FLAGS_ ## flag_name;
-#define REGISTER_MODULE_INITIALIZER(name, code)
-
-namespace snappy {
-
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
-
-// Logging.
-
-#define LOG(level) LogMessage()
-#define VLOG(level) true ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessage()
-
-class LogMessage {
- public:
-  LogMessage() { }
-  ~LogMessage() {
-    cerr << endl;
-  }
-
-  LogMessage& operator<<(const std::string& msg) {
-    cerr << msg;
-    return *this;
-  }
-  LogMessage& operator<<(int x) {
-    cerr << x;
-    return *this;
-  }
-};
-
-// Asserts, both versions activated in debug mode only,
-// and ones that are always active.
-
-#define CRASH_UNLESS(condition) \
-    PREDICT_TRUE(condition) ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
-
-class LogMessageCrash : public LogMessage {
- public:
-  LogMessageCrash() { }
-  ~LogMessageCrash() {
-    cerr << endl;
-    abort();
-  }
-};
-
-// This class is used to explicitly ignore values in the conditional
-// logging macros.  This avoids compiler warnings like "value computed
-// is not used" and "statement has no effect".
-
-class LogMessageVoidify {
- public:
-  LogMessageVoidify() { }
-  // This has to be an operator with a precedence lower than << but
-  // higher than ?:
-  void operator&(const LogMessage&) { }
-};
-
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) CRASH_UNLESS(true)
-#define DCHECK_LE(a, b) CRASH_UNLESS(true)
-#define DCHECK_GE(a, b) CRASH_UNLESS(true)
-#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
-#define DCHECK_NE(a, b) CRASH_UNLESS(true)
-#define DCHECK_LT(a, b) CRASH_UNLESS(true)
-#define DCHECK_GT(a, b) CRASH_UNLESS(true)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
-// Potentially unaligned loads and stores.
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-#else
-
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
-
-inline uint16 UNALIGNED_LOAD16(const void *p) {
-  uint16 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-#endif
-
-// The following guarantees declaration of the byte swap functions.
-#ifdef WORDS_BIGENDIAN
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
-  return (x << 8) | (x >> 8);
-}
-
-inline uint32 bswap_32(uint32 x) {
-  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
-  return (x >> 16) | (x << 16);
-}
-
-inline uint64 bswap_64(uint64 x) {
-  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
-  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
-  return (x >> 32) | (x << 32);
-}
-
-#endif
-
-#endif  // WORDS_BIGENDIAN
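
// Editorial sketch (not part of the removed file): a quick check that the
// portable bswap_32 fallback above reverses byte order as expected. It reuses
// the uint32 typedef and <assert.h>, both already pulled in by this header,
// and inlines the same two swap steps so it compiles on either endianness.
inline void CheckBswap32Fallback() {
  uint32 x = 0x12345678u;
  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);  // swap within 16-bit halves
  x = (x >> 16) | (x << 16);                                  // swap the halves
  assert(x == 0x78563412u);                                   // bytes fully reversed
}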
-
-// Convert to little-endian storage, opposite of network format.
-// Convert x from host to little endian: x = LittleEndian.FromHost(x);
-// convert x from little endian to host: x = LittleEndian.ToHost(x);
-//
-//  Store values into unaligned memory converting to little endian order:
-//    LittleEndian.Store16(p, x);
-//
-//  Load unaligned values stored in little endian converting to host order:
-//    x = LittleEndian.Load16(p);
-class LittleEndian {
- public:
-  // Conversion functions.
-#ifdef WORDS_BIGENDIAN
-
-  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
-  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
-  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
-  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
-  static bool IsLittleEndian() { return false; }
-
-#else  // !defined(WORDS_BIGENDIAN)
-
-  static uint16 FromHost16(uint16 x) { return x; }
-  static uint16 ToHost16(uint16 x) { return x; }
-
-  static uint32 FromHost32(uint32 x) { return x; }
-  static uint32 ToHost32(uint32 x) { return x; }
-
-  static bool IsLittleEndian() { return true; }
-
-#endif  // !defined(WORDS_BIGENDIAN)
-
-  // Functions to do unaligned loads and stores in little-endian order.
-  static uint16 Load16(const void *p) {
-    return ToHost16(UNALIGNED_LOAD16(p));
-  }
-
-  static void Store16(void *p, uint16 v) {
-    UNALIGNED_STORE16(p, FromHost16(v));
-  }
-
-  static uint32 Load32(const void *p) {
-    return ToHost32(UNALIGNED_LOAD32(p));
-  }
-
-  static void Store32(void *p, uint32 v) {
-    UNALIGNED_STORE32(p, FromHost32(v));
-  }
-};
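
// Editorial sketch (not part of the removed file): a store/load round trip
// through the LittleEndian helpers declared above, deliberately using an
// unaligned position; it relies only on names already defined in this header.
inline void CheckLittleEndianRoundTrip() {
  char buf[8];
  LittleEndian::Store32(buf + 1, 0x11223344u);           // unaligned on purpose
  assert(LittleEndian::Load32(buf + 1) == 0x11223344u);  // value survives the byte-order hop
}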
-
-// Some bit-manipulation functions.
-class Bits {
- public:
-  // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
-  static int Log2Floor(uint32 n);
-
-  // Return the first set least / most significant bit, 0-indexed.  Returns an
-  // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
-  // that it's 0-indexed.
-  static int FindLSBSetNonZero(uint32 n);
-  static int FindLSBSetNonZero64(uint64 n);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Bits);
-};
-
-#ifdef HAVE_BUILTIN_CTZ
-
-inline int Bits::Log2Floor(uint32 n) {
-  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
-}
-
-inline int Bits::FindLSBSetNonZero(uint32 n) {
-  return __builtin_ctz(n);
-}
-
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  return __builtin_ctzll(n);
-}
-
-#else  // Portable versions.
-
-inline int Bits::Log2Floor(uint32 n) {
-  if (n == 0)
-    return -1;
-  int log = 0;
-  uint32 value = n;
-  for (int i = 4; i >= 0; --i) {
-    int shift = (1 << i);
-    uint32 x = value >> shift;
-    if (x != 0) {
-      value = x;
-      log += shift;
-    }
-  }
-  assert(value == 1);
-  return log;
-}
-
-inline int Bits::FindLSBSetNonZero(uint32 n) {
-  int rc = 31;
-  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
-    const uint32 x = n << shift;
-    if (x != 0) {
-      n = x;
-      rc -= shift;
-    }
-    shift >>= 1;
-  }
-  return rc;
-}
-
-// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  const uint32 bottombits = static_cast<uint32>(n);
-  if (bottombits == 0) {
-    // Bottom bits are zero, so scan in top bits
-    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
-  } else {
-    return FindLSBSetNonZero(bottombits);
-  }
-}
-
-#endif  // End portable versions.
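
// Editorial sketch (not part of the removed file): sample values for the bit
// helpers above, valid for both the builtin and the portable implementations.
inline void CheckBitsHelpers() {
  assert(Bits::Log2Floor(1) == 0);
  assert(Bits::Log2Floor(1000) == 9);         // 2^9 = 512 <= 1000 < 1024 = 2^10
  assert(Bits::FindLSBSetNonZero(0x8) == 3);  // lowest set bit of 0b1000
}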
-
-// Variable-length integer encoding.
-class Varint {
- public:
-  // Maximum length of varint encoding of a uint32.
-  static const int kMax32 = 5;
-
-  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
-  // Never reads a character at or beyond limit.  If a valid/terminated varint32
-  // was found in the range, stores it in *OUTPUT and returns a pointer just
-  // past the last byte of the varint32. Else returns NULL.  On success,
-  // "result <= limit".
-  static const char* Parse32WithLimit(const char* ptr, const char* limit,
-                                      uint32* OUTPUT);
-
-  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
-  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
-  //            byte just past the last encoded byte.
-  static char* Encode32(char* ptr, uint32 v);
-
-  // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(string* s, uint32 value);
-};
-
-inline const char* Varint::Parse32WithLimit(const char* p,
-                                            const char* l,
-                                            uint32* OUTPUT) {
-  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
-  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
-  uint32 b, result;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result = b & 127;          if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
-  return NULL;       // Value is too long to be a varint32
- done:
-  *OUTPUT = result;
-  return reinterpret_cast<const char*>(ptr);
-}
-
-inline char* Varint::Encode32(char* sptr, uint32 v) {
-  // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
-  static const int B = 128;
-  if (v < (1<<7)) {
-    *(ptr++) = v;
-  } else if (v < (1<<14)) {
-    *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
-  } else {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
-  }
-  return reinterpret_cast<char*>(ptr);
-}
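
// Editorial sketch (not part of the removed file): an encode/parse round trip
// through the Varint helpers above; kMax32 bounds the scratch buffer and the
// assert macro comes from <assert.h>, included earlier in this header.
inline void CheckVarintRoundTrip() {
  char buf[Varint::kMax32];
  const uint32 value = 300;                    // needs two varint bytes
  char* end = Varint::Encode32(buf, value);
  uint32 parsed = 0;
  const char* p = Varint::Parse32WithLimit(buf, end, &parsed);
  assert(p == end && parsed == value);
}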
-
-// If you know the internal layout of the std::string in use, you can
-// replace this function with one that resizes the string without
-// filling the new space with zeros (if applicable) --
-// it will be non-portable but faster.
-inline void STLStringResizeUninitialized(string* s, size_t new_size) {
-  s->resize(new_size);
-}
-
-// Return a mutable char* pointing to a string's internal buffer,
-// which may not be null-terminated. Writing through this pointer will
-// modify the string.
-//
-// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
-// next call to a string method that invalidates iterators.
-//
-// As of 2006-04, there is no standard-blessed way of getting a
-// mutable reference to a string's internal buffer. However, issue 530
-// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
-// proposes this as the method. It will officially be part of the standard
-// for C++0x. This should already work on all current implementations.
-inline char* string_as_array(string* str) {
-  return str->empty() ? NULL : &*str->begin();
-}
-
-}  // namespace snappy
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-stubs-public.h.in
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-// Author: sesse@google.com (Steinar H. Gunderson)
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various type stubs for the open-source version of Snappy.
-//
-// This file cannot include config.h, as it is included from snappy.h,
-// which is a public header. Instead, snappy-stubs-public.h is generated
-// from snappy-stubs-public.h.in at configure time.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-
-#if @ac_cv_have_stdint_h@
-#include <stdint.h>
-#endif
-
-#if @ac_cv_have_stddef_h@
-#include <stddef.h>
-#endif
-
-#define SNAPPY_MAJOR @SNAPPY_MAJOR@
-#define SNAPPY_MINOR @SNAPPY_MINOR@
-#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
-#define SNAPPY_VERSION \
-    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
-
-#include <string>
-
-namespace snappy {
-
-#if @ac_cv_have_stdint_h@
-typedef int8_t int8;
-typedef uint8_t uint8;
-typedef int16_t int16;
-typedef uint16_t uint16;
-typedef int32_t int32;
-typedef uint32_t uint32;
-typedef int64_t int64;
-typedef uint64_t uint64;
-#else
-typedef signed char int8;
-typedef unsigned char uint8;
-typedef short int16;
-typedef unsigned short uint16;
-typedef int int32;
-typedef unsigned int uint32;
-typedef long long int64;
-typedef unsigned long long uint64;
-#endif
-
-typedef std::string string;
-
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);               \
-  void operator=(const TypeName&)
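
// Editorial sketch (not part of the removed template): how the macro above is
// meant to be used inside a class body in this 2011-era codebase, where
// "= delete" was not yet available.
class NonCopyableExample {
 public:
  NonCopyableExample() { }
 private:
  DISALLOW_COPY_AND_ASSIGN(NonCopyableExample);
};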
-
-}  // namespace snappy
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-test.cc
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various stubs for the unit tests for the open-source version of Snappy.
-
-#include "snappy-test.h"
-
-#ifdef HAVE_WINDOWS_H
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#endif
-
-#include <algorithm>
-
-DEFINE_bool(run_microbenchmarks, true,
-            "Run microbenchmarks before doing anything else.");
-
-namespace snappy {
-
-string ReadTestDataFile(const string& base) {
-  string contents;
-  const char* srcdir = getenv("srcdir");  // This is set by Automake.
-  if (srcdir) {
-    File::ReadFileToStringOrDie(
-        string(srcdir) + "/testdata/" + base, &contents);
-  } else {
-    File::ReadFileToStringOrDie("testdata/" + base, &contents);
-  }
-  return contents;
-}
-
-string StringPrintf(const char* format, ...) {
-  char buf[4096];
-  va_list ap;
-  va_start(ap, format);
-  vsnprintf(buf, sizeof(buf), format, ap);
-  va_end(ap);
-  return buf;
-}
-
-bool benchmark_running = false;
-int64 benchmark_real_time_us = 0;
-int64 benchmark_cpu_time_us = 0;
-string *benchmark_label = NULL;
-int64 benchmark_bytes_processed = 0;
-
-void ResetBenchmarkTiming() {
-  benchmark_real_time_us = 0;
-  benchmark_cpu_time_us = 0;
-}
-
-#ifdef WIN32
-LARGE_INTEGER benchmark_start_real;
-FILETIME benchmark_start_cpu;
-#else  // WIN32
-struct timeval benchmark_start_real;
-struct rusage benchmark_start_cpu;
-#endif  // WIN32
-
-void StartBenchmarkTiming() {
-#ifdef WIN32
-  QueryPerformanceCounter(&benchmark_start_real);
-  FILETIME dummy;
-  CHECK(GetProcessTimes(
-      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
-#else
-  gettimeofday(&benchmark_start_real, NULL);
-  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
-    perror("getrusage(RUSAGE_SELF)");
-    exit(1);
-  }
-#endif
-  benchmark_running = true;
-}
-
-void StopBenchmarkTiming() {
-  if (!benchmark_running) {
-    return;
-  }
-
-#ifdef WIN32
-  LARGE_INTEGER benchmark_stop_real;
-  LARGE_INTEGER benchmark_frequency;
-  QueryPerformanceCounter(&benchmark_stop_real);
-  QueryPerformanceFrequency(&benchmark_frequency);
-
-  double elapsed_real = static_cast<double>(
-      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
-      benchmark_frequency.QuadPart;
-  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
-
-  FILETIME benchmark_stop_cpu, dummy;
-  CHECK(GetProcessTimes(
-      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
-
-  ULARGE_INTEGER start_ulargeint;
-  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
-  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
-
-  ULARGE_INTEGER stop_ulargeint;
-  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
-  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
-
-  benchmark_cpu_time_us +=
-      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
-#else  // WIN32
-  struct timeval benchmark_stop_real;
-  gettimeofday(&benchmark_stop_real, NULL);
-  benchmark_real_time_us +=
-      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
-  benchmark_real_time_us +=
-      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
-
-  struct rusage benchmark_stop_cpu;
-  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
-    perror("getrusage(RUSAGE_SELF)");
-    exit(1);
-  }
-  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
-                                      benchmark_start_cpu.ru_utime.tv_sec);
-  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
-                            benchmark_start_cpu.ru_utime.tv_usec);
-#endif  // WIN32
-
-  benchmark_running = false;
-}
-
-void SetBenchmarkLabel(const string& str) {
-  if (benchmark_label) {
-    delete benchmark_label;
-  }
-  benchmark_label = new string(str);
-}
-
-void SetBenchmarkBytesProcessed(int64 bytes) {
-  benchmark_bytes_processed = bytes;
-}
-
-struct BenchmarkRun {
-  int64 real_time_us;
-  int64 cpu_time_us;
-};
-
-struct BenchmarkCompareCPUTime {
-  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
-    return a.cpu_time_us < b.cpu_time_us;
-  }
-};
-
-void Benchmark::Run() {
-  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
-    // Run a few iterations first to find out approximately how fast
-    // the benchmark is.
-    const int kCalibrateIterations = 100;
-    ResetBenchmarkTiming();
-    StartBenchmarkTiming();
-    (*function_)(kCalibrateIterations, test_case_num);
-    StopBenchmarkTiming();
-
-    // Let each test case run for about 200ms, but at least as many
-    // as we used to calibrate.
-    // Run five times and pick the median.
-    const int kNumRuns = 5;
-    const int kMedianPos = kNumRuns / 2;
-    int num_iterations = 0;
-    if (benchmark_real_time_us > 0) {
-      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
-    }
-    num_iterations = max(num_iterations, kCalibrateIterations);
-    BenchmarkRun benchmark_runs[kNumRuns];
-
-    for (int run = 0; run < kNumRuns; ++run) {
-      ResetBenchmarkTiming();
-      StartBenchmarkTiming();
-      (*function_)(num_iterations, test_case_num);
-      StopBenchmarkTiming();
-
-      benchmark_runs[run].real_time_us = benchmark_real_time_us;
-      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
-    }
-
-    nth_element(benchmark_runs,
-                benchmark_runs + kMedianPos,
-                benchmark_runs + kNumRuns,
-                BenchmarkCompareCPUTime());
-    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
-    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
-    int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;
-
-    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
-    string human_readable_speed;
-    if (bytes_per_second < 1024) {
-      human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
-    } else if (bytes_per_second < 1024 * 1024) {
-      human_readable_speed = StringPrintf(
-          "%.1fkB/s", bytes_per_second / 1024.0f);
-    } else if (bytes_per_second < 1024 * 1024 * 1024) {
-      human_readable_speed = StringPrintf(
-          "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
-    } else {
-      human_readable_speed = StringPrintf(
-          "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
-    }
-
-    fprintf(stderr,
-#ifdef WIN32
-            "%-18s %10I64d %10I64d %10d %s  %s\n",
-#else
-            "%-18s %10lld %10lld %10d %s  %s\n",
-#endif
-            heading.c_str(),
-            static_cast<long long>(real_time_us * 1000 / num_iterations),
-            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
-            num_iterations,
-            human_readable_speed.c_str(),
-            benchmark_label->c_str());
-  }
-}
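
// Editorial sketch (not part of the removed file): the median-of-runs
// selection used in Run() above, isolated on a fixed array. std::nth_element
// only partially orders the data, which is all a median read requires.
static void CheckMedianOfRuns() {
  int64 runs[5] = {40, 10, 30, 50, 20};
  std::nth_element(runs, runs + 2, runs + 5);
  assert(runs[2] == 30);  // the middle element after partial ordering
}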
-
-#ifdef HAVE_LIBZ
-
-ZLib::ZLib()
-    : comp_init_(false),
-      uncomp_init_(false) {
-  Reinit();
-}
-
-ZLib::~ZLib() {
-  if (comp_init_)   { deflateEnd(&comp_stream_); }
-  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
-}
-
-void ZLib::Reinit() {
-  compression_level_ = Z_DEFAULT_COMPRESSION;
-  window_bits_ = MAX_WBITS;
-  mem_level_ =  8;  // DEF_MEM_LEVEL
-  if (comp_init_) {
-    deflateEnd(&comp_stream_);
-    comp_init_ = false;
-  }
-  if (uncomp_init_) {
-    inflateEnd(&uncomp_stream_);
-    uncomp_init_ = false;
-  }
-  first_chunk_ = true;
-}
-
-void ZLib::Reset() {
-  first_chunk_ = true;
-}
-
-// --------- COMPRESS MODE
-
-// Initialization method to be called if we hit an error while
-// compressing. On hitting an error, call this method before returning
-// the error.
-void ZLib::CompressErrorInit() {
-  deflateEnd(&comp_stream_);
-  comp_init_ = false;
-  Reset();
-}
-
-int ZLib::DeflateInit() {
-  return deflateInit2(&comp_stream_,
-                      compression_level_,
-                      Z_DEFLATED,
-                      window_bits_,
-                      mem_level_,
-                      Z_DEFAULT_STRATEGY);
-}
-
-int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
-                       const Bytef *source, uLong *sourceLen) {
-  int err;
-
-  comp_stream_.next_in = (Bytef*)source;
-  comp_stream_.avail_in = (uInt)*sourceLen;
-  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
-  comp_stream_.next_out = dest;
-  comp_stream_.avail_out = (uInt)*destLen;
-  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
-
-  if ( !first_chunk_ )   // only need to set up stream the first time through
-    return Z_OK;
-
-  if (comp_init_) {      // we've already initted it
-    err = deflateReset(&comp_stream_);
-    if (err != Z_OK) {
-      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
-      deflateEnd(&comp_stream_);
-      comp_init_ = false;
-    }
-  }
-  if (!comp_init_) {     // first use
-    comp_stream_.zalloc = (alloc_func)0;
-    comp_stream_.zfree = (free_func)0;
-    comp_stream_.opaque = (voidpf)0;
-    err = DeflateInit();
-    if (err != Z_OK) return err;
-    comp_init_ = true;
-  }
-  return Z_OK;
-}
-
-// In a perfect world we'd always have the full buffer to compress
-// when the time came, and we could just call Compress().  Alas, we
-// want to do chunked compression on our webserver.  In this
-// application, we compress the header, send it off, then compress the
-// results, send them off, then compress the footer.  Thus we need to
-// use the chunked compression features of zlib.
-int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
-                              const Bytef *source, uLong *sourceLen,
-                              int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
-  int err;
-
-  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
-    return err;
-
-  // This is used to figure out how many bytes we wrote *this chunk*
-  int compressed_size = comp_stream_.total_out;
-
-  // Some setup happens only for the first chunk we compress in a run
-  if ( first_chunk_ ) {
-    first_chunk_ = false;
-  }
-
-  // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
-  // compression.
-  err = deflate(&comp_stream_, flush_mode);
-
-  const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
-  *sourceLen = comp_stream_.avail_in;
-
-  if ((err == Z_STREAM_END || err == Z_OK)
-      && comp_stream_.avail_in == 0
-      && comp_stream_.avail_out != 0 ) {
-    // we processed everything ok and the output buffer was large enough.
-    ;
-  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
-    return Z_BUF_ERROR;                            // should never happen
-  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
-    // an error happened
-    CompressErrorInit();
-    return err;
-  } else if (comp_stream_.avail_out == 0) {     // not enough space
-    err = Z_BUF_ERROR;
-  }
-
-  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
-  if (err == Z_STREAM_END)
-    err = Z_OK;
-
-  // update the crc and other metadata
-  compressed_size = comp_stream_.total_out - compressed_size;  // delta
-  *destLen = compressed_size;
-
-  return err;
-}
-
-int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
-                             const Bytef *source, uLong sourceLen,
-                             int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
-  const int ret =
-    CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
-  if (ret == Z_BUF_ERROR)
-    CompressErrorInit();
-  return ret;
-}
-
-// This routine only initializes the compression stream once.  Thereafter, it
-// just does a deflateReset on the stream, which should be faster.
-int ZLib::Compress(Bytef *dest, uLongf *destLen,
-                   const Bytef *source, uLong sourceLen) {
-  int err;
-  const uLongf orig_destLen = *destLen;
-  if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
-                               Z_FINISH)) != Z_OK )
-    return err;
-  Reset();         // reset for next call to Compress
-
-  return Z_OK;
-}
-
-
-// --------- UNCOMPRESS MODE
-
-int ZLib::InflateInit() {
-  return inflateInit2(&uncomp_stream_, MAX_WBITS);
-}
-
-// Initialization method to be called if we hit an error while
-// uncompressing. On hitting an error, call this method before
-// returning the error.
-void ZLib::UncompressErrorInit() {
-  inflateEnd(&uncomp_stream_);
-  uncomp_init_ = false;
-  Reset();
-}
-
-int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
-                         const Bytef *source, uLong *sourceLen) {
-  int err;
-
-  uncomp_stream_.next_in = (Bytef*)source;
-  uncomp_stream_.avail_in = (uInt)*sourceLen;
-  // Check for source > 64K on 16-bit machine:
-  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
-
-  uncomp_stream_.next_out = dest;
-  uncomp_stream_.avail_out = (uInt)*destLen;
-  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
-
-  if ( !first_chunk_ )   // only need to set up stream the first time through
-    return Z_OK;
-
-  if (uncomp_init_) {    // we've already initted it
-    err = inflateReset(&uncomp_stream_);
-    if (err != Z_OK) {
-      LOG(WARNING)
-        << "ERROR: Can't reset uncompress object; creating a new one";
-      UncompressErrorInit();
-    }
-  }
-  if (!uncomp_init_) {
-    uncomp_stream_.zalloc = (alloc_func)0;
-    uncomp_stream_.zfree = (free_func)0;
-    uncomp_stream_.opaque = (voidpf)0;
-    err = InflateInit();
-    if (err != Z_OK) return err;
-    uncomp_init_ = true;
-  }
-  return Z_OK;
-}
-
-// If you compressed your data a chunk at a time, with CompressChunk,
-// you can uncompress it a chunk at a time with UncompressChunk.
-// The only difference between chunked and unchunked uncompression
-// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
-int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
-                                const Bytef *source, uLong *sourceLen,
-                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
-  int err = Z_OK;
-
-  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
-    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
-                 << *sourceLen;
-    return err;
-  }
-
-  // This is used to figure out how many output bytes we wrote *this chunk*:
-  const uLong old_total_out = uncomp_stream_.total_out;
-
-  // This is used to figure out how many input bytes we read *this chunk*:
-  const uLong old_total_in = uncomp_stream_.total_in;
-
-  // Some setup happens only for the first chunk we uncompress in a run
-  if ( first_chunk_ ) {
-    first_chunk_ = false;                          // so we don't do this again
-
-    // For the first chunk *only* (to avoid infinite troubles), we let
-    // there be no actual data to uncompress.  This sometimes triggers
-    // when the input is only the gzip header, say.
-    if ( *sourceLen == 0 ) {
-      *destLen = 0;
-      return Z_OK;
-    }
-  }
-
-  // We'll uncompress as much as we can.  If we end OK great, otherwise
-  // if we get an error that seems to be the gzip footer, we store the
-  // gzip footer and return OK, otherwise we return the error.
-
-  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
-  err = inflate(&uncomp_stream_, flush_mode);
-
-  // Figure out how many bytes of the input zlib slurped up:
-  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
-  CHECK_LE(source + bytes_read, source + *sourceLen);
-  *sourceLen = uncomp_stream_.avail_in;
-
-  if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
-             && uncomp_stream_.avail_in == 0) {    // and we read it all
-    ;
-  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
-    LOG(WARNING)
-      << "UncompressChunkOrAll: Received some extra data, bytes total: "
-      << uncomp_stream_.avail_in << " bytes: "
-      << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
-                min(int(uncomp_stream_.avail_in), 20));
-    UncompressErrorInit();
-    return Z_DATA_ERROR;       // what's the extra data for?
-  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
-    // an error happened
-    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
-                 << " avail_out: " << uncomp_stream_.avail_out;
-    UncompressErrorInit();
-    return err;
-  } else if (uncomp_stream_.avail_out == 0) {
-    err = Z_BUF_ERROR;
-  }
-
-  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
-  if (err == Z_STREAM_END)
-    err = Z_OK;
-
-  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call
-
-  return err;
-}
-
-int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
-                               const Bytef *source, uLong sourceLen,
-                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
-  const int ret =
-    UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
-  if (ret == Z_BUF_ERROR)
-    UncompressErrorInit();
-  return ret;
-}
-
-int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
-                          const Bytef *source, uLong *sourceLen) {
-  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
-}
-
-// We make sure we've uncompressed everything, that is, the current
-// uncompress stream is at a compressed-buffer-EOF boundary.  In gzip
-// mode, we also check the gzip footer to make sure we pass the gzip
-// consistency checks.  We RETURN true iff both types of checks pass.
-bool ZLib::UncompressChunkDone() {
-  assert(!first_chunk_ && uncomp_init_);
-  // Make sure we're at the end-of-compressed-data point.  This means
-  // if we call inflate with Z_FINISH we won't consume any input or
-  // write any output
-  Bytef dummyin, dummyout;
-  uLongf dummylen = 0;
-  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
-       != Z_OK ) {
-    return false;
-  }
-
-  // Make sure that when we exit, we can start a new round of chunks later
-  Reset();
-
-  return true;
-}
-
-// Uncompresses the source buffer into the destination buffer.
-// The destination buffer must be long enough to hold the entire
-// decompressed contents.
-//
-// We only initialize the uncomp_stream once.  Thereafter, we use
-// inflateReset, which should be faster.
-//
-// Returns Z_OK on success, otherwise, it returns a zlib error code.
-int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
-                     const Bytef *source, uLong sourceLen) {
-  int err;
-  if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
-                                 Z_FINISH)) != Z_OK ) {
-    Reset();                           // let us try to compress again
-    return err;
-  }
-  if ( !UncompressChunkDone() )        // calls Reset()
-    return Z_DATA_ERROR;
-  return Z_OK;  // stream_end is ok
-}
-
-#endif  // HAVE_LIBZ
-
-}  // namespace snappy
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy-test.h
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various stubs for the unit tests for the open-source version of Snappy.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
-
-#include "snappy-stubs-internal.h"
-
-#include <stdio.h>
-#include <stdarg.h>
-
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#ifdef HAVE_SYS_RESOURCE_H
-#include <sys/resource.h>
-#endif
-
-#include <sys/time.h>
-
-#ifdef HAVE_WINDOWS_H
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#endif
-
-#include <string>
-
-#ifdef HAVE_GTEST
-
-#include <gtest/gtest.h>
-#undef TYPED_TEST
-#define TYPED_TEST TEST
-#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
-
-#else
-
-// Stubs for if the user doesn't have Google Test installed.
-
-#define TEST(test_case, test_subcase) \
-  void Test_ ## test_case ## _ ## test_subcase()
-#define INIT_GTEST(argc, argv)
-
-#define TYPED_TEST TEST
-#define EXPECT_EQ CHECK_EQ
-#define EXPECT_NE CHECK_NE
-#define EXPECT_FALSE(cond) CHECK(!(cond))
-
-#endif
-
-#ifdef HAVE_GFLAGS
-
-#include <gflags/gflags.h>
-
-// This is tricky; both gflags and Google Test want to look at the command line
-// arguments. Google Test seems to be the most happy with unknown arguments,
-// though, so we call it first and hope for the best.
-#define InitGoogle(argv0, argc, argv, remove_flags) \
-  INIT_GTEST(argc, argv); \
-  google::ParseCommandLineFlags(argc, argv, remove_flags);
-
-#else
-
-// If we don't have the gflags package installed, these can only be
-// changed at compile time.
-#define DEFINE_int32(flag_name, default_value, description) \
-  static int FLAGS_ ## flag_name = default_value;
-
-#define InitGoogle(argv0, argc, argv, remove_flags) \
-  INIT_GTEST(argc, argv)
-
-#endif
-
-#ifdef HAVE_LIBZ
-#include "zlib.h"
-#endif
-
-#ifdef HAVE_LIBLZO2
-#include "lzo/lzo1x.h"
-#endif
-
-#ifdef HAVE_LIBLZF
-extern "C" {
-#include "lzf.h"
-}
-#endif
-
-#ifdef HAVE_LIBFASTLZ
-#include "fastlz.h"
-#endif
-
-#ifdef HAVE_LIBQUICKLZ
-#include "quicklz.h"
-#endif
-
-namespace {
-namespace File {
-  void Init() { }
-
-  void ReadFileToStringOrDie(const char* filename, string* data) {
-    FILE* fp = fopen(filename, "rb");
-    if (fp == NULL) {
-      perror(filename);
-      exit(1);
-    }
-
-    data->clear();
-    while (!feof(fp)) {
-      char buf[4096];
-      size_t ret = fread(buf, 1, 4096, fp);
-      if (ret == 0 && ferror(fp)) {  // fread() never returns -1; check ferror()
-        perror("fread");
-        exit(1);
-      }
-      data->append(string(buf, ret));
-    }
-
-    fclose(fp);
-  }
-
-  void ReadFileToStringOrDie(const string& filename, string* data) {
-    ReadFileToStringOrDie(filename.c_str(), data);
-  }
-
-  void WriteStringToFileOrDie(const string& str, const char* filename) {
-    FILE* fp = fopen(filename, "wb");
-    if (fp == NULL) {
-      perror(filename);
-      exit(1);
-    }
-
-    int ret = fwrite(str.data(), str.size(), 1, fp);
-    if (ret != 1) {
-      perror("fwrite");
-      exit(1);
-    }
-
-    fclose(fp);
-  }
-}  // namespace File
-}  // namespace
-
-namespace snappy {
-
-#define FLAGS_test_random_seed 301
-typedef string TypeParam;
-
-void Test_CorruptedTest_VerifyCorrupted();
-void Test_Snappy_SimpleTests();
-void Test_Snappy_MaxBlowup();
-void Test_Snappy_RandomData();
-void Test_Snappy_FourByteOffset();
-void Test_SnappyCorruption_TruncatedVarint();
-void Test_SnappyCorruption_UnterminatedVarint();
-void Test_Snappy_ReadPastEndOfBuffer();
-void Test_Snappy_FindMatchLength();
-void Test_Snappy_FindMatchLengthRandom();
-
-string ReadTestDataFile(const string& base);
-
-// A sprintf() variant that returns a std::string.
-// Not safe for general use due to truncation issues.
-string StringPrintf(const char* format, ...);
-
-// A simple, non-cryptographically-secure random generator.
-class ACMRandom {
- public:
-  explicit ACMRandom(uint32 seed) : seed_(seed) {}
-
-  int32 Next();
-
-  int32 Uniform(int32 n) {
-    return Next() % n;
-  }
-  uint8 Rand8() {
-    return static_cast<uint8>((Next() >> 1) & 0x000000ff);
-  }
-  bool OneIn(int X) { return Uniform(X) == 0; }
-
-  // Skewed: pick "base" uniformly from range [0,max_log] and then
-  // return "base" random bits.  The effect is to pick a number in the
-  // range [0,2^max_log-1] with bias towards smaller numbers.
-  int32 Skewed(int max_log);
-
- private:
-  static const uint32 M = 2147483647L;   // 2^31-1
-  uint32 seed_;
-};
-
-inline int32 ACMRandom::Next() {
-  static const uint64 A = 16807;  // bits 14, 8, 7, 5, 2, 1, 0
-  // We are computing
-  //       seed_ = (seed_ * A) % M,    where M = 2^31-1
-  //
-  // seed_ must not be zero or M, or else all subsequent computed values
-  // will be zero or M respectively.  For all other values, seed_ will end
-  // up cycling through every number in [1,M-1]
-  uint64 product = seed_ * A;
-
-  // Compute (product % M) using the fact that ((x << 31) % M) == x.
-  seed_ = (product >> 31) + (product & M);
-  // The first reduction may overflow by 1 bit, so we may need to repeat.
-  // mod == M is not possible; using > allows the faster sign-bit-based test.
-  if (seed_ > M) {
-    seed_ -= M;
-  }
-  return seed_;
-}
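
// Editorial sketch (not part of the removed file): verifies that the
// shift-and-add reduction in Next() above agrees with a direct modulo for one
// sample seed; uses the uint64 typedef pulled in via snappy-stubs-internal.h.
inline void CheckLehmerReduction() {
  const uint64 A = 16807, M = 2147483647ULL;         // M = 2^31 - 1
  uint64 product = 123456789ULL * A;
  uint64 reduced = (product >> 31) + (product & M);  // the trick from Next()
  if (reduced > M) reduced -= M;                     // at most one correction needed
  assert(reduced == product % M);
}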
-
-inline int32 ACMRandom::Skewed(int max_log) {
-  const int32 base = (Next() - 1) % (max_log+1);
-  return (Next() - 1) & ((1u << base)-1);
-}
-
-// A wall-time clock. This stub is not super-accurate, nor resistant to the
-// system time changing.
-class CycleTimer {
- public:
-  CycleTimer() : real_time_us_(0) {}
-
-  void Start() {
-#ifdef WIN32
-    QueryPerformanceCounter(&start_);
-#else
-    gettimeofday(&start_, NULL);
-#endif
-  }
-
-  void Stop() {
-#ifdef WIN32
-    LARGE_INTEGER stop;
-    LARGE_INTEGER frequency;
-    QueryPerformanceCounter(&stop);
-    QueryPerformanceFrequency(&frequency);
-
-    double elapsed = static_cast<double>(stop.QuadPart - start_.QuadPart) /
-        frequency.QuadPart;
-    real_time_us_ += elapsed * 1e6 + 0.5;
-#else
-    struct timeval stop;
-    gettimeofday(&stop, NULL);
-
-    real_time_us_ += 1000000 * (stop.tv_sec - start_.tv_sec);
-    real_time_us_ += (stop.tv_usec - start_.tv_usec);
-#endif
-  }
-
-  double Get() {
-    return real_time_us_ * 1e-6;
-  }
-
- private:
-  int64 real_time_us_;
-#ifdef WIN32
-  LARGE_INTEGER start_;
-#else
-  struct timeval start_;
-#endif
-};
-
-// Minimalistic microbenchmark framework.
-
-typedef void (*BenchmarkFunction)(int, int);
-
-class Benchmark {
- public:
-  Benchmark(const string& name, BenchmarkFunction function) :
-      name_(name), function_(function) {}
-
-  Benchmark* DenseRange(int start, int stop) {
-    start_ = start;
-    stop_ = stop;
-    return this;
-  }
-
-  void Run();
-
- private:
-  const string name_;
-  const BenchmarkFunction function_;
-  int start_, stop_;
-};
-#define BENCHMARK(benchmark_name) \
-  Benchmark* Benchmark_ ## benchmark_name = \
-          (new Benchmark(#benchmark_name, benchmark_name))
-
-extern Benchmark* Benchmark_BM_UFlat;
-extern Benchmark* Benchmark_BM_UValidate;
-extern Benchmark* Benchmark_BM_ZFlat;
-
-void ResetBenchmarkTiming();
-void StartBenchmarkTiming();
-void StopBenchmarkTiming();
-void SetBenchmarkLabel(const string& str);
-void SetBenchmarkBytesProcessed(int64 bytes);
-
-#ifdef HAVE_LIBZ
-
-// Object-oriented wrapper around zlib.
-class ZLib {
- public:
-  ZLib();
-  ~ZLib();
-
-  // Wipe a ZLib object to a virgin state.  This differs from Reset()
-  // in that it also breaks any state.
-  void Reinit();
-
-  // Call this to make a zlib buffer as good as new.  Here's the only
-  // case where they differ:
-  //    CompressChunk(a); CompressChunk(b); CompressChunkDone();   vs
-  //    CompressChunk(a); Reset(); CompressChunk(b); CompressChunkDone();
-  // You'll want to use Reset(), then, when you interrupt a compress
-  // (or uncompress) in the middle of a chunk and want to start over.
-  void Reset();
-
-  // According to the zlib manual, when you Compress, the destination
-  // buffer must have size at least src + 0.1% * src + 12 bytes.  This function
-  // helps you calculate that.  Augment this to account for a potential
-  // gzip header and footer, plus a few bytes of slack.
-  static int MinCompressbufSize(int uncompress_size) {
-    return uncompress_size + uncompress_size/1000 + 40;
-  }
-
-  // Compresses the source buffer into the destination buffer.
-  // sourceLen is the byte length of the source buffer.
-  // Upon entry, destLen is the total size of the destination buffer,
-  // which must be of size at least MinCompressbufSize(sourceLen).
-  // Upon exit, destLen is the actual size of the compressed buffer.
-  //
-  // This function can be used to compress a whole file at once if the
-  // input file is mmap'ed.
-  //
-  // Returns Z_OK if success, Z_MEM_ERROR if there was not
-  // enough memory, Z_BUF_ERROR if there was not enough room in the
-  // output buffer. Note that if the output buffer is exactly the same
-  // size as the compressed result, we still return Z_BUF_ERROR.
-  // (check CL#1936076)
-  int Compress(Bytef *dest, uLongf *destLen,
-               const Bytef *source, uLong sourceLen);
-
-  // Uncompresses the source buffer into the destination buffer.
-  // The destination buffer must be long enough to hold the entire
-  // decompressed contents.
-  //
-  // Returns Z_OK on success, otherwise, it returns a zlib error code.
-  int Uncompress(Bytef *dest, uLongf *destLen,
-                 const Bytef *source, uLong sourceLen);
-
-  // Uncompress data one chunk at a time -- ie you can call this
-  // more than once.  To get this to work you need to call per-chunk
-  // and "done" routines.
-  //
-  // Returns Z_OK if success, Z_MEM_ERROR if there was not
-  // enough memory, Z_BUF_ERROR if there was not enough room in the
-  // output buffer.
-
-  int UncompressAtMost(Bytef *dest, uLongf *destLen,
-                       const Bytef *source, uLong *sourceLen);
-
-  // Checks gzip footer information, as needed.  Mostly this just
-  // makes sure the checksums match.  Whenever you call this, it
-  // will assume the last 8 bytes from the previous UncompressChunk
-  // call are the footer.  Returns true iff everything looks ok.
-  bool UncompressChunkDone();
-
- private:
-  int InflateInit();       // sets up the zlib inflate structure
-  int DeflateInit();       // sets up the zlib deflate structure
-
-  // These init the zlib data structures for compressing/uncompressing
-  int CompressInit(Bytef *dest, uLongf *destLen,
-                   const Bytef *source, uLong *sourceLen);
-  int UncompressInit(Bytef *dest, uLongf *destLen,
-                     const Bytef *source, uLong *sourceLen);
-  // Initialization method to be called if we hit an error while
-  // uncompressing. On hitting an error, call this method before
-  // returning the error.
-  void UncompressErrorInit();
-
-  // Helper function for Compress
-  int CompressChunkOrAll(Bytef *dest, uLongf *destLen,
-                         const Bytef *source, uLong sourceLen,
-                         int flush_mode);
-  int CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
-                          const Bytef *source, uLong *sourceLen,
-                          int flush_mode);
-
-  // Likewise, helper functions for Uncompress and UncompressChunk
-  int UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
-                           const Bytef *source, uLong sourceLen,
-                           int flush_mode);
-
-  int UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
-                            const Bytef *source, uLong *sourceLen,
-                            int flush_mode);
-
-  // Initialization method to be called if we hit an error while
-  // compressing. On hitting an error, call this method before
-  // returning the error.
-  void CompressErrorInit();
-
-  int compression_level_;   // compression level
-  int window_bits_;         // log base 2 of the window size used in compression
-  int mem_level_;           // specifies the amount of memory to be used by
-                            // compressor (1-9)
-  z_stream comp_stream_;    // Zlib stream data structure
-  bool comp_init_;          // True if we have initialized comp_stream_
-  z_stream uncomp_stream_;  // Zlib stream data structure
-  bool uncomp_init_;        // True if we have initialized uncomp_stream_
-
-  // These are used only with chunked compression.
-  bool first_chunk_;       // true if we need to emit headers with this chunk
-};
-
-#endif  // HAVE_LIBZ
-
-}  // namespace snappy
-
-DECLARE_bool(run_microbenchmarks);
-
-static void RunSpecifiedBenchmarks() {
-  if (!FLAGS_run_microbenchmarks) {
-    return;
-  }
-
-  fprintf(stderr, "Running microbenchmarks.\n");
-#ifndef NDEBUG
-  fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
-#endif
-#ifndef __OPTIMIZE__
-  fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
-#endif
-  fprintf(stderr, "Benchmark            Time(ns)    CPU(ns) Iterations\n");
-  fprintf(stderr, "---------------------------------------------------\n");
-
-  snappy::Benchmark_BM_UFlat->Run();
-  snappy::Benchmark_BM_UValidate->Run();
-  snappy::Benchmark_BM_ZFlat->Run();
-
-  fprintf(stderr, "\n");
-}
-
-#ifndef HAVE_GTEST
-
-static inline int RUN_ALL_TESTS() {
-  fprintf(stderr, "Running correctness tests.\n");
-  snappy::Test_CorruptedTest_VerifyCorrupted();
-  snappy::Test_Snappy_SimpleTests();
-  snappy::Test_Snappy_MaxBlowup();
-  snappy::Test_Snappy_RandomData();
-  snappy::Test_Snappy_FourByteOffset();
-  snappy::Test_SnappyCorruption_TruncatedVarint();
-  snappy::Test_SnappyCorruption_UnterminatedVarint();
-  snappy::Test_Snappy_ReadPastEndOfBuffer();
-  snappy::Test_Snappy_FindMatchLength();
-  snappy::Test_Snappy_FindMatchLengthRandom();
-  fprintf(stderr, "All tests passed.\n");
-
-  return 0;
-}
-
-#endif  // HAVE_GTEST
-
-// For main().
-namespace snappy {
-
-static void CompressFile(const char* fname);
-static void UncompressFile(const char* fname);
-static void MeasureFile(const char* fname);
-
-}  // namespace snappy
-
-using snappy::CompressFile;
-using snappy::UncompressFile;
-using snappy::MeasureFile;
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy.cc
+++ /dev/null
@@ -1,1030 +0,0 @@
-// Copyright 2005 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "snappy.h"
-#include "snappy-internal.h"
-#include "snappy-sinksource.h"
-
-#include <stdio.h>
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-
-namespace snappy {
-
-// Any hash function will produce a valid compressed bitstream, but a good
-// hash function reduces the number of collisions and thus yields better
-// compression for compressible input, and more speed for incompressible
-// input. Of course, it doesn't hurt if the hash function is reasonably fast
-// either, as it gets called a lot.
-static inline uint32 HashBytes(uint32 bytes, int shift) {
-  uint32 kMul = 0x1e35a7bd;
-  return (bytes * kMul) >> shift;
-}
-static inline uint32 Hash(const char* p, int shift) {
-  return HashBytes(UNALIGNED_LOAD32(p), shift);
-}
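
// Editorial sketch (not part of the removed file): presumably "shift" is
// 32 minus the log2 of the hash-table size, so the hash always lands inside
// the table; the check below replays that with the same multiplier.
static inline void CheckHashShiftRange() {
  const uint32 kMul = 0x1e35a7bd;
  const int table_bits = 14;               // e.g. a 16K-entry table (illustrative)
  const int shift = 32 - table_bits;
  uint32 h = (0xdeadbeefu * kMul) >> shift;
  assert(h < (1u << table_bits));          // always a valid bucket index
}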
-
-size_t MaxCompressedLength(size_t source_len) {
-  // Compressed data can be defined as:
-  //    compressed := item* literal*
-  //    item       := literal* copy
-  //
-  // The trailing literal sequence has a space blowup of at most 62/60
-  // since a literal of length 60 needs one tag byte + one extra byte
-  // for length information.
-  //
-  // Item blowup is trickier to measure.  Suppose the "copy" op copies
-  // 4 bytes of data.  Because of a special check in the encoding code,
-  // we produce a 4-byte copy only if the offset is < 65536.  Therefore
-  // the copy op takes 3 bytes to encode, and this type of item leads
-  // to at most the 62/60 blowup for representing literals.
-  //
-  // Suppose the "copy" op copies 5 bytes of data.  If the offset is big
-  // enough, it will take 5 bytes to encode the copy op.  Therefore the
-  // worst case here is a one-byte literal followed by a five-byte copy.
-  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
-  //
-  // This last factor dominates the blowup, so the final estimate is:
-  return 32 + source_len + source_len/6;
-}
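
// Editorial sketch (not part of the removed file): the bound above evaluated
// for one concrete, purely illustrative input size.
static inline void CheckMaxCompressedLengthExample() {
  size_t n = 60000;
  assert(MaxCompressedLength(n) == 32 + n + n / 6);  // 32 + 60000 + 10000 == 70032
}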
-
-enum {
-  LITERAL = 0,
-  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
-  COPY_2_BYTE_OFFSET = 2,
-  COPY_4_BYTE_OFFSET = 3
-};
-
-// Copy "len" bytes from "src" to "op", one byte at a time.  Used for
-// handling COPY operations where the input and output regions may
-// overlap.  For example, suppose:
-//    src    == "ab"
-//    op     == src + 2
-//    len    == 20
-// After IncrementalCopy(src, op, len), the result will have
-// eleven copies of "ab"
-//    ababababababababababab
-// Note that this does not match the semantics of either memcpy()
-// or memmove().
-static inline void IncrementalCopy(const char* src, char* op, int len) {
-  DCHECK_GT(len, 0);
-  do {
-    *op++ = *src++;
-  } while (--len > 0);
-}
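
// Editorial sketch (not part of the removed file): the overlapping-copy
// semantics documented above, checked on the "ab" example from the comment.
static inline void CheckIncrementalCopyExample() {
  char buf[32] = "ab";
  IncrementalCopy(buf, buf + 2, 20);                       // op overlaps src by two bytes
  assert(memcmp(buf, "ababababababababababab", 22) == 0);  // eleven copies of "ab"
}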
-
-// Equivalent to IncrementalCopy except that it can write up to ten extra
-// bytes after the end of the copy, and that it is faster.
-//
-// The main part of this loop is a simple copy of eight bytes at a time until
-// we've copied (at least) the requested amount of bytes.  However, if op and
-// src are less than eight bytes apart (indicating a repeating pattern of
-// length < 8), we first need to expand the pattern in order to get the correct
-// results. For instance, if the buffer looks like this, with the eight-byte
-// <src> and <op> patterns marked as intervals:
-//
-//    abxxxxxxxxxxxx
-//    [------]           src
-//      [------]         op
-//
-// a single eight-byte copy from <src> to <op> will repeat the pattern once,
-// after which we can move <op> two bytes without moving <src>:
-//
-//    ababxxxxxxxxxx
-//    [------]           src
-//        [------]       op
-//
-// and repeat the exercise until the two no longer overlap.
-//
-// This allows us to do very well in the special case of one single byte
-// repeated many times, without taking a big hit for more general cases.
-//
-// The worst case of extra writing past the end of the match occurs when
-// op - src == 1 and len == 1; the last copy will read from byte positions
-// [0..7] and write to [4..11], whereas it was only supposed to write to
-// position 1. Thus, ten excess bytes.
-
-namespace {
-
-const int kMaxIncrementCopyOverflow = 10;
-
-}  // namespace
-
-static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
-  while (op - src < 8) {
-    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
-    len -= op - src;
-    op += op - src;
-  }
-  while (len > 0) {
-    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
-    src += 8;
-    op += 8;
-    len -= 8;
-  }
-}
-
-static inline char* EmitLiteral(char* op,
-                                const char* literal,
-                                int len,
-                                bool allow_fast_path) {
-  int n = len - 1;      // Zero-length literals are disallowed
-  if (n < 60) {
-    // Fits in tag byte
-    *op++ = LITERAL | (n << 2);
-
-    // The vast majority of copies are below 16 bytes, for which a
-    // call to memcpy is overkill. This fast path can sometimes
-    // copy up to 15 bytes too much, but that is okay in the
-    // main loop, since we have a bit to go on for both sides:
-    //
-    //   - The input will always have kInputMarginBytes = 15 extra
-    //     available bytes, as long as we're in the main loop, and
-    //     if not, allow_fast_path = false.
-    //   - The output will always have 32 spare bytes (see
-    //     MaxCompressedLength).
-    if (allow_fast_path && len <= 16) {
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
-      return op + len;
-    }
-  } else {
-    // Encode in upcoming bytes
-    char* base = op;
-    int count = 0;
-    op++;
-    while (n > 0) {
-      *op++ = n & 0xff;
-      n >>= 8;
-      count++;
-    }
-    assert(count >= 1);
-    assert(count <= 4);
-    *base = LITERAL | ((59+count) << 2);
-  }
-  memcpy(op, literal, len);
-  return op + len;
-}
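
The length encoding used by EmitLiteral can be shown with a small stand-alone sketch (hypothetical helper name, not the snappy emitter itself): lengths up to 60 fit in the tag byte, longer lengths spill into one to four trailing little-endian length bytes.

#include <cstdint>
#include <cstdio>
#include <vector>

// Returns just the literal header (tag byte plus optional length bytes),
// following the scheme used by EmitLiteral above.  Illustrative only.
static std::vector<uint8_t> LiteralHeader(uint32_t len) {
  std::vector<uint8_t> out;
  uint32_t n = len - 1;                      // zero-length literals disallowed
  if (n < 60) {
    out.push_back(uint8_t(n << 2));          // LITERAL == 0 in the low two bits
  } else {
    out.push_back(0);                        // tag byte, patched below
    int count = 0;
    while (n > 0) {
      out.push_back(uint8_t(n & 0xff));
      n >>= 8;
      ++count;
    }
    out[0] = uint8_t((59 + count) << 2);
  }
  return out;
}

int main() {
  std::printf("len=16:    %zu header byte(s)\n", LiteralHeader(16).size());     // 1
  std::printf("len=100:   %zu header byte(s)\n", LiteralHeader(100).size());    // 2
  std::printf("len=70000: %zu header byte(s)\n", LiteralHeader(70000).size());  // 4
  return 0;
}
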
-
-static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
-  DCHECK_LE(len, 64);
-  DCHECK_GE(len, 4);
-  DCHECK_LT(offset, 65536);
-
-  if ((len < 12) && (offset < 2048)) {
-    int len_minus_4 = len - 4;
-    assert(len_minus_4 < 8);            // Must fit in 3 bits
-    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
-    *op++ = offset & 0xff;
-  } else {
-    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
-    LittleEndian::Store16(op, offset);
-    op += 2;
-  }
-  return op;
-}
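
The two copy encodings chosen above can be sketched independently (illustrative helper only, not the snappy emitter): short, near copies take two bytes, everything else up to 64 bytes takes three.

#include <cstdint>
#include <cstdio>
#include <vector>

// Encode a single copy element (length 4..64) the way EmitCopyLessThan64
// does above.  Illustrative only.
static std::vector<uint8_t> EncodeCopy(unsigned offset, unsigned len) {
  std::vector<uint8_t> out;
  if (len < 12 && offset < 2048) {
    out.push_back(uint8_t(1u /* COPY_1_BYTE_OFFSET */ |
                          ((len - 4) << 2) | ((offset >> 8) << 5)));
    out.push_back(uint8_t(offset & 0xff));
  } else {
    out.push_back(uint8_t(2u /* COPY_2_BYTE_OFFSET */ | ((len - 1) << 2)));
    out.push_back(uint8_t(offset & 0xff));     // 16-bit little-endian offset
    out.push_back(uint8_t(offset >> 8));
  }
  return out;
}

int main() {
  std::printf("offset=100  len=7  -> %zu bytes\n", EncodeCopy(100, 7).size());   // 2
  std::printf("offset=4000 len=7  -> %zu bytes\n", EncodeCopy(4000, 7).size());  // 3
  std::printf("offset=100  len=40 -> %zu bytes\n", EncodeCopy(100, 40).size());  // 3
  return 0;
}
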
-
-static inline char* EmitCopy(char* op, int offset, int len) {
-  // Emit 64 byte copies but make sure to keep at least four bytes reserved
-  while (len >= 68) {
-    op = EmitCopyLessThan64(op, offset, 64);
-    len -= 64;
-  }
-
-  // Emit an extra 60 byte copy if we have too much data to fit in one copy
-  if (len > 64) {
-    op = EmitCopyLessThan64(op, offset, 60);
-    len -= 60;
-  }
-
-  // Emit remainder
-  op = EmitCopyLessThan64(op, offset, len);
-  return op;
-}
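
A quick way to see why the 60-byte chunk matters is to replay the splitting policy on a few lengths; the sketch below (illustrative only) shows that the final piece is never shorter than the 4-byte minimum EmitCopyLessThan64 requires.

#include <cstdio>
#include <vector>

// Reproduce the length-splitting policy of EmitCopy above.
static std::vector<int> SplitCopyLength(int len) {
  std::vector<int> pieces;
  while (len >= 68) { pieces.push_back(64); len -= 64; }
  if (len > 64)     { pieces.push_back(60); len -= 60; }
  pieces.push_back(len);
  return pieces;
}

int main() {
  for (int len : {4, 64, 67, 68, 130}) {
    std::printf("%3d ->", len);
    for (int piece : SplitCopyLength(len)) std::printf(" %d", piece);
    std::printf("\n");                     // e.g. 130 -> 64 60 6
  }
  return 0;
}
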
-
-
-bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
-  uint32 v = 0;
-  const char* limit = start + n;
-  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
-    *result = v;
-    return true;
-  } else {
-    return false;
-  }
-}
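
The length prefix parsed here is a standard little-endian base-128 varint of one to five bytes. A minimal pair of helpers (hypothetical names, not snappy's Varint class) illustrates the format:

#include <cstdint>
#include <cstdio>
#include <string>

// Append a 32-bit value as a base-128 varint, seven bits per byte,
// continuation bit in the high bit.
static void AppendVarint32(std::string* s, uint32_t v) {
  while (v >= 0x80) {
    s->push_back(char((v & 0x7f) | 0x80));
    v >>= 7;
  }
  s->push_back(char(v));
}

// Parse it back; fails on truncated input or prefixes longer than 5 bytes.
static bool ParseVarint32(const std::string& s, uint32_t* v) {
  uint32_t result = 0;
  for (size_t i = 0; i < s.size() && i < 5; ++i) {
    const uint8_t byte = uint8_t(s[i]);
    result |= uint32_t(byte & 0x7f) << (7 * i);
    if (!(byte & 0x80)) {
      *v = result;
      return true;
    }
  }
  return false;
}

int main() {
  std::string prefix;
  AppendVarint32(&prefix, 1u << 20);   // a 1 MiB uncompressed length
  uint32_t decoded = 0;
  const bool ok = ParseVarint32(prefix, &decoded);
  std::printf("encoded in %zu bytes, ok=%d, value=%u\n",
              prefix.size(), ok, decoded);
  return 0;
}
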
-
-namespace internal {
-uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
-  // Use smaller hash table when input.size() is smaller, since we
-  // fill the table, incurring O(hash table size) overhead for
-  // compression, and if the input is short, we won't need that
-  // many hash table entries anyway.
-  assert(kMaxHashTableSize >= 256);
-  int htsize = 256;
-  while (htsize < kMaxHashTableSize && htsize < input_size) {
-    htsize <<= 1;
-  }
-  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
-  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
-
-  uint16* table;
-  if (htsize <= ARRAYSIZE(small_table_)) {
-    table = small_table_;
-  } else {
-    if (large_table_ == NULL) {
-      large_table_ = new uint16[kMaxHashTableSize];
-    }
-    table = large_table_;
-  }
-
-  *table_size = htsize;
-  memset(table, 0, htsize * sizeof(*table));
-  return table;
-}
-}  // end namespace internal
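
The sizing rule above picks the smallest power of two that covers the input, clamped to [256, kMaxHashTableSize]. A stand-alone sketch (illustrative only) of the same rule:

#include <cstddef>
#include <cstdio>

static int HashTableSize(size_t input_size, int max_size) {
  int htsize = 256;
  while (htsize < max_size && size_t(htsize) < input_size) {
    htsize <<= 1;
  }
  return htsize;
}

int main() {
  const int kMax = 1 << 14;   // kMaxHashTableSize in this tree
  const size_t inputs[] = {100, 256, 1000, 70000};
  for (size_t n : inputs) {
    std::printf("input_size=%-6zu table_size=%d\n", n, HashTableSize(n, kMax));
  }
  // Prints 256, 256, 1024 and 16384 respectively.
  return 0;
}
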
-
-// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
-// equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
-// empirically found that overlapping loads such as
-//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
-// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
-static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
-  DCHECK(0 <= offset && offset <= 4) << offset;
-  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
-}
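
On a little-endian machine the identity claimed above can be checked directly, using memcpy in place of the UNALIGNED_LOAD macros (illustrative only):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t Load64(const char* p) { uint64_t v; std::memcpy(&v, p, 8); return v; }
static uint32_t Load32(const char* p) { uint32_t v; std::memcpy(&v, p, 4); return v; }

// Little-endian branch of GetUint32AtOffset above.
static uint32_t GetUint32AtOffsetLE(uint64_t v, int offset) {
  return uint32_t(v >> (8 * offset));
}

int main() {
  const char data[16] = "abcdefghijklmno";
  const uint64_t v = Load64(data);
  for (int offset = 0; offset <= 4; ++offset) {
    assert(GetUint32AtOffsetLE(v, offset) == Load32(data + offset));
  }
  std::printf("one 64-bit load + shifts == five overlapping 32-bit loads\n");
  return 0;
}
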
-
-// Flat array compression that does not emit the "uncompressed length"
-// prefix. Compresses "input" string to the "*op" buffer.
-//
-// REQUIRES: "input" is at most "kBlockSize" bytes long.
-// REQUIRES: "op" points to an array of memory that is at least
-// "MaxCompressedLength(input.size())" in size.
-// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-// REQUIRES: "table_size" is a power of two
-//
-// Returns an "end" pointer into "op" buffer.
-// "end - op" is the compressed size of "input".
-namespace internal {
-char* CompressFragment(const char* input,
-                       size_t input_size,
-                       char* op,
-                       uint16* table,
-                       const int table_size) {
-  // "ip" is the input pointer, and "op" is the output pointer.
-  const char* ip = input;
-  CHECK_LE(input_size, kBlockSize);
-  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
-  const int shift = 32 - Bits::Log2Floor(table_size);
-  DCHECK_EQ(kuint32max >> shift, table_size - 1);
-  const char* ip_end = input + input_size;
-  const char* base_ip = ip;
-  // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
-  // [next_emit, ip_end) after the main loop.
-  const char* next_emit = ip;
-
-  const int kInputMarginBytes = 15;
-  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
-    const char* ip_limit = input + input_size - kInputMarginBytes;
-
-    for (uint32 next_hash = Hash(++ip, shift); ; ) {
-      DCHECK_LT(next_emit, ip);
-      // The body of this loop calls EmitLiteral once and then EmitCopy one or
-      // more times.  (The exception is that when we're close to exhausting
-      // the input we goto emit_remainder.)
-      //
-      // In the first iteration of this loop we're just starting, so
-      // there's nothing to copy, so calling EmitLiteral once is
-      // necessary.  And we only start a new iteration when the
-      // current iteration has determined that a call to EmitLiteral will
-      // precede the next call to EmitCopy (if any).
-      //
-      // Step 1: Scan forward in the input looking for a 4-byte-long match.
-      // If we get close to exhausting the input then goto emit_remainder.
-      //
-      // Heuristic match skipping: If 32 bytes are scanned with no matches
-      // found, start looking only at every other byte. If 32 more bytes are
-      // scanned, look at every third byte, etc.. When a match is found,
-      // immediately go back to looking at every byte. This is a small loss
-      // (~5% performance, ~0.1% density) for compressible data due to more
-      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
-      // win since the compressor quickly "realizes" the data is incompressible
-      // and doesn't bother looking for matches everywhere.
-      //
-      // The "skip" variable keeps track of how many bytes there are since the
-      // last match; dividing it by 32 (ie. right-shifting by five) gives the
-      // number of bytes to move ahead for each iteration.
-      uint32 skip = 32;
-
-      const char* next_ip = ip;
-      const char* candidate;
-      do {
-        ip = next_ip;
-        uint32 hash = next_hash;
-        DCHECK_EQ(hash, Hash(ip, shift));
-        uint32 bytes_between_hash_lookups = skip++ >> 5;
-        next_ip = ip + bytes_between_hash_lookups;
-        if (PREDICT_FALSE(next_ip > ip_limit)) {
-          goto emit_remainder;
-        }
-        next_hash = Hash(next_ip, shift);
-        candidate = base_ip + table[hash];
-        DCHECK_GE(candidate, base_ip);
-        DCHECK_LT(candidate, ip);
-
-        table[hash] = ip - base_ip;
-      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
-                            UNALIGNED_LOAD32(candidate)));
-
-      // Step 2: A 4-byte match has been found.  We'll later see if more
-      // than 4 bytes match.  But, prior to the match, input
-      // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
-      DCHECK_LE(next_emit + 16, ip_end);
-      op = EmitLiteral(op, next_emit, ip - next_emit, true);
-
-      // Step 3: Call EmitCopy, and then see if another EmitCopy could
-      // be our next move.  Repeat until we find no match for the
-      // input immediately after what was consumed by the last EmitCopy call.
-      //
-      // If we exit this loop normally then we need to call EmitLiteral next,
-      // though we don't yet know how big the literal will be.  We handle that
-      // by proceeding to the next iteration of the main loop.  We also can exit
-      // this loop via goto if we get close to exhausting the input.
-      uint64 input_bytes = 0;
-      uint32 candidate_bytes = 0;
-
-      do {
-        // We have a 4-byte match at ip, and no need to emit any
-        // "literal bytes" prior to ip.
-        const char* base = ip;
-        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
-        ip += matched;
-        int offset = base - candidate;
-        DCHECK_EQ(0, memcmp(base, candidate, matched));
-        op = EmitCopy(op, offset, matched);
-        // We could immediately start working at ip now, but to improve
-        // compression we first update table[Hash(ip - 1, ...)].
-        const char* insert_tail = ip - 1;
-        next_emit = ip;
-        if (PREDICT_FALSE(ip >= ip_limit)) {
-          goto emit_remainder;
-        }
-        input_bytes = UNALIGNED_LOAD64(insert_tail);
-        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
-        table[prev_hash] = ip - base_ip - 1;
-        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
-        candidate = base_ip + table[cur_hash];
-        candidate_bytes = UNALIGNED_LOAD32(candidate);
-        table[cur_hash] = ip - base_ip;
-      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
-      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
-      ++ip;
-    }
-  }
-
- emit_remainder:
-  // Emit the remaining bytes as a literal
-  if (next_emit < ip_end) {
-    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
-  }
-
-  return op;
-}
-}  // end namespace internal
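
The match-skipping heuristic inside CompressFragment is easiest to see as a schedule: with skip starting at 32 and the step computed as skip++ >> 5, the scan advances one byte per probe for the first 32 probes, then two, then three, and so on. A small trace (illustrative only):

#include <cstdio>

int main() {
  unsigned skip = 32;
  unsigned long scanned = 0;
  for (int probe = 1; probe <= 128; ++probe) {
    const unsigned step = skip++ >> 5;
    scanned += step;
    if (probe % 32 == 0) {
      std::printf("probe %3d: step=%u, total bytes skipped=%lu\n",
                  probe, step, scanned);
    }
  }
  return 0;
}
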
-
-// Signature of output types needed by decompression code.
-// The decompression code is templatized on a type that obeys this
-// signature so that we do not pay virtual function call overhead in
-// the middle of a tight decompression loop.
-//
-// class DecompressionWriter {
-//  public:
-//   // Called before decompression
-//   void SetExpectedLength(size_t length);
-//
-//   // Called after decompression
-//   bool CheckLength() const;
-//
-//   // Called repeatedly during decompression
-//   bool Append(const char* ip, uint32 length, bool allow_fast_path);
-//   bool AppendFromSelf(uint32 offset, uint32 length);
-// };
-//
-// "allow_fast_path" is a parameter that says if there is at least 16
-// readable bytes in "ip". It is currently only used by SnappyArrayWriter.
-
-// -----------------------------------------------------------------------
-// Lookup table for decompression code.  Generated by ComputeTable() below.
-// -----------------------------------------------------------------------
-
-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
-static const uint32 wordmask[] = {
-  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
-// Data stored per entry in lookup table:
-//      Range   Bits-used       Description
-//      ------------------------------------
-//      1..64   0..7            Literal/copy length encoded in opcode byte
-//      0..7    8..10           Copy offset encoded in opcode byte / 256
-//      0..4    11..13          Extra bytes after opcode
-//
-// We use eight bits for the length even though 7 would have sufficed,
-// for efficiency reasons:
-//      (1) Extracting a byte is faster than a bit-field
-//      (2) It properly aligns copy offset so we do not need a <<8
-static const uint16 char_table[256] = {
-  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
-  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
-  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
-  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
-  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
-  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
-  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
-  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
-  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
-  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
-  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
-  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
-  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
-  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
-  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
-  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
-  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
-  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
-  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
-  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
-  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
-  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
-  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
-  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
-  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
-  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
-  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
-  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
-  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
-  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
-  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
-  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
-};
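
One worked example of the entry layout documented above the table: char_table[0x01] is 0x0804, the entry for a copy with a one-byte offset (illustrative decode only).

#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t entry = 0x0804;                     // char_table[0x01]
  const unsigned length      = entry & 0xff;         // 4: copy length from the opcode
  const unsigned offset_high = (entry >> 8) & 0x7;   // 0: copy offset / 256
  const unsigned extra_bytes = entry >> 11;          // 1: trailing byte after the opcode
  std::printf("length=%u  offset/256=%u  extra bytes=%u\n",
              length, offset_high, extra_bytes);
  return 0;
}
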
-
-// In debug mode, allow optional computation of the table at startup.
-// Also, check that the decompression table is correct.
-#ifndef NDEBUG
-DEFINE_bool(snappy_dump_decompression_table, false,
-            "If true, we print the decompression table at startup.");
-
-static uint16 MakeEntry(unsigned int extra,
-                        unsigned int len,
-                        unsigned int copy_offset) {
-  // Check that all of the fields fit within the allocated space
-  DCHECK_EQ(extra,       extra & 0x7);          // At most 3 bits
-  DCHECK_EQ(copy_offset, copy_offset & 0x7);    // At most 3 bits
-  DCHECK_EQ(len,         len & 0x7f);           // At most 7 bits
-  return len | (copy_offset << 8) | (extra << 11);
-}
-
-static void ComputeTable() {
-  uint16 dst[256];
-
-  // Place invalid entries in all places to detect missing initialization
-  int assigned = 0;
-  for (int i = 0; i < 256; i++) {
-    dst[i] = 0xffff;
-  }
-
-  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
-  for (unsigned int len = 1; len <= 60; len++) {
-    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
-    assigned++;
-  }
-
-  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
-  // encode the number of bytes of length info that follow the opcode.
-  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
-    // We set the length field in the lookup table to 1 because extra
-    // bytes encode len-1.
-    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
-    assigned++;
-  }
-
-  // COPY_1_BYTE_OFFSET.
-  //
-  // The tag byte in the compressed data stores len-4 in 3 bits, and
-  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
-  //
-  // This format is used for length in range [4..11] and offset in
-  // range [0..2047]
-  for (unsigned int len = 4; len < 12; len++) {
-    for (unsigned int offset = 0; offset < 2048; offset += 256) {
-      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
-        MakeEntry(1, len, offset>>8);
-      assigned++;
-    }
-  }
-
-  // COPY_2_BYTE_OFFSET.
-  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
-    assigned++;
-  }
-
-  // COPY_4_BYTE_OFFSET.
-  // Tag contains len-1 in top 6 bits, and offset in next four bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
-    assigned++;
-  }
-
-  // Check that each entry was initialized exactly once.
-  CHECK_EQ(assigned, 256);
-  for (int i = 0; i < 256; i++) {
-    CHECK_NE(dst[i], 0xffff);
-  }
-
-  if (FLAGS_snappy_dump_decompression_table) {
-    printf("static const uint16 char_table[256] = {\n  ");
-    for (int i = 0; i < 256; i++) {
-      printf("0x%04x%s",
-             dst[i],
-             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
-    }
-    printf("};\n");
-  }
-
-  // Check that computed table matched recorded table
-  for (int i = 0; i < 256; i++) {
-    CHECK_EQ(dst[i], char_table[i]);
-  }
-}
-REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
-#endif /* !NDEBUG */
-
-// Helper class for decompression
-class SnappyDecompressor {
- private:
-  Source*       reader_;         // Underlying source of bytes to decompress
-  const char*   ip_;             // Points to next buffered byte
-  const char*   ip_limit_;       // Points just past buffered bytes
-  uint32        peeked_;         // Bytes peeked from reader (need to skip)
-  bool          eof_;            // Hit end of input without an error?
-  char          scratch_[5];     // Temporary buffer for PeekFast() boundaries
-
-  // Ensure that all of the tag metadata for the next tag is available
-  // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
-  // if (ip_limit_ - ip_ < 5).
-  //
-  // Returns true on success, false on error or end of input.
-  bool RefillTag();
-
- public:
-  explicit SnappyDecompressor(Source* reader)
-      : reader_(reader),
-        ip_(NULL),
-        ip_limit_(NULL),
-        peeked_(0),
-        eof_(false) {
-  }
-
-  ~SnappyDecompressor() {
-    // Advance past any bytes we peeked at from the reader
-    reader_->Skip(peeked_);
-  }
-
-  // Returns true iff we have hit the end of the input without an error.
-  bool eof() const {
-    return eof_;
-  }
-
-  // Read the uncompressed length stored at the start of the compressed data.
-  // On success, stores the length in *result and returns true.
-  // On failure, returns false.
-  bool ReadUncompressedLength(uint32* result) {
-    DCHECK(ip_ == NULL);       // Must not have read anything yet
-    // Length is encoded in 1..5 bytes
-    *result = 0;
-    uint32 shift = 0;
-    while (true) {
-      if (shift >= 32) return false;
-      size_t n;
-      const char* ip = reader_->Peek(&n);
-      if (n == 0) return false;
-      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
-      reader_->Skip(1);
-      *result |= static_cast<uint32>(c & 0x7f) << shift;
-      if (c < 128) {
-        break;
-      }
-      shift += 7;
-    }
-    return true;
-  }
-
-  // Process every item found in the input, forwarding each one to the
-  // given writer.  Returns when the input is exhausted, an error occurs,
-  // or the writer refuses more data.
-  template <class Writer>
-  void DecompressAllTags(Writer* writer) {
-    const char* ip = ip_;
-    for ( ;; ) {
-      if (ip_limit_ - ip < 5) {
-        ip_ = ip;
-        if (!RefillTag()) return;
-        ip = ip_;
-      }
-
-      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
-
-      if ((c & 0x3) == LITERAL) {
-        uint32 literal_length = c >> 2;
-        if (PREDICT_FALSE(literal_length >= 60)) {
-          // Long literal.
-          const uint32 literal_length_length = literal_length - 59;
-          literal_length =
-              LittleEndian::Load32(ip) & wordmask[literal_length_length];
-          ip += literal_length_length;
-        }
-        ++literal_length;
-
-        uint32 avail = ip_limit_ - ip;
-        while (avail < literal_length) {
-          if (!writer->Append(ip, avail, false)) return;
-          literal_length -= avail;
-          reader_->Skip(peeked_);
-          size_t n;
-          ip = reader_->Peek(&n);
-          avail = n;
-          peeked_ = avail;
-          if (avail == 0) return;  // Premature end of input
-          ip_limit_ = ip + avail;
-        }
-        bool allow_fast_path = (avail >= 16);
-        if (!writer->Append(ip, literal_length, allow_fast_path)) {
-          return;
-        }
-        ip += literal_length;
-      } else {
-        const uint32 entry = char_table[c];
-        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
-        const uint32 length = entry & 0xff;
-        ip += entry >> 11;
-
-        // copy_offset/256 is encoded in bits 8..10.  By just fetching
-        // those bits, we get copy_offset (since the bit-field starts at
-        // bit 8).
-        const uint32 copy_offset = entry & 0x700;
-        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
-          return;
-        }
-      }
-    }
-  }
-};
-
-bool SnappyDecompressor::RefillTag() {
-  const char* ip = ip_;
-  if (ip == ip_limit_) {
-    // Fetch a new fragment from the reader
-    reader_->Skip(peeked_);   // All peeked bytes are used up
-    size_t n;
-    ip = reader_->Peek(&n);
-    peeked_ = n;
-    if (n == 0) {
-      eof_ = true;
-      return false;
-    }
-    ip_limit_ = ip + n;
-  }
-
-  // Read the tag character
-  DCHECK_LT(ip, ip_limit_);
-  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
-  const uint32 entry = char_table[c];
-  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
-  DCHECK_LE(needed, sizeof(scratch_));
-
-  // Read more bytes from reader if needed
-  uint32 nbuf = ip_limit_ - ip;
-  if (nbuf < needed) {
-    // Stitch together bytes from ip and reader to form the word
-    // contents.  We store the needed bytes in "scratch_".  They
-    // will be consumed immediately by the caller since we do not
-    // read more than we need.
-    memmove(scratch_, ip, nbuf);
-    reader_->Skip(peeked_);  // All peeked bytes are used up
-    peeked_ = 0;
-    while (nbuf < needed) {
-      size_t length;
-      const char* src = reader_->Peek(&length);
-      if (length == 0) return false;
-      uint32 to_add = min<uint32>(needed - nbuf, length);
-      memcpy(scratch_ + nbuf, src, to_add);
-      nbuf += to_add;
-      reader_->Skip(to_add);
-    }
-    DCHECK_EQ(nbuf, needed);
-    ip_ = scratch_;
-    ip_limit_ = scratch_ + needed;
-  } else if (nbuf < 5) {
-    // Have enough bytes, but move into scratch_ so that we do not
-    // read past end of input
-    memmove(scratch_, ip, nbuf);
-    reader_->Skip(peeked_);  // All peeked bytes are used up
-    peeked_ = 0;
-    ip_ = scratch_;
-    ip_limit_ = scratch_ + nbuf;
-  } else {
-    // Pass pointer to buffer returned by reader_.
-    ip_ = ip;
-  }
-  return true;
-}
-
-template <typename Writer>
-static bool InternalUncompress(Source* r,
-                               Writer* writer,
-                               uint32 max_len) {
-  // Read the uncompressed length from the front of the compressed input
-  SnappyDecompressor decompressor(r);
-  uint32 uncompressed_len = 0;
-  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
-  // Protect against possible DoS attack
-  if (static_cast<uint64>(uncompressed_len) > max_len) {
-    return false;
-  }
-
-  writer->SetExpectedLength(uncompressed_len);
-
-  // Process the entire input
-  decompressor.DecompressAllTags(writer);
-  return (decompressor.eof() && writer->CheckLength());
-}
-
-bool GetUncompressedLength(Source* source, uint32* result) {
-  SnappyDecompressor decompressor(source);
-  return decompressor.ReadUncompressedLength(result);
-}
-
-size_t Compress(Source* reader, Sink* writer) {
-  size_t written = 0;
-  int N = reader->Available();
-  char ulength[Varint::kMax32];
-  char* p = Varint::Encode32(ulength, N);
-  writer->Append(ulength, p-ulength);
-  written += (p - ulength);
-
-  internal::WorkingMemory wmem;
-  char* scratch = NULL;
-  char* scratch_output = NULL;
-
-  while (N > 0) {
-    // Get next block to compress (without copying if possible)
-    size_t fragment_size;
-    const char* fragment = reader->Peek(&fragment_size);
-    DCHECK_NE(fragment_size, 0) << ": premature end of input";
-    const int num_to_read = min(N, kBlockSize);
-    size_t bytes_read = fragment_size;
-
-    int pending_advance = 0;
-    if (bytes_read >= num_to_read) {
-      // Buffer returned by reader is large enough
-      pending_advance = num_to_read;
-      fragment_size = num_to_read;
-    } else {
-      // Read into scratch buffer
-      if (scratch == NULL) {
-        // If this is the last iteration, we want to allocate N bytes
-        // of space, otherwise the max possible kBlockSize space.
-        // num_to_read contains exactly the correct value
-        scratch = new char[num_to_read];
-      }
-      memcpy(scratch, fragment, bytes_read);
-      reader->Skip(bytes_read);
-
-      while (bytes_read < num_to_read) {
-        fragment = reader->Peek(&fragment_size);
-        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
-        memcpy(scratch + bytes_read, fragment, n);
-        bytes_read += n;
-        reader->Skip(n);
-      }
-      DCHECK_EQ(bytes_read, num_to_read);
-      fragment = scratch;
-      fragment_size = num_to_read;
-    }
-    DCHECK_EQ(fragment_size, num_to_read);
-
-    // Get encoding table for compression
-    int table_size;
-    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
-
-    // Compress input_fragment and append to dest
-    const int max_output = MaxCompressedLength(num_to_read);
-
-    // Need a scratch buffer for the output, in case the byte sink doesn't
-    // have room for us directly.
-    if (scratch_output == NULL) {
-      scratch_output = new char[max_output];
-    } else {
-      // Since we encode kBlockSize regions followed by a region
-      // which is <= kBlockSize in length, a previously allocated
-      // scratch_output[] region is big enough for this iteration.
-    }
-    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
-    char* end = internal::CompressFragment(fragment, fragment_size,
-                                           dest, table, table_size);
-    writer->Append(dest, end - dest);
-    written += (end - dest);
-
-    N -= num_to_read;
-    reader->Skip(pending_advance);
-  }
-
-  delete[] scratch;
-  delete[] scratch_output;
-
-  return written;
-}
-
-// -----------------------------------------------------------------------
-// Flat array interfaces
-// -----------------------------------------------------------------------
-
-// A type that writes to a flat array.
-// Note that this is not a "ByteSink", but a type that matches the
-// Writer template argument to SnappyDecompressor::DecompressAllTags().
-class SnappyArrayWriter {
- private:
-  char* base_;
-  char* op_;
-  char* op_limit_;
-
- public:
-  inline explicit SnappyArrayWriter(char* dst)
-      : base_(dst),
-        op_(dst) {
-  }
-
-  inline void SetExpectedLength(size_t len) {
-    op_limit_ = op_ + len;
-  }
-
-  inline bool CheckLength() const {
-    return op_ == op_limit_;
-  }
-
-  inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
-    char* op = op_;
-    const int space_left = op_limit_ - op;
-    if (allow_fast_path && len <= 16 && space_left >= 16) {
-      // Fast path, used for the majority (about 90%) of dynamic invocations.
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
-    } else {
-      if (space_left < len) {
-        return false;
-      }
-      memcpy(op, ip, len);
-    }
-    op_ = op + len;
-    return true;
-  }
-
-  inline bool AppendFromSelf(uint32 offset, uint32 len) {
-    char* op = op_;
-    const int space_left = op_limit_ - op;
-
-    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
-      return false;
-    }
-    if (len <= 16 && offset >= 8 && space_left >= 16) {
-      // Fast path, used for the majority (70-80%) of dynamic invocations.
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
-    } else {
-      if (space_left >= len + kMaxIncrementCopyOverflow) {
-        IncrementalCopyFastPath(op - offset, op, len);
-      } else {
-        if (space_left < len) {
-          return false;
-        }
-        IncrementalCopy(op - offset, op, len);
-      }
-    }
-
-    op_ = op + len;
-    return true;
-  }
-};
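
The "-1u catches offset==0" comparison in AppendFromSelf relies on unsigned wraparound: 0 - 1u becomes UINT32_MAX, so a single comparison rejects both a zero offset and an offset that reaches before the start of the output. A small sketch (illustrative only):

#include <cstdint>
#include <cstdio>

static bool OffsetIsValid(uint32_t bytes_produced, uint32_t offset) {
  // Mirrors the check used by AppendFromSelf above.
  return bytes_produced > offset - 1u;
}

int main() {
  std::printf("%d\n", OffsetIsValid(10, 0));    // 0: zero offsets are invalid
  std::printf("%d\n", OffsetIsValid(10, 10));   // 1: may reach back to the first byte
  std::printf("%d\n", OffsetIsValid(10, 11));   // 0: would read before the output
  return 0;
}
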
-
-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
-  ByteArraySource reader(compressed, n);
-  return RawUncompress(&reader, uncompressed);
-}
-
-bool RawUncompress(Source* compressed, char* uncompressed) {
-  SnappyArrayWriter output(uncompressed);
-  return InternalUncompress(compressed, &output, kuint32max);
-}
-
-bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
-  size_t ulength;
-  if (!GetUncompressedLength(compressed, n, &ulength)) {
-    return false;
-  }
-  // Protect against possible DoS attack
-  if ((static_cast<uint64>(ulength) + uncompressed->size()) >
-      uncompressed->max_size()) {
-    return false;
-  }
-  STLStringResizeUninitialized(uncompressed, ulength);
-  return RawUncompress(compressed, n, string_as_array(uncompressed));
-}
-
-
-// A Writer that drops everything on the floor and just does validation
-class SnappyDecompressionValidator {
- private:
-  size_t expected_;
-  size_t produced_;
-
- public:
-  inline SnappyDecompressionValidator() : produced_(0) { }
-  inline void SetExpectedLength(size_t len) {
-    expected_ = len;
-  }
-  inline bool CheckLength() const {
-    return expected_ == produced_;
-  }
-  inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
-    produced_ += len;
-    return produced_ <= expected_;
-  }
-  inline bool AppendFromSelf(uint32 offset, uint32 len) {
-    if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
-    produced_ += len;
-    return produced_ <= expected_;
-  }
-};
-
-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
-  ByteArraySource reader(compressed, n);
-  SnappyDecompressionValidator writer;
-  return InternalUncompress(&reader, &writer, kuint32max);
-}
-
-void RawCompress(const char* input,
-                 size_t input_length,
-                 char* compressed,
-                 size_t* compressed_length) {
-  ByteArraySource reader(input, input_length);
-  UncheckedByteArraySink writer(compressed);
-  Compress(&reader, &writer);
-
-  // Compute how many bytes were added
-  *compressed_length = (writer.CurrentDestination() - compressed);
-}
-
-size_t Compress(const char* input, size_t input_length, string* compressed) {
-  // Pre-grow the buffer to the max length of the compressed output
-  compressed->resize(MaxCompressedLength(input_length));
-
-  size_t compressed_length;
-  RawCompress(input, input_length, string_as_array(compressed),
-              &compressed_length);
-  compressed->resize(compressed_length);
-  return compressed_length;
-}
-
-
-} // end namespace snappy
-
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2005 and onwards Google Inc.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// A light-weight compression algorithm.  It is designed for speed of
-// compression and decompression, rather than for the utmost in space
-// savings.
-//
-// For getting better compression ratios when you are compressing data
-// with long repeated sequences or compressing data that is similar to
-// other data, while still compressing fast, you might look at first
-// using BMDiff and then compressing the output of BMDiff with
-// Snappy.
-
-#ifndef UTIL_SNAPPY_SNAPPY_H__
-#define UTIL_SNAPPY_SNAPPY_H__
-
-#include <stddef.h>
-#include <string>
-
-#include "snappy-stubs-public.h"
-
-namespace snappy {
-  class Source;
-  class Sink;
-
-  // ------------------------------------------------------------------------
-  // Generic compression/decompression routines.
-  // ------------------------------------------------------------------------
-
-  // Compress the bytes read from "*source" and append to "*sink". Return the
-  // number of bytes written.
-  size_t Compress(Source* source, Sink* sink);
-
-  bool GetUncompressedLength(Source* source, uint32* result);
-
-  // ------------------------------------------------------------------------
-  // Higher-level string based routines (should be sufficient for most users)
-  // ------------------------------------------------------------------------
-
-  // Sets "*output" to the compressed version of "input[0,input_length-1]".
-  // Original contents of *output are lost.
-  //
-  // REQUIRES: "input[]" is not an alias of "*output".
-  size_t Compress(const char* input, size_t input_length, string* output);
-
-  // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
-  // Original contents of "*uncompressed" are lost.
-  //
-  // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
-  //
-  // returns false if the message is corrupted and could not be decompressed
-  bool Uncompress(const char* compressed, size_t compressed_length,
-                  string* uncompressed);
-
-
-  // ------------------------------------------------------------------------
-  // Lower-level character array based routines.  May be useful for
-  // efficiency reasons in certain circumstances.
-  // ------------------------------------------------------------------------
-
-  // REQUIRES: "compressed" must point to an area of memory that is at
-  // least "MaxCompressedLength(input_length)" bytes in length.
-  //
-  // Takes the data stored in "input[0..input_length-1]" and stores
-  // it in the array pointed to by "compressed".
-  //
-  // "*compressed_length" is set to the length of the compressed output.
-  //
-  // Example:
-  //    char* output = new char[snappy::MaxCompressedLength(input_length)];
-  //    size_t output_length;
-  //    RawCompress(input, input_length, output, &output_length);
-  //    ... Process(output, output_length) ...
-  //    delete [] output;
-  void RawCompress(const char* input,
-                   size_t input_length,
-                   char* compressed,
-                   size_t* compressed_length);
-
-  // Given data in "compressed[0..compressed_length-1]" generated by
-  // calling the Snappy::Compress routine, this routine
-  // stores the uncompressed data to
-  //    uncompressed[0..GetUncompressedLength(compressed)-1]
-  // returns false if the message is corrupted and could not be decompressed
-  bool RawUncompress(const char* compressed, size_t compressed_length,
-                     char* uncompressed);
-
-  // Given data from the byte source 'compressed' generated by calling
-  // the Snappy::Compress routine, this routine stores the uncompressed
-  // data to
-  //    uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
-  // returns false if the message is corrupted and could not be decompressed
-  bool RawUncompress(Source* compressed, char* uncompressed);
-
-  // Returns the maximal size of the compressed representation of
-  // input data that is "source_bytes" bytes in length;
-  size_t MaxCompressedLength(size_t source_bytes);
-
-  // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
-  // Returns true and stores the length of the uncompressed data in
-  // *result normally.  Returns false on parsing error.
-  // This operation takes O(1) time.
-  bool GetUncompressedLength(const char* compressed, size_t compressed_length,
-                             size_t* result);
-
-  // Returns true iff the contents of "compressed[]" can be uncompressed
-  // successfully.  Does not return the uncompressed data.  Takes
-  // time proportional to compressed_length, but is usually at least
-  // a factor of four faster than actual decompression.
-  bool IsValidCompressedBuffer(const char* compressed,
-                               size_t compressed_length);
-
-  // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
-  //
-  // New Compression code chops up the input into blocks of at most
-  // the following size.  This ensures that back-references in the
-  // output never cross kBlockSize block boundaries.  This can be
-  // helpful in implementing blocked decompression.  However the
-  // decompression code should not rely on this guarantee since older
-  // compression code may not obey it.
-  static const int kBlockLog = 15;
-  static const int kBlockSize = 1 << kBlockLog;
-
-  static const int kMaxHashTableBits = 14;
-  static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
-
-}  // end namespace snappy
-
-
-#endif  // UTIL_SNAPPY_SNAPPY_H__
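
For reference, a hedged usage sketch of the string-based API declared above (a plain compress/uncompress roundtrip; the unit test file that follows exercises the same entry points far more thoroughly):

#include <cstdio>
#include <string>
#include "snappy.h"

int main() {
  const std::string original(100000, 'x');    // highly compressible input
  std::string compressed, restored;

  snappy::Compress(original.data(), original.size(), &compressed);
  if (!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()) ||
      !snappy::Uncompress(compressed.data(), compressed.size(), &restored) ||
      restored != original) {
    std::fprintf(stderr, "roundtrip failed\n");
    return 1;
  }
  std::printf("%zu bytes -> %zu bytes\n", original.size(), compressed.size());
  return 0;
}
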
deleted file mode 100644
--- a/other-licenses/snappy/src/snappy_unittest.cc
+++ /dev/null
@@ -1,1153 +0,0 @@
-// Copyright 2005 and onwards Google Inc.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <math.h>
-#include <stdlib.h>
-
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "snappy.h"
-#include "snappy-internal.h"
-#include "snappy-test.h"
-#include "snappy-sinksource.h"
-
-DEFINE_int32(start_len, -1,
-             "Starting prefix size for testing (-1: just full file contents)");
-DEFINE_int32(end_len, -1,
-             "Starting prefix size for testing (-1: just full file contents)");
-DEFINE_int32(bytes, 10485760,
-             "How many bytes to compress/uncompress per file for timing");
-
-DEFINE_bool(zlib, false,
-            "Run zlib compression (http://www.zlib.net)");
-DEFINE_bool(lzo, false,
-            "Run LZO compression (http://www.oberhumer.com/opensource/lzo/)");
-DEFINE_bool(quicklz, false,
-            "Run quickLZ compression (http://www.quicklz.com/)");
-DEFINE_bool(liblzf, false,
-            "Run libLZF compression "
-            "(http://www.goof.com/pcg/marc/liblzf.html)");
-DEFINE_bool(fastlz, false,
-            "Run FastLZ compression (http://www.fastlz.org/");
-DEFINE_bool(snappy, true, "Run snappy compression");
-
-
-DEFINE_bool(write_compressed, false,
-            "Write compressed versions of each file to <file>.comp");
-DEFINE_bool(write_uncompressed, false,
-            "Write uncompressed versions of each file to <file>.uncomp");
-
-namespace snappy {
-
-
-#ifdef HAVE_FUNC_MMAP
-
-// To test against code that reads beyond its input, this class copies a
-// string to a newly allocated group of pages, the last of which
-// is made unreadable via mprotect. Note that we need to allocate the
-// memory with mmap(), as POSIX allows mprotect() only on memory allocated
-// with mmap(), and some malloc/posix_memalign implementations expect to
-// be able to read previously allocated memory while doing heap allocations.
-class DataEndingAtUnreadablePage {
- public:
-  explicit DataEndingAtUnreadablePage(const string& s) {
-    const size_t page_size = getpagesize();
-    const size_t size = s.size();
-    // Round up space for string to a multiple of page_size.
-    size_t space_for_string = (size + page_size - 1) & ~(page_size - 1);
-    alloc_size_ = space_for_string + page_size;
-    mem_ = mmap(NULL, alloc_size_,
-                PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    CHECK_NE(MAP_FAILED, mem_);
-    protected_page_ = reinterpret_cast<char*>(mem_) + space_for_string;
-    char* dst = protected_page_ - size;
-    memcpy(dst, s.data(), size);
-    data_ = dst;
-    size_ = size;
-    // Make guard page unreadable.
-    CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_NONE));
-  }
-
-  ~DataEndingAtUnreadablePage() {
-    // Undo the mprotect.
-    CHECK_EQ(0, mprotect(protected_page_, getpagesize(), PROT_READ|PROT_WRITE));
-    CHECK_EQ(0, munmap(mem_, alloc_size_));
-  }
-
-  const char* data() const { return data_; }
-  size_t size() const { return size_; }
-
- private:
-  size_t alloc_size_;
-  void* mem_;
-  char* protected_page_;
-  const char* data_;
-  size_t size_;
-};
-
-#else  // HAVE_FUNC_MMAP
-
-// Fallback for systems without mmap.
-typedef string DataEndingAtUnreadablePage;
-
-#endif
-
-enum CompressorType {
-  ZLIB, LZO, LIBLZF, QUICKLZ, FASTLZ, SNAPPY
-};
-
-const char* names[] = {
-  "ZLIB", "LZO", "LIBLZF", "QUICKLZ", "FASTLZ", "SNAPPY"
-};
-
-static size_t MinimumRequiredOutputSpace(size_t input_size,
-                                         CompressorType comp) {
-  switch (comp) {
-#ifdef ZLIB_VERSION
-    case ZLIB:
-      return ZLib::MinCompressbufSize(input_size);
-#endif  // ZLIB_VERSION
-
-#ifdef LZO_VERSION
-    case LZO:
-      return input_size + input_size/64 + 16 + 3;
-#endif  // LZO_VERSION
-
-#ifdef LZF_VERSION
-    case LIBLZF:
-      return input_size;
-#endif  // LZF_VERSION
-
-#ifdef QLZ_VERSION_MAJOR
-    case QUICKLZ:
-      return input_size + 36000;  // 36000 is used for scratch.
-#endif  // QLZ_VERSION_MAJOR
-
-#ifdef FASTLZ_VERSION
-    case FASTLZ:
-      return max(static_cast<int>(ceil(input_size * 1.05)), 66);
-#endif  // FASTLZ_VERSION
-
-    case SNAPPY:
-      return snappy::MaxCompressedLength(input_size);
-
-    default:
-      LOG(FATAL) << "Unknown compression type number " << comp;
-  }
-}
-
-// Returns true if we successfully compressed, false otherwise.
-//
-// If compressed_is_preallocated is set, do not resize the compressed buffer.
-// This is typically what you want for a benchmark, in order to not spend
-// time in the memory allocator. If you do set this flag, however,
-// "compressed" must be preinitialized to at least MinCompressbufSize(comp)
-// number of bytes, and may contain junk bytes at the end after return.
-static bool Compress(const char* input, size_t input_size, CompressorType comp,
-                     string* compressed, bool compressed_is_preallocated) {
-  if (!compressed_is_preallocated) {
-    compressed->resize(MinimumRequiredOutputSpace(input_size, comp));
-  }
-
-  switch (comp) {
-#ifdef ZLIB_VERSION
-    case ZLIB: {
-      ZLib zlib;
-      uLongf destlen = compressed->size();
-      int ret = zlib.Compress(
-          reinterpret_cast<Bytef*>(string_as_array(compressed)),
-          &destlen,
-          reinterpret_cast<const Bytef*>(input),
-          input_size);
-      CHECK_EQ(Z_OK, ret);
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      return true;
-    }
-#endif  // ZLIB_VERSION
-
-#ifdef LZO_VERSION
-    case LZO: {
-      unsigned char* mem = new unsigned char[LZO1X_1_15_MEM_COMPRESS];
-      lzo_uint destlen;
-      int ret = lzo1x_1_15_compress(
-          reinterpret_cast<const uint8*>(input),
-          input_size,
-          reinterpret_cast<uint8*>(string_as_array(compressed)),
-          &destlen,
-          mem);
-      CHECK_EQ(LZO_E_OK, ret);
-      delete[] mem;
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      break;
-    }
-#endif  // LZO_VERSION
-
-#ifdef LZF_VERSION
-    case LIBLZF: {
-      int destlen = lzf_compress(input,
-                                 input_size,
-                                 string_as_array(compressed),
-                                 input_size);
-      if (destlen == 0) {
-        // lzf *can* cause lots of blowup when compressing, so they
-        // recommend limiting outsize to insize, and just not compressing
-        // if it's bigger.  Ideally, we'd just swap input and output.
-        compressed->assign(input, input_size);
-        destlen = input_size;
-      }
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      break;
-    }
-#endif  // LZF_VERSION
-
-#ifdef QLZ_VERSION_MAJOR
-    case QUICKLZ: {
-      qlz_state_compress *state_compress = new qlz_state_compress;
-      int destlen = qlz_compress(input,
-                                 string_as_array(compressed),
-                                 input_size,
-                                 state_compress);
-      delete state_compress;
-      CHECK_NE(0, destlen);
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      break;
-    }
-#endif  // QLZ_VERSION_MAJOR
-
-#ifdef FASTLZ_VERSION
-    case FASTLZ: {
-      // Use level 1 compression since we mostly care about speed.
-      int destlen = fastlz_compress_level(
-          1,
-          input,
-          input_size,
-          string_as_array(compressed));
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      CHECK_NE(destlen, 0);
-      break;
-    }
-#endif  // FASTLZ_VERSION
-
-    case SNAPPY: {
-      size_t destlen;
-      snappy::RawCompress(input, input_size,
-                          string_as_array(compressed),
-                          &destlen);
-      CHECK_LE(destlen, snappy::MaxCompressedLength(input_size));
-      if (!compressed_is_preallocated) {
-        compressed->resize(destlen);
-      }
-      break;
-    }
-
-
-    default: {
-      return false;     // the asked-for library wasn't compiled in
-    }
-  }
-  return true;
-}
-
-static bool Uncompress(const string& compressed, CompressorType comp,
-                       int size, string* output) {
-  switch (comp) {
-#ifdef ZLIB_VERSION
-    case ZLIB: {
-      output->resize(size);
-      ZLib zlib;
-      uLongf destlen = output->size();
-      int ret = zlib.Uncompress(
-          reinterpret_cast<Bytef*>(string_as_array(output)),
-          &destlen,
-          reinterpret_cast<const Bytef*>(compressed.data()),
-          compressed.size());
-      CHECK_EQ(Z_OK, ret);
-      CHECK_EQ(destlen, size);
-      break;
-    }
-#endif  // ZLIB_VERSION
-
-#ifdef LZO_VERSION
-    case LZO: {
-      output->resize(size);
-      lzo_uint destlen;
-      int ret = lzo1x_decompress(
-          reinterpret_cast<const uint8*>(compressed.data()),
-          compressed.size(),
-          reinterpret_cast<uint8*>(string_as_array(output)),
-          &destlen,
-          NULL);
-      CHECK_EQ(LZO_E_OK, ret);
-      CHECK_EQ(destlen, size);
-      break;
-    }
-#endif  // LZO_VERSION
-
-#ifdef LZF_VERSION
-    case LIBLZF: {
-      output->resize(size);
-      int destlen = lzf_decompress(compressed.data(),
-                                   compressed.size(),
-                                   string_as_array(output),
-                                   output->size());
-      if (destlen == 0) {
-        // This error probably means we had decided not to compress,
-        // and thus have stored input in output directly.
-        output->assign(compressed.data(), compressed.size());
-        destlen = compressed.size();
-      }
-      CHECK_EQ(destlen, size);
-      break;
-    }
-#endif  // LZF_VERSION
-
-#ifdef QLZ_VERSION_MAJOR
-    case QUICKLZ: {
-      output->resize(size);
-      qlz_state_decompress *state_decompress = new qlz_state_decompress;
-      int destlen = qlz_decompress(compressed.data(),
-                                   string_as_array(output),
-                                   state_decompress);
-      delete state_decompress;
-      CHECK_EQ(destlen, size);
-      break;
-    }
-#endif  // QLZ_VERSION_MAJOR
-
-#ifdef FASTLZ_VERSION
-    case FASTLZ: {
-      output->resize(size);
-      int destlen = fastlz_decompress(compressed.data(),
-                                      compressed.length(),
-                                      string_as_array(output),
-                                      size);
-      CHECK_EQ(destlen, size);
-      break;
-    }
-#endif  // FASTLZ_VERSION
-
-    case SNAPPY: {
-      snappy::RawUncompress(compressed.data(), compressed.size(),
-                            string_as_array(output));
-      break;
-    }
-
-
-    default: {
-      return false;     // the asked-for library wasn't compiled in
-    }
-  }
-  return true;
-}
-
-static void Measure(const char* data,
-                    size_t length,
-                    CompressorType comp,
-                    int repeats,
-                    int block_size) {
-  // Run tests a few times and pick the median running times
-  static const int kRuns = 5;
-  double ctime[kRuns];
-  double utime[kRuns];
-  int compressed_size = 0;
-
-  {
-    // Chop the input into blocks
-    int num_blocks = (length + block_size - 1) / block_size;
-    vector<const char*> input(num_blocks);
-    vector<size_t> input_length(num_blocks);
-    vector<string> compressed(num_blocks);
-    vector<string> output(num_blocks);
-    for (int b = 0; b < num_blocks; b++) {
-      int input_start = b * block_size;
-      int input_limit = min<int>((b+1)*block_size, length);
-      input[b] = data+input_start;
-      input_length[b] = input_limit-input_start;
-
-      // Pre-grow the output buffer so we don't measure string append time.
-      compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
-    }
-
-    // First, try one trial compression to make sure the code is compiled in
-    if (!Compress(input[0], input_length[0], comp, &compressed[0], true)) {
-      LOG(WARNING) << "Skipping " << names[comp] << ": "
-                   << "library not compiled in";
-      return;
-    }
-
-    for (int run = 0; run < kRuns; run++) {
-      CycleTimer ctimer, utimer;
-
-      for (int b = 0; b < num_blocks; b++) {
-        // Pre-grow the output buffer so we don't measure string append time.
-        compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
-      }
-
-      ctimer.Start();
-      for (int b = 0; b < num_blocks; b++)
-        for (int i = 0; i < repeats; i++)
-          Compress(input[b], input_length[b], comp, &compressed[b], true);
-      ctimer.Stop();
-
-      // Compress once more, with resizing, so we don't leave junk
-      // at the end that will confuse the decompressor.
-      for (int b = 0; b < num_blocks; b++) {
-        Compress(input[b], input_length[b], comp, &compressed[b], false);
-      }
-
-      for (int b = 0; b < num_blocks; b++) {
-        output[b].resize(input_length[b]);
-      }
-
-      utimer.Start();
-      for (int i = 0; i < repeats; i++)
-        for (int b = 0; b < num_blocks; b++)
-          Uncompress(compressed[b], comp, input_length[b], &output[b]);
-      utimer.Stop();
-
-      ctime[run] = ctimer.Get();
-      utime[run] = utimer.Get();
-    }
-
-    compressed_size = 0;
-    for (int i = 0; i < compressed.size(); i++) {
-      compressed_size += compressed[i].size();
-    }
-  }
-
-  sort(ctime, ctime + kRuns);
-  sort(utime, utime + kRuns);
-  const int med = kRuns/2;
-
-  float comp_rate = (length / ctime[med]) * repeats / 1048576.0;
-  float uncomp_rate = (length / utime[med]) * repeats / 1048576.0;
-  string x = names[comp];
-  x += ":";
-  string urate = (uncomp_rate >= 0)
-                 ? StringPrintf("%.1f", uncomp_rate)
-                 : string("?");
-  printf("%-7s [b %dM] bytes %6d -> %6d %4.1f%%  "
-         "comp %5.1f MB/s  uncomp %5s MB/s\n",
-         x.c_str(),
-         block_size/(1<<20),
-         static_cast<int>(length), static_cast<uint32>(compressed_size),
-         (compressed_size * 100.0) / max<int>(1, length),
-         comp_rate,
-         urate.c_str());
-}
-
-
-static int VerifyString(const string& input) {
-  string compressed;
-  DataEndingAtUnreadablePage i(input);
-  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
-  CHECK_EQ(written, compressed.size());
-  CHECK_LE(compressed.size(),
-           snappy::MaxCompressedLength(input.size()));
-  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-
-  string uncompressed;
-  DataEndingAtUnreadablePage c(compressed);
-  CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
-  CHECK_EQ(uncompressed, input);
-  return uncompressed.size();
-}
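Outside the DataEndingAtUnreadablePage harness, the same round trip needs nothing beyond the public string API; a minimal sketch, assuming only snappy.h and the standard library:

#include <string>
#include "snappy.h"  // "snappy/snappy.h" when built inside the Mozilla tree

// Compress, validate, and decompress a buffer through the string-based API.
bool RoundTrip(const std::string& input) {
  std::string compressed, uncompressed;
  snappy::Compress(input.data(), input.size(), &compressed);
  return snappy::IsValidCompressedBuffer(compressed.data(),
                                         compressed.size()) &&
         snappy::Uncompress(compressed.data(), compressed.size(),
                            &uncompressed) &&
         uncompressed == input;
}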
-
-
-// Test that data compressed by a compressor that does not
-// obey block sizes is uncompressed properly.
-static void VerifyNonBlockedCompression(const string& input) {
-  if (input.length() > snappy::kBlockSize) {
-    // We cannot test larger blocks than the maximum block size, obviously.
-    return;
-  }
-
-  string prefix;
-  Varint::Append32(&prefix, input.size());
-
-  // Setup compression table
-  snappy::internal::WorkingMemory wmem;
-  int table_size;
-  uint16* table = wmem.GetHashTable(input.size(), &table_size);
-
-  // Compress entire input in one shot
-  string compressed;
-  compressed += prefix;
-  compressed.resize(prefix.size()+snappy::MaxCompressedLength(input.size()));
-  char* dest = string_as_array(&compressed) + prefix.size();
-  char* end = snappy::internal::CompressFragment(input.data(), input.size(),
-                                                dest, table, table_size);
-  compressed.resize(end - compressed.data());
-
-  // Uncompress into string
-  string uncomp_str;
-  CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncomp_str));
-  CHECK_EQ(uncomp_str, input);
-}
-
-// Expand the input so that it is at least K times as big as block size
-static string Expand(const string& input) {
-  static const int K = 3;
-  string data = input;
-  while (data.size() < K * snappy::kBlockSize) {
-    data += input;
-  }
-  return data;
-}
-
-static int Verify(const string& input) {
-  VLOG(1) << "Verifying input of size " << input.size();
-
-  // Compress using string based routines
-  const int result = VerifyString(input);
-
-  VerifyNonBlockedCompression(input);
-  if (!input.empty()) {
-    VerifyNonBlockedCompression(Expand(input));
-  }
-
-  return result;
-}
-
-// This test checks to ensure that snappy doesn't coredump if it gets
-// corrupted data.
-
-static bool IsValidCompressedBuffer(const string& c) {
-  return snappy::IsValidCompressedBuffer(c.data(), c.size());
-}
-static bool Uncompress(const string& c, string* u) {
-  return snappy::Uncompress(c.data(), c.size(), u);
-}
-
-TYPED_TEST(CorruptedTest, VerifyCorrupted) {
-  string source = "making sure we don't crash with corrupted input";
-  VLOG(1) << source;
-  string dest;
-  TypeParam uncmp;
-  snappy::Compress(source.data(), source.size(), &dest);
-
-  // Mess around with the data. It's hard to simulate all possible
-  // corruptions; this is just one example ...
-  CHECK_GT(dest.size(), 3);
-  dest[1]--;
-  dest[3]++;
-  // this really ought to fail.
-  CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-  CHECK(!Uncompress(TypeParam(dest), &uncmp));
-
-  // This is testing for a security bug - a buffer that decompresses to 100k
-  // but we lie in the snappy header and only reserve 0 bytes of memory :)
-  source.resize(100000);
-  for (int i = 0; i < source.length(); ++i) {
-    source[i] = 'A';
-  }
-  snappy::Compress(source.data(), source.size(), &dest);
-  dest[0] = dest[1] = dest[2] = dest[3] = 0;
-  CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-  CHECK(!Uncompress(TypeParam(dest), &uncmp));
-
-  if (sizeof(void *) == 4) {
-    // Another security check; check that a crazy big length can't DoS us with an
-    // over-allocation.
-    // Currently this is done only for 32-bit builds.  On 64-bit builds,
-    // where 3GBytes might be an acceptable allocation size, Uncompress()
-    // attempts to decompress, and sometimes causes the test to run out of
-    // memory.
-    dest[0] = dest[1] = dest[2] = dest[3] = 0xff;
-    // This decodes to a really large size, i.e., 3221225471 bytes
-    dest[4] = 'k';
-    CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-    CHECK(!Uncompress(TypeParam(dest), &uncmp));
-    dest[0] = dest[1] = dest[2] = 0xff;
-    dest[3] = 0x7f;
-    CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-    CHECK(!Uncompress(TypeParam(dest), &uncmp));
-  } else {
-    LOG(WARNING) << "Crazy decompression lengths not checked on 64-bit build";
-  }
-
-  // try reading stuff in from a bad file.
-  for (int i = 1; i <= 3; ++i) {
-    string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str());
-    string uncmp;
-    // check that we don't return a crazy length
-    size_t ulen;
-    CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
-          || (ulen < (1<<20)));
-    uint32 ulen2;
-    snappy::ByteArraySource source(data.data(), data.size());
-    CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
-          (ulen2 < (1<<20)));
-    CHECK(!IsValidCompressedBuffer(TypeParam(data)));
-    CHECK(!Uncompress(TypeParam(data), &uncmp));
-  }
-}
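The defensive pattern this test exercises (reject the header before trusting its advertised length) can be applied by callers as well; a sketch using only the public API (the max_output cap is an assumption of the example, not something snappy enforces):

// Bound the advertised uncompressed length and run the full validity scan
// before allocating any output.
bool SafeUncompress(const std::string& compressed, size_t max_output,
                    std::string* output) {
  size_t ulen;
  if (!snappy::GetUncompressedLength(compressed.data(), compressed.size(),
                                     &ulen) ||
      ulen > max_output ||
      !snappy::IsValidCompressedBuffer(compressed.data(), compressed.size())) {
    return false;
  }
  return snappy::Uncompress(compressed.data(), compressed.size(), output);
}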
-
-// Helper routines to construct arbitrary compressed strings.
-// These mirror the compression code in snappy.cc, but are copied
-// here so that we can bypass some limitations in how snappy.cc
-// invokes these routines.  (A usage sketch follows AppendCopy below.)
-static void AppendLiteral(string* dst, const string& literal) {
-  if (literal.empty()) return;
-  int n = literal.size() - 1;
-  if (n < 60) {
-    // Fit length in tag byte
-    dst->push_back(0 | (n << 2));
-  } else {
-    // Encode in upcoming bytes
-    char number[4];
-    int count = 0;
-    while (n > 0) {
-      number[count++] = n & 0xff;
-      n >>= 8;
-    }
-    dst->push_back(0 | ((59+count) << 2));
-    *dst += string(number, count);
-  }
-  *dst += literal;
-}
-
-static void AppendCopy(string* dst, int offset, int length) {
-  while (length > 0) {
-    // Figure out how much to copy in one shot
-    int to_copy;
-    if (length >= 68) {
-      to_copy = 64;
-    } else if (length > 64) {
-      to_copy = 60;
-    } else {
-      to_copy = length;
-    }
-    length -= to_copy;
-
-    if ((to_copy < 12) && (offset < 2048)) {
-      assert(to_copy-4 < 8);            // Must fit in 3 bits
-      dst->push_back(1 | ((to_copy-4) << 2) | ((offset >> 8) << 5));
-      dst->push_back(offset & 0xff);
-    } else if (offset < 65536) {
-      dst->push_back(2 | ((to_copy-1) << 2));
-      dst->push_back(offset & 0xff);
-      dst->push_back(offset >> 8);
-    } else {
-      dst->push_back(3 | ((to_copy-1) << 2));
-      dst->push_back(offset & 0xff);
-      dst->push_back((offset >> 8) & 0xff);
-      dst->push_back((offset >> 16) & 0xff);
-      dst->push_back((offset >> 24) & 0xff);
-    }
-  }
-}
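Taken together, the two helpers can hand-build a stream that the real decompressor accepts. A small usage sketch (Varint::Append32 and the CHECK macros come from the test support code, as in the FourByteOffset test below):

// "ab" as a literal followed by a length-10 copy at offset 2 expands to
// "abababababab"; overlapping copies are legal as long as the offset is
// non-zero.
string compressed;
Varint::Append32(&compressed, 12);   // uncompressed-length preamble
AppendLiteral(&compressed, "ab");
AppendCopy(&compressed, 2, 10);

string uncompressed;
CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed));
CHECK_EQ(uncompressed, string("abababababab"));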
-
-TEST(Snappy, SimpleTests) {
-  Verify("");
-  Verify("a");
-  Verify("ab");
-  Verify("abc");
-
-  Verify("aaaaaaa" + string(16, 'b') + string("aaaaa") + "abc");
-  Verify("aaaaaaa" + string(256, 'b') + string("aaaaa") + "abc");
-  Verify("aaaaaaa" + string(2047, 'b') + string("aaaaa") + "abc");
-  Verify("aaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
-  Verify("abcaaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
-}
-
-// Verify max blowup (lots of four-byte copies)
-TEST(Snappy, MaxBlowup) {
-  string input;
-  for (int i = 0; i < 20000; i++) {
-    ACMRandom rnd(i);
-    uint32 bytes = static_cast<uint32>(rnd.Next());
-    input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
-  }
-  for (int i = 19999; i >= 0; i--) {
-    ACMRandom rnd(i);
-    uint32 bytes = static_cast<uint32>(rnd.Next());
-    input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
-  }
-  Verify(input);
-}
-
-TEST(Snappy, RandomData) {
-  ACMRandom rnd(FLAGS_test_random_seed);
-
-  const int num_ops = 20000;
-  for (int i = 0; i < num_ops; i++) {
-    if ((i % 1000) == 0) {
-      VLOG(0) << "Random op " << i << " of " << num_ops;
-    }
-
-    string x;
-    int len = rnd.Uniform(4096);
-    if (i < 100) {
-      len = 65536 + rnd.Uniform(65536);
-    }
-    while (x.size() < len) {
-      int run_len = 1;
-      if (rnd.OneIn(10)) {
-        run_len = rnd.Skewed(8);
-      }
-      char c = (i < 100) ? rnd.Uniform(256) : rnd.Skewed(3);
-      while (run_len-- > 0 && x.size() < len) {
-        x += c;
-      }
-    }
-
-    Verify(x);
-  }
-}
-
-TEST(Snappy, FourByteOffset) {
-  // The new compressor cannot generate four-byte offsets since
-  // it chops up the input into 32KB pieces.  So we hand-emit the
-  // copy manually.
-
-  // The two fragments that make up the input string.
-  string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz";
-  string fragment2 = "some other string";
-
-  // How many times each fragment is emitted.
-  const int n1 = 2;
-  const int n2 = 100000 / fragment2.size();
-  const int length = n1 * fragment1.size() + n2 * fragment2.size();
-
-  string compressed;
-  Varint::Append32(&compressed, length);
-
-  AppendLiteral(&compressed, fragment1);
-  string src = fragment1;
-  for (int i = 0; i < n2; i++) {
-    AppendLiteral(&compressed, fragment2);
-    src += fragment2;
-  }
-  AppendCopy(&compressed, src.size(), fragment1.size());
-  src += fragment1;
-  CHECK_EQ(length, src.size());
-
-  string uncompressed;
-  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-  CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed));
-  CHECK_EQ(uncompressed, src);
-}
-
-
-static bool CheckUncompressedLength(const string& compressed,
-                                    size_t* ulength) {
-  const bool result1 = snappy::GetUncompressedLength(compressed.data(),
-                                                     compressed.size(),
-                                                     ulength);
-
-  snappy::ByteArraySource source(compressed.data(), compressed.size());
-  uint32 length;
-  const bool result2 = snappy::GetUncompressedLength(&source, &length);
-  CHECK_EQ(result1, result2);
-  return result1;
-}
-
-TEST(SnappyCorruption, TruncatedVarint) {
-  string compressed, uncompressed;
-  size_t ulength;
-  compressed.push_back('\xf0');
-  CHECK(!CheckUncompressedLength(compressed, &ulength));
-  CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-  CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
-                            &uncompressed));
-}
-
-TEST(SnappyCorruption, UnterminatedVarint) {
-  string compressed, uncompressed;
-  size_t ulength;
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(10);
-  CHECK(!CheckUncompressedLength(compressed, &ulength));
-  CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-  CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
-                            &uncompressed));
-}
-
-TEST(Snappy, ReadPastEndOfBuffer) {
-  // Check that we do not read past end of input
-
-  // Make a compressed string that ends with a single-byte literal
-  string compressed;
-  Varint::Append32(&compressed, 1);
-  AppendLiteral(&compressed, "x");
-
-  string uncompressed;
-  DataEndingAtUnreadablePage c(compressed);
-  CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
-  CHECK_EQ(uncompressed, string("x"));
-}
-
-// Check for an infinite loop caused by a copy with offset==0
-TEST(Snappy, ZeroOffsetCopy) {
-  const char* compressed = "\x40\x12\x00\x00";
-  //  \x40              Length (must be > kMaxIncrementCopyOverflow)
-  //  \x12\x00\x00      Copy with offset==0, length==5
-  char uncompressed[100];
-  EXPECT_FALSE(snappy::RawUncompress(compressed, 4, uncompressed));
-}
-
-TEST(Snappy, ZeroOffsetCopyValidation) {
-  const char* compressed = "\x05\x12\x00\x00";
-  //  \x05              Length
-  //  \x12\x00\x00      Copy with offset==0, length==5
-  EXPECT_FALSE(snappy::IsValidCompressedBuffer(compressed, 4));
-}
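The byte-level comments in these two tests decode by hand as well. A short illustrative sketch of the same tag arithmetic, consistent with AppendCopy() above (the helper is hypothetical, not part of the test file):

// Decode a 2-byte-offset copy tag such as the 0x12 used above: the low two
// bits select the element type, the upper six bits hold length-1, and the
// next two bytes are the little-endian offset.
void DecodeCopy2(const unsigned char* tag) {
  assert((tag[0] & 3) == 2);            // type 2: copy with 2-byte offset
  int length = (tag[0] >> 2) + 1;       // 0x12 >> 2 == 4, so length == 5
  int offset = tag[1] | (tag[2] << 8);  // 0x00 0x00, so offset == 0
  printf("copy: length=%d offset=%d\n", length, offset);
}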
-
-
-namespace {
-
-int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
-  return snappy::internal::FindMatchLength(s1, s2, s2 + length);
-}
-
-}  // namespace
-
-TEST(Snappy, FindMatchLength) {
-  // Exercise all different code paths through the function.
-  // 64-bit version:
-
-  // Hit s1_limit in 64-bit loop, hit s1_limit in single-character loop.
-  EXPECT_EQ(6, TestFindMatchLength("012345", "012345", 6));
-  EXPECT_EQ(11, TestFindMatchLength("01234567abc", "01234567abc", 11));
-
-  // Hit s1_limit in 64-bit loop, find a non-match in single-character loop.
-  EXPECT_EQ(9, TestFindMatchLength("01234567abc", "01234567axc", 9));
-
-  // Same, but edge cases.
-  EXPECT_EQ(11, TestFindMatchLength("01234567abc!", "01234567abc!", 11));
-  EXPECT_EQ(11, TestFindMatchLength("01234567abc!", "01234567abc?", 11));
-
-  // Find non-match at once in first loop.
-  EXPECT_EQ(0, TestFindMatchLength("01234567xxxxxxxx", "?1234567xxxxxxxx", 16));
-  EXPECT_EQ(1, TestFindMatchLength("01234567xxxxxxxx", "0?234567xxxxxxxx", 16));
-  EXPECT_EQ(4, TestFindMatchLength("01234567xxxxxxxx", "01237654xxxxxxxx", 16));
-  EXPECT_EQ(7, TestFindMatchLength("01234567xxxxxxxx", "0123456?xxxxxxxx", 16));
-
-  // Find non-match in first loop after one block.
-  EXPECT_EQ(8, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
-                                   "abcdefgh?1234567xxxxxxxx", 24));
-  EXPECT_EQ(9, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
-                                   "abcdefgh0?234567xxxxxxxx", 24));
-  EXPECT_EQ(12, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
-                                    "abcdefgh01237654xxxxxxxx", 24));
-  EXPECT_EQ(15, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
-                                    "abcdefgh0123456?xxxxxxxx", 24));
-
-  // 32-bit version:
-
-  // Short matches.
-  EXPECT_EQ(0, TestFindMatchLength("01234567", "?1234567", 8));
-  EXPECT_EQ(1, TestFindMatchLength("01234567", "0?234567", 8));
-  EXPECT_EQ(2, TestFindMatchLength("01234567", "01?34567", 8));
-  EXPECT_EQ(3, TestFindMatchLength("01234567", "012?4567", 8));
-  EXPECT_EQ(4, TestFindMatchLength("01234567", "0123?567", 8));
-  EXPECT_EQ(5, TestFindMatchLength("01234567", "01234?67", 8));
-  EXPECT_EQ(6, TestFindMatchLength("01234567", "012345?7", 8));
-  EXPECT_EQ(7, TestFindMatchLength("01234567", "0123456?", 8));
-  EXPECT_EQ(7, TestFindMatchLength("01234567", "0123456?", 7));
-  EXPECT_EQ(7, TestFindMatchLength("01234567!", "0123456??", 7));
-
-  // Hit s1_limit in 32-bit loop, hit s1_limit in single-character loop.
-  EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd", "xxxxxxabcd", 10));
-  EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd?", "xxxxxxabcd?", 10));
-  EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcdef", "xxxxxxabcdef", 13));
-
-  // Same, but edge cases.
-  EXPECT_EQ(12, TestFindMatchLength("xxxxxx0123abc!", "xxxxxx0123abc!", 12));
-  EXPECT_EQ(12, TestFindMatchLength("xxxxxx0123abc!", "xxxxxx0123abc?", 12));
-
-  // Hit s1_limit in 32-bit loop, find a non-match in single-character loop.
-  EXPECT_EQ(11, TestFindMatchLength("xxxxxx0123abc", "xxxxxx0123axc", 13));
-
-  // Find non-match at once in first loop.
-  EXPECT_EQ(6, TestFindMatchLength("xxxxxx0123xxxxxxxx",
-                                   "xxxxxx?123xxxxxxxx", 18));
-  EXPECT_EQ(7, TestFindMatchLength("xxxxxx0123xxxxxxxx",
-                                   "xxxxxx0?23xxxxxxxx", 18));
-  EXPECT_EQ(8, TestFindMatchLength("xxxxxx0123xxxxxxxx",
-                                   "xxxxxx0132xxxxxxxx", 18));
-  EXPECT_EQ(9, TestFindMatchLength("xxxxxx0123xxxxxxxx",
-                                   "xxxxxx012?xxxxxxxx", 18));
-
-  // Same, but edge cases.
-  EXPECT_EQ(6, TestFindMatchLength("xxxxxx0123", "xxxxxx?123", 10));
-  EXPECT_EQ(7, TestFindMatchLength("xxxxxx0123", "xxxxxx0?23", 10));
-  EXPECT_EQ(8, TestFindMatchLength("xxxxxx0123", "xxxxxx0132", 10));
-  EXPECT_EQ(9, TestFindMatchLength("xxxxxx0123", "xxxxxx012?", 10));
-
-  // Find non-match in first loop after one block.
-  EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd0123xx",
-                                    "xxxxxxabcd?123xx", 16));
-  EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123xx",
-                                    "xxxxxxabcd0?23xx", 16));
-  EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123xx",
-                                    "xxxxxxabcd0132xx", 16));
-  EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123xx",
-                                    "xxxxxxabcd012?xx", 16));
-
-  // Same, but edge cases.
-  EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd?123", 14));
-  EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0?23", 14));
-  EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0132", 14));
-  EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd012?", 14));
-}
-
-TEST(Snappy, FindMatchLengthRandom) {
-  const int kNumTrials = 10000;
-  const int kTypicalLength = 10;
-  ACMRandom rnd(FLAGS_test_random_seed);
-
-  for (int i = 0; i < kNumTrials; i++) {
-    string s, t;
-    char a = rnd.Rand8();
-    char b = rnd.Rand8();
-    while (!rnd.OneIn(kTypicalLength)) {
-      s.push_back(rnd.OneIn(2) ? a : b);
-      t.push_back(rnd.OneIn(2) ? a : b);
-    }
-    DataEndingAtUnreadablePage u(s);
-    DataEndingAtUnreadablePage v(t);
-    int matched = snappy::internal::FindMatchLength(
-        u.data(), v.data(), v.data() + t.size());
-    if (matched == t.size()) {
-      EXPECT_EQ(s, t);
-    } else {
-      EXPECT_NE(s[matched], t[matched]);
-      for (int j = 0; j < matched; j++) {
-        EXPECT_EQ(s[j], t[j]);
-      }
-    }
-  }
-}
-
-
-static void CompressFile(const char* fname) {
-  string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
-
-  string compressed;
-  Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false);
-
-  File::WriteStringToFileOrDie(compressed,
-                               string(fname).append(".comp").c_str());
-}
-
-static void UncompressFile(const char* fname) {
-  string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
-
-  size_t uncompLength;
-  CHECK(CheckUncompressedLength(fullinput, &uncompLength));
-
-  string uncompressed;
-  uncompressed.resize(uncompLength);
-  CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed));
-
-  File::WriteStringToFileOrDie(uncompressed,
-                               string(fname).append(".uncomp").c_str());
-}
-
-static void MeasureFile(const char* fname) {
-  string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
-  printf("%-40s :\n", fname);
-
-  int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
-  int end_len = fullinput.size();
-  if (FLAGS_end_len >= 0) {
-    end_len = min<int>(fullinput.size(), FLAGS_end_len);
-  }
-  for (int len = start_len; len <= end_len; len++) {
-    const char* const input = fullinput.data();
-    int repeats = (FLAGS_bytes + len) / (len + 1);
-    if (FLAGS_zlib)     Measure(input, len, ZLIB, repeats, 1024<<10);
-    if (FLAGS_lzo)      Measure(input, len, LZO, repeats, 1024<<10);
-    if (FLAGS_liblzf)   Measure(input, len, LIBLZF, repeats, 1024<<10);
-    if (FLAGS_quicklz)  Measure(input, len, QUICKLZ, repeats, 1024<<10);
-    if (FLAGS_fastlz)   Measure(input, len, FASTLZ, repeats, 1024<<10);
-    if (FLAGS_snappy)   Measure(input, len, SNAPPY, repeats, 4096<<10);
-
-    // For block-size based measurements
-    if (0 && FLAGS_snappy) {
-      Measure(input, len, SNAPPY, repeats, 8<<10);
-      Measure(input, len, SNAPPY, repeats, 16<<10);
-      Measure(input, len, SNAPPY, repeats, 32<<10);
-      Measure(input, len, SNAPPY, repeats, 64<<10);
-      Measure(input, len, SNAPPY, repeats, 256<<10);
-      Measure(input, len, SNAPPY, repeats, 1024<<10);
-    }
-  }
-}
-
-static struct {
-  const char* label;
-  const char* filename;
-} files[] = {
-  { "html", "html" },
-  { "urls", "urls.10K" },
-  { "jpg", "house.jpg" },
-  { "pdf", "mapreduce-osdi-1.pdf" },
-  { "html4", "html_x_4" },
-  { "cp", "cp.html" },
-  { "c", "fields.c" },
-  { "lsp", "grammar.lsp" },
-  { "xls", "kennedy.xls" },
-  { "txt1", "alice29.txt" },
-  { "txt2", "asyoulik.txt" },
-  { "txt3", "lcet10.txt" },
-  { "txt4", "plrabn12.txt" },
-  { "bin", "ptt5" },
-  { "sum", "sum" },
-  { "man", "xargs.1" },
-  { "pb", "geo.protodata" },
-  { "gaviota", "kppkn.gtb" },
-};
-
-static void BM_UFlat(int iters, int arg) {
-  StopBenchmarkTiming();
-
-  // Pick file to process based on "arg"
-  CHECK_GE(arg, 0);
-  CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
-
-  string zcontents;
-  snappy::Compress(contents.data(), contents.size(), &zcontents);
-  char* dst = new char[contents.size()];
-
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
-  SetBenchmarkLabel(files[arg].label);
-  StartBenchmarkTiming();
-  while (iters-- > 0) {
-    CHECK(snappy::RawUncompress(zcontents.data(), zcontents.size(), dst));
-  }
-  StopBenchmarkTiming();
-
-  delete[] dst;
-}
-BENCHMARK(BM_UFlat)->DenseRange(0, 17);
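BM_UFlat can size its destination from the original contents because it still has them; when only compressed bytes are available, the size comes from the stream header instead. A hedged sketch of that raw-buffer path (the helper name is illustrative):

// Decompress into a caller-owned buffer using the raw (char*) API.
char* UncompressToNewBuffer(const string& zcontents, size_t* n) {
  if (!snappy::GetUncompressedLength(zcontents.data(), zcontents.size(), n))
    return NULL;
  char* dst = new char[*n];
  if (!snappy::RawUncompress(zcontents.data(), zcontents.size(), dst)) {
    delete[] dst;
    return NULL;
  }
  return dst;
}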
-
-static void BM_UValidate(int iters, int arg) {
-  StopBenchmarkTiming();
-
-  // Pick file to process based on "arg"
-  CHECK_GE(arg, 0);
-  CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
-
-  string zcontents;
-  snappy::Compress(contents.data(), contents.size(), &zcontents);
-
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
-  SetBenchmarkLabel(files[arg].label);
-  StartBenchmarkTiming();
-  while (iters-- > 0) {
-    CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
-  }
-  StopBenchmarkTiming();
-}
-BENCHMARK(BM_UValidate)->DenseRange(0, 4);
-
-
-static void BM_ZFlat(int iters, int arg) {
-  StopBenchmarkTiming();
-
-  // Pick file to process based on "arg"
-  CHECK_GE(arg, 0);
-  CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
-
-  char* dst = new char[snappy::MaxCompressedLength(contents.size())];
-
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
-  StartBenchmarkTiming();
-
-  size_t zsize = 0;
-  while (iters-- > 0) {
-    snappy::RawCompress(contents.data(), contents.size(), dst, &zsize);
-  }
-  StopBenchmarkTiming();
-  const double compression_ratio =
-      static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
-  SetBenchmarkLabel(StringPrintf("%s (%.2f %%)",
-                                 files[arg].label, 100.0 * compression_ratio));
-  VLOG(0) << StringPrintf("compression for %s: %zd -> %zd bytes",
-                          files[arg].label, contents.size(), zsize);
-  delete[] dst;
-}
-BENCHMARK(BM_ZFlat)->DenseRange(0, 17);
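The compression side pairs RawCompress with MaxCompressedLength, as BM_ZFlat does above. A minimal sketch of that pattern (string_as_array comes from the test support header, as used in VerifyNonBlockedCompression earlier):

// Compress into a pre-sized string, then shrink it to the actual length.
string CompressToString(const string& contents) {
  string dst;
  dst.resize(snappy::MaxCompressedLength(contents.size()));
  size_t zsize = 0;
  snappy::RawCompress(contents.data(), contents.size(),
                      string_as_array(&dst), &zsize);
  dst.resize(zsize);
  return dst;
}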
-
-
-}  // namespace snappy
-
-
-int main(int argc, char** argv) {
-  InitGoogle(argv[0], &argc, &argv, true);
-  File::Init();
-  RunSpecifiedBenchmarks();
-
-
-  if (argc >= 2) {
-    for (int arg = 1; arg < argc; arg++) {
-      if (FLAGS_write_compressed) {
-        CompressFile(argv[arg]);
-      } else if (FLAGS_write_uncompressed) {
-        UncompressFile(argv[arg]);
-      } else {
-        MeasureFile(argv[arg]);
-      }
-    }
-    return 0;
-  }
-
-  return RUN_ALL_TESTS();
-}
--- a/toolkit/content/license.html
+++ b/toolkit/content/license.html
@@ -137,17 +137,16 @@
       <li><a href="about:license#hunspell-lt">Lithuanian Spellchecking Dictionary License</a></li>
       <li><a href="about:license#maattachedwindow">MAAttachedWindow</a></li>
       <li><a href="about:license#directx">Microsoft DirectX License</a></li>
       <li><a href="about:license#openvision">OpenVision License</a></li>
       <li><a href="about:license#qcms">qcms License</a></li>
       <li><a href="about:license#xdg">Red Hat xdg_user_dir_lookup License</a></li>
       <li><a href="about:license#hunspell-ru">Russian Spellchecking Dictionary License</a></li>
       <li><a href="about:license#skia">Skia License</a></li>
-      <li><a href="about:license#snappy">Snappy License</a></li>
       <li><a href="about:license#sparkle">Sparkle License</a></li>
       <li><a href="about:license#sunsoft">SunSoft License</a></li>
       <li><a href="about:license#ucal">University of California License</a></li>      
       <li><a href="about:license#ucambridge">University of Cambridge License</a></li>      
       <li><a href="about:license#uszeged">University of Szeged License</a></li>      
       <li><a href="about:license#hunspell-en-US">US English Spellchecking Dictionary Licenses</a></li>
       <li><a href="about:license#v8">V8 License</a></li>
       <li><a href="about:license#xiph">Xiph.org Foundation License</a></li>
@@ -3272,54 +3271,16 @@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GO
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 </pre>
     
     <hr>
 
-    <h1><a name="snappy"></a>Snappy License</h1>
-
-    <p>This license applies to certain files in the directory
-    <span class="path">other-licenses/snappy/</span>.</p>
-
-<pre>
-Copyright 2011, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-</pre>
-
-    <hr>
-
     <h1><a name="sparkle"></a>Sparkle License</h1>
 
     <p>This license applies to certain files in the directory
     <span class="path">camino/sparkle/</span>. (This code only ships in the
     Camino browser or products based on it.)</p>
 
 <pre>
 Copyright (c) 2006 Andy Matuschak
--- a/toolkit/library/Makefile.in
+++ b/toolkit/library/Makefile.in
@@ -157,17 +157,16 @@ OS_LIBS += -lrt
 endif
 endif
 
 STATIC_LIBS += \
   xpcom_core \
   ucvutil_s \
   chromium_s \
   mozreg_s \
-  snappy_s \
   $(NULL)
 
 # component libraries
 COMPONENT_LIBS += \
   necko \
   uconv \
   i18n \
   chardet \
--- a/toolkit/toolkit-tiers.mk
+++ b/toolkit/toolkit-tiers.mk
@@ -220,18 +220,16 @@ tier_platform_dirs += extensions/pref
 endif
 
 tier_platform_dirs += services/crypto/component
 
 tier_platform_dirs += startupcache
 
 tier_platform_dirs += js/ductwork/debugger
 
-tier_platform_dirs += other-licenses/snappy
-
 ifdef APP_LIBXUL_STATICDIRS
 # Applications can cheat and ask for code to be
 # built before libxul so libxul can be linked against it.
 tier_platform_staticdirs += $(APP_LIBXUL_STATICDIRS)
 endif
 ifdef APP_LIBXUL_DIRS
 # Applications can cheat and ask for code to be
 # built before libxul so it can be linked into libxul.
@@ -266,8 +264,9 @@ tier_platform_dirs	+= tools/codesighs
 endif
 
 ifdef ENABLE_TESTS
 tier_platform_dirs += testing/mochitest
 tier_platform_dirs += testing/xpcshell
 tier_platform_dirs += testing/tools/screenshot
 tier_platform_dirs += testing/peptest
 endif
+