Bug 768074 - Update Snappy library to revision >= r59 to get ARM optimizations; r=bkelly
author: Jan Varga <jan.varga@gmail.com>
date: Mon, 24 Oct 2016 21:19:24 +0200
changeset: 319239 0cbca09be54660af257c167ccf808d6abd178fc6
parent: 319238 b534adeedaca2c2dca84c950ad4a69b5873c8435
child: 319240 ebd4625e101a38c2754a0eb773b4fa2c1fd93c28
push id: 30865
push user: cbook@mozilla.com
push date: Tue, 25 Oct 2016 08:31:38 +0000
treeherder: mozilla-central@78b863e9fcd9
reviewers: bkelly
bugs: 768074
milestone: 52.0a1
files:
dom/indexedDB/test/unit/snappyUpgrade_profile.zip
dom/indexedDB/test/unit/test_snappyUpgrade.js
dom/indexedDB/test/unit/xpcshell-parent-process.ini
other-licenses/snappy/README
other-licenses/snappy/snappy-stubs-public.h
other-licenses/snappy/src/COPYING
other-licenses/snappy/src/ChangeLog
other-licenses/snappy/src/NEWS
other-licenses/snappy/src/README
other-licenses/snappy/src/framing_format.txt
other-licenses/snappy/src/snappy-c.h
other-licenses/snappy/src/snappy-internal.h
other-licenses/snappy/src/snappy-sinksource.cc
other-licenses/snappy/src/snappy-sinksource.h
other-licenses/snappy/src/snappy-stubs-internal.h
other-licenses/snappy/src/snappy-stubs-public.h.in
other-licenses/snappy/src/snappy-test.cc
other-licenses/snappy/src/snappy-test.h
other-licenses/snappy/src/snappy.cc
other-licenses/snappy/src/snappy.h
other-licenses/snappy/src/snappy_unittest.cc
new file mode 100644
--- /dev/null
+++ b/dom/indexedDB/test/unit/snappyUpgrade_profile.zip
index 0000000000000000000000000000000000000000..f9635fc9f516ac8346920b6c5a4f430bbe7c3e14
GIT binary patch
literal 4360
zc%03bc{tSj9>>R#I3?qxBYQ`R%ATE=B7|&_VJs74XN+ZRV>>EzvQ+jZ9Lr$r$-a!F
zp)gdIVQeRiHDqE8!?>yY+<We+ll$Di?&tS>pXdAiJ)iIE{r%_tecoRqJ;p;P0000B
zASc&cC;Ud&S}-F3kiiT9umSi0FmDH2qyyC6PQn=pcfV=+BY^RkF`Idz5u3Shz)=9h
zp$!HA;PZa;&C&b3+Uy|&u<81l@Y>GXR=<dfmyY1X&KOiC%?KXqPcKjE>OpQ&G)ws?
zlRhZN(r%)G`f2v~f~5f^-{Qt<>f)5$RU~}VRHYSIH6k2;Ei!gM`AN>qIe1!V?~;~l
zqgNSc31FmPrnRD2xzeC&npjiz)VQG%6MWLG@%17jGZ;S@?TYmBl7qOLj3xqyW{+5K
z>fC4ti^lzyJ2<g=o~HjgazU*=`G?i{3`F!jGF)vBWxR3En4weoo?#*-D`on{0}QId
zzcCEeFwKq|Az6heNSJPp4E+c^7yce@cS^+M5}gB~zQ%cEV$5c;nYN4{UrbI$`hWEP
zxjME{12a8Bnm=Uc+sz`7lE1orBy{1Z5qe;aq*H3yI>$30@nMN$*y6gR#J%yv=~L{)
zcxtL8F;{%TqMZT6s(=O^&#P8XcFNDe;a-HtxB;RQ+aZ=ZJe7YqnsyU^Qa~-=X0yuG
z)77j?f4$_+kpnG1(<d$3D1X-u=^{D41mXFO{k5G0-;RE2Z1ONe>)T@-7iJ-z?r#M0
z*)7?eZdJ;KUL{n<W@?RJKn-eYUvtJSW<K<o5nPqK=tz8(m#d<|lyI%n+`I9zF)x?#
zNhc#9QiNP&Rv0D52x7^}6sXd>#_^1O*hz@UgMV=`Z%FRkU9NWHyFYjY=@2rjCsg8N
zeRcYZ9?5Yjcc)spDXH1dyeZ;nsP(M%Rk=6TAWX}@<2+D25BAWtaZxSgV@OP_X5x*@
z=a=gBZ|Cw&@N&LO^@`L=9$*9spZ=ZSCO-F+kNC9+%UMRmyR+5z&mVrq5_3#;E56X}
zv{q3}Bo8nF63oiFz+!cq-p*ez{|JxQBV8KRNqgr4x~q}u?N=X}%-sZ#ig7xg5lFI`
z&P}3y^g}Go6{Rkv!*-&mEuOG*8n_m7`8z$uxiW8`4sgwnof$0Sr7r|L67G{2L|&t4
z-oNs$jq7RTuurle)j7?g1Zl;TFq!Q|msT0!>t_qvzOjOs3FRq^P2;P37Bib&J-e40
zvwFPFVbY)gdQS*-p}S^rb3Sai8aH&KO;q-xbK()Sh+dtE_i%^cs14rsWcHOLCJV`p
z1y+B^O9mdLvV4b>{u$UG_R5b{4XqR;$l4rPj_Xr0XR<dpp)RUEp2izt0eLuLnyS*M
zB0PCoV>xBIMDu3c_V5abENS$~!$$!7m6T(y1Nc)N`Q%|24||6I2m7n)lAry{ZOCT6
zKmOw1e=}14!uaVw7%*pV1l-{ZivJUa|9hya{QpEre+wloC9SA<QC3Pu9_i!Y>Lam#
z-}u;ic-uO8dV1ar{37+il+Ap9d_(HX?e+ANl-f@&^no7%pVtcigOr4im%9tn;j<w5
zlNkAH!DWKAyCoq6@WJPJf39i2@mroQVna)*P&?!%+MsFxPZX=x9EF@vN+JadX6RW|
z<FDqPh$p|7Ejv?pKJu7(co{#L{Z_lc{oD`j0+Cohu1GajO{o)_PonI&nwQHMZ|BZO
z+FtfA0%la5>BRrQZW#)CNJyFc7)GBPbEB=#b-I<g`h{Yp!7dKKC=xp?;B>b-OT+py
zP+>d)`()amMn^nfCOsFa2rI2z-}unhVQITy*t#>P*B89t1I1Et2rMNVOy7-$*59D*
z=7HI4=qY+(feTc-vJE>Q%$nIW@2ZqA;P2}hZQ?DV9kyCN*Y%P)NR9jf8{ivOV%+4J
zLsW!jlx104rDCd!omB2WfAY%PrTDSFeq5%aaaK>QOL9`2Av#^g@IftaR=PPnfgdE7
zmZaBd;FwUDh0jv8$a*Q<)fXpQJlBVp9I_mlt1@)Z?iT3~u6jcbP})#Y3tYt1WMS6q
zl}hmW@`WAR+O9IKNl7AFV4M1iIUzLT+TzEh{$9gYQ&oa;#%avsOiZsC;_!g6j~i9R
z9|FapWL<vd*RQj<u2MB<HQ<;&GQ8kEHDD_5<ro|sVqDlS)mc!raTQ*=ZRAw(Zjho;
z65T}fUtOHSlGEUvbxNcD_4T73Z`R&x&98>$U>t(9_Xs-kTrlU5LYn`?0;I(PULA_F
zcVDT_ih1%|Kj{_5-{4u3rF|35FFNR5R#tcjZK~^X$wgI5IH9I>m$xk?E-r<Tbz5VB
zfh!~RLLGs;J~M2Bel_=0q`MF3q#%$JON{+B(u}gY%pE{)t0#}vY+v6X+qF=-?9v-U
zT9c#nmC}@&LP~;$VB8s8;-HlAkzba@JY3TFiDSrl(NnvZ`<~qab`YR8);q~;mPaV=
zy4~^|rQ`ngu0ki<N6=mG?c`LHLl>R3ZJjPlf-A<#QKMH><fmq5mVDscA<l{_GSZYS
z8IriZiN3NE=?Mo%hBi-^aCc{aH)6_sYrVr6KNJiLc@sqxHy96q35aH@nem7t1QwGM
z5i+-$TxpLYbtA?ZodjZReQRe$pVlCAMWf5MYN)jW5mB`#mc^I11|t=l(M6TVX{f_l
zVjW40B@wHh>mFudxBA;!?r;QU<e_{8CuNMpItZXAC{sjjb0DE<!IKVEPFfuw;SB`>
zC)-<^m<>+&D=TGc&(4NS4obdX;Z_V*4E0@3s*+nNJFA*OfxGSYA|~+gy3yZI4&~|k
zN!b~T=k%07O-3%(%{9#*|3JtLj7_<hn3dJ>fNL>h0TpA%-#`F$D<8yT{l@0`DJ1j0
zm2fBEYPF>0M}NwPp{kuhjV9D#HokjM{`s*&3a8)LZ0%^;hW4U0LPuqn-*DV{D`26A
zszr)zC5#5iJTB|??e4FG6(d-81m<W8K26LWY|R>2g23IlQ%zujh7zY+o>DfuFOo%a
zKGIkC4flF=Jo>UIYgT#6;PKf>eg*Gsj7k8&Lm0VH#1^u>GR|noHt}YGh%EO*@3p^2
zDN$J8Po4rtnx%{xv?I}h(|t|y;=XxE_>?8A*%U78_Hkph6khbuF-C8X^DXmt+`V7)
zFj`H(W+AME=$;855<Pkp+$(0~Z0cAnBq;|IyxED)j#_8<KH}HSBy8$YFDOfy>(jxM
z&`$Io%L3y8va1SPO`ABgnvwev>q4OmFu{-mHh9c!Oh&5;PMoCA=Ww-dQckVg1n-oy
z`$a(M-j{l!w2C4d9!5aSS@4yqqdsK2Mnpk;gsCM^27@)GdX6RF3&=h>#~}@O3}yRw
zt+pL8nsTgBVw>;k>%!DF+(>FcVaX_b)QGw&*3(`ZFO`WJG+8n$no%2QeW*L@qy6Bq
z-r0VPb+T5fzFZJF7TLlHi8!fYMjhG}!W2B#b)4ee!Ws3cz6PaqU`RYIAYu*I6bSby
z_<YX{HVMjdGy+4tLl=8&h=<1R@rCiVX7YedmS62OoS@fG^QJU%(whWCfcoewjwaTe
z5;NG$E`x$UPCW?Y+pF1h+k&LecH*FtY$u^a6T@h6iMgd5r2r|pC0fs`;IloX-Y5<l
zGzL*X-f9EWmRu_J?trHWD-{vo=XUk+LDkf&_vO1oWUYE*BcF_gMLE8+^J=C-oM$1p
zuyY%QQW@uapouF-=e)!CB@#C|_fBK^yBa#5KNktR5qzmuw@FL=I=%vEZNM0E>Fm<A
zW;4FNLVfNXTgb9qxeYNR`^TiVge_Oym(%A<S6O#;f<>*mYNq_vOD;h!nLobn7N~;d
z)@?|epg-#N|GBU|qD@=(D2^~CW2?N0d>OhKyhg*#ZI7x9_k<|$KXeUL?pY2Jvf{gR
zqdBW!S8L7x?L)#_6sLFp8fu~MJ}4S-*QpCszgcnN!HMj9lZdE9w?wuM1wG)+stN6B
z#3ht;eq28~`^DY`o9@0y+;bS(4X1IQn=n0z>*Ev=I3}GP;d2L6whQCgsHuM#c0r7d
zUP>LGV19135nKIxG>O?xRotcdBF~<@thcs$!*)N_btiBwYLgyfRlZ~Ebvo3!z6PbM
zZ3Ya0Zg-%ecMtYO+JyBQmTP)*A})f`N0Nv-$Q9N@1gx0963YQ?^Hu7!sxi_p2ekd(
zy-iFZNiZL9%>Q>)EXARr`^o`rms5WJ?^G!uBRz&61Q`GB5zO_uZ6EV%`2Os#PhzGo
zPvWnF=|1!CLOItzXY7DyS6+Nl`g25Q`V!H<!}~g*|Cz^f2=J}A&h#a&e+T<@SpPHD
z<XhN-qdU`==>ARFzYFeM|2o32;n@E{_Fplc=}V0NN}TENKAh>(xBBTPBXEE94~Z}7
AMF0Q*
new file mode 100644
--- /dev/null
+++ b/dom/indexedDB/test/unit/test_snappyUpgrade.js
@@ -0,0 +1,44 @@
+/**
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+var testGenerator = testSteps();
+
+function testSteps()
+{
+  const name = "test_snappyUpgrade.js";
+  const objectStoreName = "test";
+  const testString = "Lorem ipsum his ponderum delicatissimi ne, at noster dolores urbanitas pro, cibo elaboraret no his. Ea dicunt maiorum usu. Ad appareat facilisis mediocritatem eos. Tale graeci mentitum in eos, hinc insolens at nam. Graecis nominavi aliquyam eu vix. Id solet assentior sadipscing pro. Et per atqui graecis, usu quot viris repudiandae ei, mollis evertitur an nam. At nam dolor ignota, liber labore omnesque ea mei, has movet voluptaria in. Vel an impetus omittantur. Vim movet option salutandi ex, ne mei ignota corrumpit. Mucius comprehensam id per. Est ea putant maiestatis.";
+
+  info("Installing profile");
+
+  clearAllDatabases(continueToNextStepSync);
+  yield undefined;
+
+  installPackagedProfile("snappyUpgrade_profile");
+
+  info("Opening database");
+
+  let request = indexedDB.open(name);
+  request.onerror = errorHandler;
+  request.onupgradeneeded = unexpectedSuccessHandler;
+  request.onsuccess = continueToNextStepSync;
+  yield undefined;
+
+  // success
+  let db = request.result;
+  db.onerror = errorHandler;
+
+  info("Getting string");
+
+  request = db.transaction([objectStoreName])
+              .objectStore(objectStoreName).get(1);
+  request.onsuccess = continueToNextStepSync;
+  yield undefined;
+
+  is(request.result, testString);
+
+  finishTest();
+  yield undefined;
+}
--- a/dom/indexedDB/test/unit/xpcshell-parent-process.ini
+++ b/dom/indexedDB/test/unit/xpcshell-parent-process.ini
@@ -19,16 +19,17 @@ support-files =
   GlobalObjectsComponent.manifest
   GlobalObjectsModule.jsm
   GlobalObjectsSandbox.js
   metadata2Restore_profile.zip
   metadataRestore_profile.zip
   schema18upgrade_profile.zip
   schema21upgrade_profile.zip
   schema23upgrade_profile.zip
+  snappyUpgrade_profile.zip
   storagePersistentUpgrade_profile.zip
   xpcshell-shared.ini
 
 [include:xpcshell-shared.ini]
 
 [test_blob_file_backed.js]
 [test_bug1056939.js]
 [test_cleanup_transaction.js]
@@ -47,12 +48,13 @@ skip-if = true
 [test_metadataRestore.js]
 [test_mutableFileUpgrade.js]
 [test_oldDirectories.js]
 [test_quotaExceeded_recovery.js]
 [test_readwriteflush_disabled.js]
 [test_schema18upgrade.js]
 [test_schema21upgrade.js]
 [test_schema23upgrade.js]
+[test_snappyUpgrade.js]
 [test_storagePersistentUpgrade.js]
 [test_temporary_storage.js]
 # bug 951017: intermittent failure on Android x86 emulator
 skip-if = os == "android" && processor == "x86"
--- a/other-licenses/snappy/README
+++ b/other-licenses/snappy/README
@@ -1,25 +1,26 @@
 See src/README for the README that ships with snappy.
 
 Mozilla does not modify the actual snappy source with the exception of the
 'snappy-stubs-public.h' header. We have replaced its build system with our own.
 
 Snappy comes from:
   http://code.google.com/p/snappy/
 
-We are currently using revision: 56
+We are currently using revision: 114
 
 To upgrade to a newer version:
   1. Check out the new code using subversion.
   2. Update 'snappy-stubs-public.h' in this directory with any changes that were
      made to 'snappy-stubs-public.h.in' in the new source.
   3. Copy the major/minor/patch versions from 'configure.ac' into
      'snappy-stubs-public.h'.
   4. Copy all source files from the new version into the src subdirectory. The
      following files are not needed:
        - 'autom4te.cache' subdirectory
        - 'm4' subdirectory
        - 'testdata' subdirectory
        - 'autogen.sh'
        - 'configure.ac'
        - 'Makefile.am'
+       - 'snappy.pc.in'
   5. Update the revision stamp in this file.
--- a/other-licenses/snappy/snappy-stubs-public.h
+++ b/other-licenses/snappy/snappy-stubs-public.h
@@ -28,28 +28,28 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Various type stubs for the open-source version of Snappy.
 //
 // This file cannot include config.h, as it is included from snappy.h,
 // which is a public header. Instead, snappy-stubs-public.h is generated by
 // from snappy-stubs-public.h.in at configure time.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 
 #include <stdint.h>
 
 #if defined IS_BIG_ENDIAN || defined __BIG_ENDIAN__
 #define WORDS_BIGENDIAN
 #endif
 
 #define SNAPPY_MAJOR 1
-#define SNAPPY_MINOR 0
-#define SNAPPY_PATCHLEVEL 4
+#define SNAPPY_MINOR 1
+#define SNAPPY_PATCHLEVEL 3
 #define SNAPPY_VERSION \
     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
 
 #include <string>
 
 namespace snappy {
 
 typedef int8_t int8;
@@ -58,15 +58,32 @@ typedef int16_t int16;
 typedef uint16_t uint16;
 typedef int32_t int32;
 typedef uint32_t uint32;
 typedef int64_t int64;
 typedef uint64_t uint64;
 
 typedef std::string string;
 
+#ifndef DISALLOW_COPY_AND_ASSIGN
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
   TypeName(const TypeName&);               \
   void operator=(const TypeName&)
+#endif
+
+struct iovec {
+	void* iov_base;
+	size_t iov_len;
+};
+
+#if defined(_WIN32) || defined(_WIN64)
+#if defined(_WIN64)
+typedef __int64 LONG_PTR;
+#else
+typedef long LONG_PTR;
+#endif
+typedef LONG_PTR SSIZE_T;
+typedef SSIZE_T ssize_t;
+#endif
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
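Note on the additions above: the unconditional snappy::iovec struct and the Windows ssize_t typedef are there so that Snappy's scatter-I/O decompression entry points (see the "uncompressing to iovecs" ChangeLog entry further down) build without <sys/uio.h>. A minimal sketch of how a caller might use them against this header follows; the buffer sizes and sample data are illustrative assumptions, while snappy::Compress and snappy::RawUncompressToIOVec come from the upstream snappy.h shipped in this update:

    #include <cassert>
    #include <string>
    #include <vector>

    #include "snappy.h"  // also pulls in the snappy-stubs-public.h shown above

    int main() {
      // (1 << 16) | (1 << 8) | 3 == 0x010103 for the 1.1.3 headers imported here.
      static_assert(SNAPPY_VERSION == 0x010103, "expected snappy 1.1.3 headers");

      // Compress a sample buffer into a std::string.
      const std::string input(10000, 'a');
      std::string compressed;
      snappy::Compress(input.data(), input.size(), &compressed);

      // Decompress straight into two pre-allocated buffers (scatter I/O),
      // using the iovec type this header defines inside namespace snappy.
      std::vector<char> first(4000), second(6000);
      snappy::iovec iov[2];
      iov[0].iov_base = first.data();
      iov[0].iov_len = first.size();
      iov[1].iov_base = second.data();
      iov[1].iov_len = second.size();

      bool ok = snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
                                             iov, 2);
      assert(ok);
      assert(std::string(first.begin(), first.end()) +
                 std::string(second.begin(), second.end()) ==
             input);
      return 0;
    }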
--- a/other-licenses/snappy/src/COPYING
+++ b/other-licenses/snappy/src/COPYING
@@ -21,8 +21,34 @@ LIMITED TO, THE IMPLIED WARRANTIES OF ME
 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+===
+
+Some of the benchmark data in testdata/ is licensed differently:
+
+ - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and
+   is licensed under the Creative Commons Attribution 3.0 license
+   (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/
+   for more information.
+
+ - kppkn.gtb is taken from the Gaviota chess tablebase set, and
+   is licensed under the MIT License. See
+   https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1
+   for more information.
+
+ - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper
+   “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA
+   Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro,
+   which is licensed under the CC-BY license. See
+   http://www.ploscompbiol.org/static/license for more information.
+
+ - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project
+   Gutenberg. The first three have expired copyrights and are in the public
+   domain; the latter does not have expired copyright, but is still in the
+   public domain according to the license information
+   (http://www.gutenberg.org/ebooks/53).
--- a/other-licenses/snappy/src/ChangeLog
+++ b/other-licenses/snappy/src/ChangeLog
@@ -1,801 +1,2468 @@
-------------------------------------------------------------------------
-r49 | snappy.mirrorbot@gmail.com | 2011-09-15 11:50:05 +0200 (Thu, 15 Sep 2011) | 5 lines
-
-Fix public issue #50: Include generic byteswap macros.
-Also include Solaris 10 and FreeBSD versions.
-
-R=csilvers
+commit eb66d8176b3d1f560ee012e1b488cb1540c45f88
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 16:10:47 2015 +0200
 
-------------------------------------------------------------------------
-r48 | snappy.mirrorbot@gmail.com | 2011-08-10 20:57:27 +0200 (Wed, 10 Aug 2011) | 5 lines
-
-Partially fix public issue 50: Remove an extra comma from the end of some
-enum declarations, as it seems the Sun compiler does not like it.
+    Initialized members of SnappyArrayWriter and SnappyDecompressionValidator.
+    These members were almost surely initialized before use by other member
+    functions, but Coverity was warning about this. Eliminating these warnings
+    minimizes clutter in that report and the likelihood of overlooking a real bug.
+    
+    A=cmumford
+    R=jeff
 
-Based on patch by Travis Vitek.
-
-------------------------------------------------------------------------
-r47 | snappy.mirrorbot@gmail.com | 2011-08-10 20:44:16 +0200 (Wed, 10 Aug 2011) | 4 lines
-
-Use the right #ifdef test for sys/mman.h.
+commit b2312c4c25883ab03b5110f1b006dce95f419a4f
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 16:03:28 2015 +0200
 
-Based on patch by Travis Vitek.
-
-------------------------------------------------------------------------
-r46 | snappy.mirrorbot@gmail.com | 2011-08-10 03:22:09 +0200 (Wed, 10 Aug 2011) | 6 lines
-
-Fix public issue #47: Small comment cleanups in the unit test.
-
-Originally based on a patch by Patrick Pelletier.
-
-R=sanjay
+    Add support for Uncompress(source, sink). Various changes to allow
+    Uncompress(source, sink) to get the same performance as the different
+    variants of Uncompress to Cord/DataBuffer/String/FlatBuffer.
+    
+    Changes to efficiently support Uncompress(source, sink)
+    --------
+    
+    a) For strings - we add support to StringByteSink to do GetAppendBuffer so we
+       can write to it without copying.
+    b) For flat array buffers, we do GetAppendBuffer and see if we can get a full buffer.
+    
+    With the above changes we get performance with ByteSource/ByteSink
+    that is	very close to directly using flat arrays and strings.
+    
+    We add various benchmark cases to demonstrate that.
+    
+    Orthogonal change
+    ------------------
+    
+    Add support for TryFastAppend() for SnappyScatteredWriter.
+    
+    Benchmark results are below
+    
+    CPU: Intel Core2 dL1:32KB dL2:4096KB
+    Benchmark              Time(ns)    CPU(ns) Iterations
+    -----------------------------------------------------
+    BM_UFlat/0               109065     108996       6410 896.0MB/s  html
+    BM_UFlat/1              1012175    1012343        691 661.4MB/s  urls
+    BM_UFlat/2                26775      26771      26149 4.4GB/s  jpg
+    BM_UFlat/3                48947      48940      14363 1.8GB/s  pdf
+    BM_UFlat/4               441029     440835       1589 886.1MB/s  html4
+    BM_UFlat/5                39861      39880      17823 588.3MB/s  cp
+    BM_UFlat/6                18315      18300      38126 581.1MB/s  c
+    BM_UFlat/7                 5254       5254     100000 675.4MB/s  lsp
+    BM_UFlat/8              1568060    1567376        447 626.6MB/s  xls
+    BM_UFlat/9               337512     337734       2073 429.5MB/s  txt1
+    BM_UFlat/10              287269     287054       2434 415.9MB/s  txt2
+    BM_UFlat/11              890098     890219        787 457.2MB/s  txt3
+    BM_UFlat/12             1186593    1186863        590 387.2MB/s  txt4
+    BM_UFlat/13              573927     573318       1000 853.7MB/s  bin
+    BM_UFlat/14               64250      64294      10000 567.2MB/s  sum
+    BM_UFlat/15                7301       7300      96153 552.2MB/s  man
+    BM_UFlat/16              109617     109636       6375 1031.5MB/s  pb
+    BM_UFlat/17              364438     364497       1921 482.3MB/s  gaviota
+    BM_UFlatSink/0           108518     108465       6450 900.4MB/s  html
+    BM_UFlatSink/1           991952     991997        705 675.0MB/s  urls
+    BM_UFlatSink/2            26815      26798      26065 4.4GB/s  jpg
+    BM_UFlatSink/3            49127      49122      14255 1.8GB/s  pdf
+    BM_UFlatSink/4           436674     436731       1604 894.4MB/s  html4
+    BM_UFlatSink/5            39738      39733      17345 590.5MB/s  cp
+    BM_UFlatSink/6            18413      18416      37962 577.4MB/s  c
+    BM_UFlatSink/7             5677       5676     100000 625.2MB/s  lsp
+    BM_UFlatSink/8          1552175    1551026        451 633.2MB/s  xls
+    BM_UFlatSink/9           338526     338489       2065 428.5MB/s  txt1
+    BM_UFlatSink/10          289387     289307       2420 412.6MB/s  txt2
+    BM_UFlatSink/11          893803     893706        783 455.4MB/s  txt3
+    BM_UFlatSink/12         1195919    1195459        586 384.4MB/s  txt4
+    BM_UFlatSink/13          559637     559779       1000 874.3MB/s  bin
+    BM_UFlatSink/14           65073      65094      10000 560.2MB/s  sum
+    BM_UFlatSink/15            7618       7614      92823 529.5MB/s  man
+    BM_UFlatSink/16          110085     110121       6352 1027.0MB/s  pb
+    BM_UFlatSink/17          369196     368915       1896 476.5MB/s  gaviota
+    BM_UValidate/0            46954      46957      14899 2.0GB/s  html
+    BM_UValidate/1           500621     500868       1000 1.3GB/s  urls
+    BM_UValidate/2              283        283    2481447 417.2GB/s  jpg
+    BM_UValidate/3            16230      16228      43137 5.4GB/s  pdf
+    BM_UValidate/4           189129     189193       3701 2.0GB/s  html4
+    
+    A=uday
+    R=sanjay
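For illustration, a minimal sketch of the Uncompress(source, sink) entry point described in the commit message above, assuming the ByteArraySource and UncheckedByteArraySink helpers declared in snappy-sinksource.h and an arbitrary sample input:

    #include <cassert>
    #include <string>

    #include "snappy-sinksource.h"
    #include "snappy.h"

    int main() {
      // Compress a sample string first.
      const std::string input(4096, 'x');
      std::string compressed;
      snappy::Compress(input.data(), input.size(), &compressed);

      // Decompress via the Source/Sink interface added by this commit.
      size_t uncompressed_length = 0;
      snappy::GetUncompressedLength(compressed.data(), compressed.size(),
                                    &uncompressed_length);
      std::string output(uncompressed_length, '\0');

      snappy::ByteArraySource source(compressed.data(), compressed.size());
      snappy::UncheckedByteArraySink sink(&output[0]);
      bool ok = snappy::Uncompress(&source, &sink);
      assert(ok && output == input);
      return 0;
    }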
 
-------------------------------------------------------------------------
-r45 | snappy.mirrorbot@gmail.com | 2011-08-10 03:14:43 +0200 (Wed, 10 Aug 2011) | 8 lines
+commit b2ad96006741d40935db2f73194a3e489b467338
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 15:48:29 2015 +0200
+
+    Changes to eliminate compiler warnings on MSVC
+    
+    This code was not compiling under Visual Studio 2013 with warnings being treated
+    as errors. Specifically:
+    
+    1. Changed int -> size_t to eliminate signed/unsigned mismatch warning.
+    2. Added some missing return values to functions.
+    3. Inserting character instead of integer literals into strings to avoid type
+       conversions.
+    
+    A=cmumford
+    R=jeff
 
-Fix public issue #46: Format description said "3-byte offset"
-instead of "4-byte offset" for the longest copies.
+commit e7a897e187e90b33f87bd9e64872cf561de9ebca
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 15:45:11 2015 +0200
 
-Also fix an inconsistency in the heading for section 2.2.3.
-Both patches by Patrick Pelletier.
+    Fixed unit tests to compile under MSVC.
+    
+    1. Including config.h in test.
+    2. Including windows.h before zippy-test.h.
+    3. Removed definition of WIN32_LEAN_AND_MEAN. This caused problems in
+       build environments that define WIN32_LEAN_AND_MEAN as our
+       definition didn't check for prior existence. This constant is old
+       and no longer needed anyhow.
+    4. Disable MSVC warning 4722 since ~LogMessageCrash() never returns.
+    
+    A=cmumford
+    R=jeff
 
-R=csilvers
+commit 86eb8b152bdb065ad11bf331a9f7d65b72616acf
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 15:41:30 2015 +0200
 
-------------------------------------------------------------------------
-r44 | snappy.mirrorbot@gmail.com | 2011-06-28 13:40:25 +0200 (Tue, 28 Jun 2011) | 8 lines
-
-Fix public issue #44: Make the definition and declaration of CompressFragment
-identical, even regarding cv-qualifiers.
-
-This is required to work around a bug in the Solaris Studio C++ compiler
-(it does not properly disregard cv-qualifiers when doing name mangling).
-
-R=sanjay
-
-------------------------------------------------------------------------
-r43 | snappy.mirrorbot@gmail.com | 2011-06-04 12:19:05 +0200 (Sat, 04 Jun 2011) | 7 lines
-
-Correct an inaccuracy in the Snappy format description. 
-(I stumbled into this when changing the way we decompress literals.) 
-
-R=csilvers
-
-Revision created by MOE tool push_codebase.
+    Change a few branch annotations that profiling found to be wrong.
+    Overall performance is neutral or slightly positive.
+    
+    Westmere (64-bit, opt):
+    
+    Benchmark               Base (ns)  New (ns)                                Improvement
+    --------------------------------------------------------------------------------------
+    BM_UFlat/0                  73798     71464  1.3GB/s  html                    +3.3%
+    BM_UFlat/1                 715223    704318  953.5MB/s  urls                  +1.5%
+    BM_UFlat/2                   8137      8871  13.0GB/s  jpg                    -8.3%
+    BM_UFlat/3                    200       204  935.5MB/s  jpg_200               -2.0%
+    BM_UFlat/4                  21627     21281  4.5GB/s  pdf                     +1.6%
+    BM_UFlat/5                 302806    290350  1.3GB/s  html4                   +4.3%
+    BM_UFlat/6                 218920    219017  664.1MB/s  txt1                  -0.0%
+    BM_UFlat/7                 190437    191212  626.1MB/s  txt2                  -0.4%
+    BM_UFlat/8                 584192    580484  703.4MB/s  txt3                  +0.6%
+    BM_UFlat/9                 776537    779055  591.6MB/s  txt4                  -0.3%
+    BM_UFlat/10                 76056     72606  1.5GB/s  pb                      +4.8%
+    BM_UFlat/11                235962    239043  737.4MB/s  gaviota               -1.3%
+    BM_UFlat/12                 28049     28000  840.1MB/s  cp                    +0.2%
+    BM_UFlat/13                 12225     12021  886.9MB/s  c                     +1.7%
+    BM_UFlat/14                  3362      3544  1004.0MB/s  lsp                  -5.1%
+    BM_UFlat/15                937015    939206  1048.9MB/s  xls                  -0.2%
+    BM_UFlat/16                   236       233  823.1MB/s  xls_200               +1.3%
+    BM_UFlat/17                373170    361947  1.3GB/s  bin                     +3.1%
+    BM_UFlat/18                   264       264  725.5MB/s  bin_200               +0.0%
+    BM_UFlat/19                 42834     43577  839.2MB/s  sum                   -1.7%
+    BM_UFlat/20                  4770      4736  853.6MB/s  man                   +0.7%
+    BM_UValidate/0              39671     39944  2.4GB/s  html                    -0.7%
+    BM_UValidate/1             443391    443391  1.5GB/s  urls                    +0.0%
+    BM_UValidate/2                163       163  703.3GB/s  jpg                   +0.0%
+    BM_UValidate/3                113       112  1.7GB/s  jpg_200                 +0.9%
+    BM_UValidate/4               7555      7608  12.6GB/s  pdf                    -0.7%
+    BM_ZFlat/0                 157616    157568  621.5MB/s  html (22.31 %)        +0.0%
+    BM_ZFlat/1                1997290   2014486  333.4MB/s  urls (47.77 %)        -0.9%
+    BM_ZFlat/2                  23035     22237  5.2GB/s  jpg (99.95 %)           +3.6%
+    BM_ZFlat/3                    539       540  354.5MB/s  jpg_200 (73.00 %)     -0.2%
+    BM_ZFlat/4                  80709     81369  1.2GB/s  pdf (81.85 %)           -0.8%
+    BM_ZFlat/5                 639059    639220  613.0MB/s  html4 (22.51 %)       -0.0%
+    BM_ZFlat/6                 577203    583370  249.3MB/s  txt1 (57.87 %)        -1.1%
+    BM_ZFlat/7                 510887    516094  232.0MB/s  txt2 (61.93 %)        -1.0%
+    BM_ZFlat/8                1535843   1556973  262.2MB/s  txt3 (54.92 %)        -1.4%
+    BM_ZFlat/9                2070068   2102380  219.3MB/s  txt4 (66.22 %)        -1.5%
+    BM_ZFlat/10                152396    152148  745.5MB/s  pb (19.64 %)          +0.2%
+    BM_ZFlat/11                447367    445859  395.4MB/s  gaviota (37.72 %)     +0.3%
+    BM_ZFlat/12                 76375     76797  306.3MB/s  cp (48.12 %)          -0.5%
+    BM_ZFlat/13                 31518     31987  333.3MB/s  c (42.40 %)           -1.5%
+    BM_ZFlat/14                 10598     10827  328.6MB/s  lsp (48.37 %)         -2.1%
+    BM_ZFlat/15               1782243   1802728  546.5MB/s  xls (41.23 %)         -1.1%
+    BM_ZFlat/16                   526       539  355.0MB/s  xls_200 (78.00 %)     -2.4%
+    BM_ZFlat/17                598141    597311  822.1MB/s  bin (18.11 %)         +0.1%
+    BM_ZFlat/18                   121       120  1.6GB/s  bin_200 (7.50 %)        +0.8%
+    BM_ZFlat/19                109981    112173  326.0MB/s  sum (48.96 %)         -2.0%
+    BM_ZFlat/20                 14355     14575  277.4MB/s  man (59.36 %)         -1.5%
+    Sum of all benchmarks    33882722  33879325                                   +0.0%
+    
+    Sandy Bridge (64-bit, opt):
+    
+    Benchmark               Base (ns)  New (ns)                                Improvement
+    --------------------------------------------------------------------------------------
+    BM_UFlat/0                  43764     41600  2.3GB/s  html                    +5.2%
+    BM_UFlat/1                 517990    507058  1.3GB/s  urls                    +2.2%
+    BM_UFlat/2                   6625      5529  20.8GB/s  jpg                   +19.8%
+    BM_UFlat/3                    154       155  1.2GB/s  jpg_200                 -0.6%
+    BM_UFlat/4                  12795     11747  8.1GB/s  pdf                     +8.9%
+    BM_UFlat/5                 200335    193413  2.0GB/s  html4                   +3.6%
+    BM_UFlat/6                 156574    156426  929.2MB/s  txt1                  +0.1%
+    BM_UFlat/7                 137574    137464  870.4MB/s  txt2                  +0.1%
+    BM_UFlat/8                 422551    421603  967.4MB/s  txt3                  +0.2%
+    BM_UFlat/9                 577749    578985  795.6MB/s  txt4                  -0.2%
+    BM_UFlat/10                 42329     39362  2.8GB/s  pb                      +7.5%
+    BM_UFlat/11                170615    169751  1037.9MB/s  gaviota              +0.5%
+    BM_UFlat/12                 12800     12719  1.8GB/s  cp                      +0.6%
+    BM_UFlat/13                  6585      6579  1.6GB/s  c                       +0.1%
+    BM_UFlat/14                  2066      2044  1.7GB/s  lsp                     +1.1%
+    BM_UFlat/15                750861    746911  1.3GB/s  xls                     +0.5%
+    BM_UFlat/16                   188       192  996.0MB/s  xls_200               -2.1%
+    BM_UFlat/17                271622    264333  1.8GB/s  bin                     +2.8%
+    BM_UFlat/18                   208       207  923.6MB/s  bin_200               +0.5%
+    BM_UFlat/19                 24667     24845  1.4GB/s  sum                     -0.7%
+    BM_UFlat/20                  2663      2662  1.5GB/s  man                     +0.0%
+    BM_ZFlat/0                 115173    115624  846.5MB/s  html (22.31 %)        -0.4%
+    BM_ZFlat/1                1530331   1537769  436.5MB/s  urls (47.77 %)        -0.5%
+    BM_ZFlat/2                  17503     17013  6.8GB/s  jpg (99.95 %)           +2.9%
+    BM_ZFlat/3                    385       385  496.3MB/s  jpg_200 (73.00 %)     +0.0%
+    BM_ZFlat/4                  61753     61540  1.6GB/s  pdf (81.85 %)           +0.3%
+    BM_ZFlat/5                 484806    483356  810.1MB/s  html4 (22.51 %)       +0.3%
+    BM_ZFlat/6                 464143    467609  310.9MB/s  txt1 (57.87 %)        -0.7%
+    BM_ZFlat/7                 410315    413319  289.5MB/s  txt2 (61.93 %)        -0.7%
+    BM_ZFlat/8                1244082   1249381  326.5MB/s  txt3 (54.92 %)        -0.4%
+    BM_ZFlat/9                1696914   1709685  269.4MB/s  txt4 (66.22 %)        -0.7%
+    BM_ZFlat/10                104148    103372  1096.7MB/s  pb (19.64 %)         +0.8%
+    BM_ZFlat/11                363522    359722  489.8MB/s  gaviota (37.72 %)     +1.1%
+    BM_ZFlat/12                 47021     50095  469.3MB/s  cp (48.12 %)          -6.1%
+    BM_ZFlat/13                 16888     16985  627.4MB/s  c (42.40 %)           -0.6%
+    BM_ZFlat/14                  5496      5469  650.3MB/s  lsp (48.37 %)         +0.5%
+    BM_ZFlat/15               1460713   1448760  679.5MB/s  xls (41.23 %)         +0.8%
+    BM_ZFlat/16                   387       393  486.8MB/s  xls_200 (78.00 %)     -1.5%
+    BM_ZFlat/17                457654    451462  1086.6MB/s  bin (18.11 %)        +1.4%
+    BM_ZFlat/18                    97        87  2.1GB/s  bin_200 (7.50 %)       +11.5%
+    BM_ZFlat/19                 77904     80924  451.7MB/s  sum (48.96 %)         -3.7%
+    BM_ZFlat/20                  7648      7663  527.1MB/s  man (59.36 %)         -0.2%
+    Sum of all benchmarks    25493635  25482069                                   +0.0%
+    
+    A=dehao
+    R=sesse
 
-------------------------------------------------------------------------
-r42 | snappy.mirrorbot@gmail.com | 2011-06-03 22:53:06 +0200 (Fri, 03 Jun 2011) | 50 lines
+commit 11ccdfb868387e56d845766d89ddab9d489c4128
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 16:07:58 2015 +0200
+
+    Sync with various Google-internal changes.
+    
+    Should not mean much for the open-source version.
+
+commit 22acaf438ed93ab21a2ff1919d173206798b996e
+Author: Steinar H. Gunderson <sesse@google.com>
+Date:   Mon Jun 22 15:39:08 2015 +0200
 
-Speed up decompression by removing a fast-path attempt.
+    Change some internal path names.
+    
+    This is mostly to sync up with some changes from Google's internal
+    repositories; it does not affect the open-source distribution in itself.
+
+commit 1ff9be9b8fafc8528ca9e055646f5932aa5db9c4
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Feb 28 11:18:07 2014 +0000
+
+    Release Snappy 1.1.2.
+    
+    R=jeff
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@84 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 19690d78e83f8963f497585031efa3d9ca66b807
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Feb 19 10:31:49 2014 +0000
 
-Whenever we try to enter a copy fast-path, there is a certain cost in checking
-that all the preconditions are in place, but it's normally offset by the fact
-that we can usually take the cheaper path. However, in a certain path we've
-already established that "avail < literal_length", which usually means that
-either the available space is small, or the literal is big. Both will disqualify
-us from taking the fast path, and thus we take the hit from the precondition
-checking without gaining much from having a fast path. Thus, simply don't try
-the fast path in this situation -- we're already on a slow path anyway
-(one where we need to refill more data from the reader).
+    Fix public issue 82: Stop distributing benchmark data files that have
+    unclear or unsuitable licensing.
+    
+    In general, we replace the files we can with liberally licensed data,
+    and remove all the others (in particular all the parts of the Canterbury
+    corpus that are not clearly in the public domain). The replacements
+    do not always have the exact same characteristics as the original ones,
+    but they are more than good enough to be useful for benchmarking.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@83 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit f82bff66afe0de4c9ae22f8c4ef84e3c2233e799
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Oct 25 13:31:27 2013 +0000
 
-I'm a bit surprised at how much this gained; it could be that this path is
-more common than I thought, or that the simpler structure somehow makes the
-compiler happier. I haven't looked at the assembler, but it's a win across
-the board on both Core 2, Core i7 and Opteron, at least for the cases we
-typically care about. The gains seem to be the largest on Core i7, though.
-Results from my Core i7 workstation:
+    Add support for padding in the Snappy framed format.
+    
+    This is specifically motivated by DICOM's demands that embedded data
+    must be of an even number of bytes, but could in principle be used for
+    any sort of padding/alignment needed.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@82 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit eeead8dc38ea359f027fb6e89f345448e8e9d723
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Oct 15 15:21:31 2013 +0000
+
+    Release Snappy 1.1.1.
+    
+    R=jeff
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@81 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-  Benchmark            Time(ns)    CPU(ns) Iterations
-  ---------------------------------------------------
-  BM_UFlat/0              73337      73091     190996 1.3GB/s  html      [ +1.7%]
-  BM_UFlat/1             696379     693501      20173 965.5MB/s  urls    [ +2.7%]
-  BM_UFlat/2               9765       9734    1472135 12.1GB/s  jpg      [ +0.7%]
-  BM_UFlat/3              29720      29621     472973 3.0GB/s  pdf       [ +1.8%]
-  BM_UFlat/4             294636     293834      47782 1.3GB/s  html4     [ +2.3%]
-  BM_UFlat/5              28399      28320     494700 828.5MB/s  cp      [ +3.5%]
-  BM_UFlat/6              12795      12760    1000000 833.3MB/s  c       [ +1.2%]
-  BM_UFlat/7               3984       3973    3526448 893.2MB/s  lsp     [ +5.7%]
-  BM_UFlat/8             991996     989322      14141 992.6MB/s  xls     [ +3.3%]
-  BM_UFlat/9             228620     227835      61404 636.6MB/s  txt1    [ +4.0%]
-  BM_UFlat/10            197114     196494      72165 607.5MB/s  txt2    [ +3.5%]
-  BM_UFlat/11            605240     603437      23217 674.4MB/s  txt3    [ +3.7%]
-  BM_UFlat/12            804157     802016      17456 573.0MB/s  txt4    [ +3.9%]
-  BM_UFlat/13            347860     346998      40346 1.4GB/s  bin       [ +1.2%]
-  BM_UFlat/14             44684      44559     315315 818.4MB/s  sum     [ +2.3%]
-  BM_UFlat/15              5120       5106    2739726 789.4MB/s  man     [ +3.3%]
-  BM_UFlat/16             76591      76355     183486 1.4GB/s  pb        [ +2.8%]
-  BM_UFlat/17            238564     237828      58824 739.1MB/s  gaviota [ +1.6%]
-  BM_UValidate/0          42194      42060     333333 2.3GB/s  html      [ -0.1%]
-  BM_UValidate/1         433182     432005      32407 1.5GB/s  urls      [ -0.1%]
-  BM_UValidate/2            197        196   71428571 603.3GB/s  jpg     [ +0.5%]
-  BM_UValidate/3          14494      14462     972222 6.1GB/s  pdf       [ +0.5%]
-  BM_UValidate/4         168444     167836      83832 2.3GB/s  html4     [ +0.1%]
-	
-R=jeff
+commit 6bc39e24c76adbbff26ae629fafbf7dfc795f554
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Aug 13 12:55:00 2013 +0000
+
+    Add autoconf tests for size_t and ssize_t. Sort-of resolves public issue 79;
+    it would solve the problem if MSVC typically used autoconf. However, it gives
+    a natural place (config.h) to put the typedef even for MSVC.
+    
+    R=jsbell
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@80 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 7c3c01df77e191ad1f8377448961fe88db2802e9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon Jul 29 11:06:44 2013 +0000
+
+    When we compare the number of bytes produced with the offset for a
+    backreference, make the signedness of the bytes produced clear,
+    by sticking it into a size_t. This avoids a signed/unsigned compare
+    warning from MSVC (public issue 71), and also is slightly clearer.
+    
+    Since the line is now so long the explanatory comment about the -1u
+    trick has to go somewhere else anyway, I used the opportunity to
+    explain it in slightly more detail.
+    
+    This is a purely stylistic change; the emitted assembler from GCC
+    is identical.
+    
+    R=jeff
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@79 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 2f0aaf8631d8fb2475ca1a6687c181efb14ed286
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sun Jun 30 19:24:03 2013 +0000
 
-Revision created by MOE tool push_codebase.
-
-------------------------------------------------------------------------
-r41 | snappy.mirrorbot@gmail.com | 2011-06-03 22:47:14 +0200 (Fri, 03 Jun 2011) | 43 lines
-
-Speed up decompression by not needing a lookup table for literal items.
-
-Looking up into and decoding the values from char_table has long shown up as a
-hotspot in the decompressor. While it turns out that it's hard to make a more
-efficient decoder for the copy ops, the literals are simple enough that we can
-decode them without needing a table lookup. (This means that 1/4 of the table
-is now unused, although that in itself doesn't buy us anything.)
-
-The gains are small, but definitely present; some tests win as much as 10%,
-but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
-Core 2 and Opteron show similar results. (I've run with more iterations
-than unusual to make sure the smaller gains don't drown entirely in noise.)
+    In the fast path for decompressing literals, instead of checking
+    whether there's 16 bytes free and then checking right afterwards
+    (when having subtracted the literal size) that there are now
+    5 bytes free, just check once for 21 bytes. This skips a compare
+    and a branch; although it is easily predictable, it is still
+    a few cycles on a fast path that we would like to get rid of.
+    
+    Benchmarking this yields very confusing results. On open-source
+    GCC 4.8.1 on Haswell, we get exactly the expected results; the
+    benchmarks where we hit the fast path for literals (in particular
+    the two HTML benchmarks and the protobuf benchmark) give very nice
+    speedups, and the others are not really affected.
+    
+    However, benchmarks with Google's GCC branch on other hardware
+    is much less clear. It seems that we have a weak loss in some cases
+    (and the win for the “typical” win cases are not nearly as clear),
+    but that it depends on microarchitecture and plain luck in how we run
+    the benchmark. Looking at the generated assembler, it seems that
+    the removal of the if causes other large-scale changes in how the
+    function is laid out, which makes it likely that this is just bad luck.
+    
+    Thus, we should keep this change, even though its exact current impact is
+    unclear; it's a sensible change per se, and dropping it on the basis of
+    microoptimization for a given compiler (or even branch of a compiler)
+    would seem like a bad strategy in the long run.
+    
+    Microbenchmark results (all in 64-bit, opt mode):
+    
+      Nehalem, Google GCC:
+    
+      Benchmark                Base (ns)  New (ns)                       Improvement
+      ------------------------------------------------------------------------------
+      BM_UFlat/0                   76747     75591  1.3GB/s  html           +1.5%
+      BM_UFlat/1                  765756    757040  886.3MB/s  urls         +1.2%
+      BM_UFlat/2                   10867     10893  10.9GB/s  jpg           -0.2%
+      BM_UFlat/3                     124       131  1.4GB/s  jpg_200        -5.3%
+      BM_UFlat/4                   31663     31596  2.8GB/s  pdf            +0.2%
+      BM_UFlat/5                  314162    308176  1.2GB/s  html4          +1.9%
+      BM_UFlat/6                   29668     29746  790.6MB/s  cp           -0.3%
+      BM_UFlat/7                   12958     13386  796.4MB/s  c            -3.2%
+      BM_UFlat/8                    3596      3682  966.0MB/s  lsp          -2.3%
+      BM_UFlat/9                 1019193   1033493  953.3MB/s  xls          -1.4%
+      BM_UFlat/10                    239       247  775.3MB/s  xls_200      -3.2%
+      BM_UFlat/11                 236411    240271  606.9MB/s  txt1         -1.6%
+      BM_UFlat/12                 206639    209768  571.2MB/s  txt2         -1.5%
+      BM_UFlat/13                 627803    635722  641.4MB/s  txt3         -1.2%
+      BM_UFlat/14                 845932    857816  538.2MB/s  txt4         -1.4%
+      BM_UFlat/15                 402107    391670  1.2GB/s  bin            +2.7%
+      BM_UFlat/16                    283       279  683.6MB/s  bin_200      +1.4%
+      BM_UFlat/17                  46070     46815  781.5MB/s  sum          -1.6%
+      BM_UFlat/18                   5053      5163  782.0MB/s  man          -2.1%
+      BM_UFlat/19                  79721     76581  1.4GB/s  pb             +4.1%
+      BM_UFlat/20                 251158    252330  697.5MB/s  gaviota      -0.5%
+      Sum of all benchmarks      4966150   4980396                          -0.3%
+    
+    
+      Sandy Bridge, Google GCC:
+    
+      Benchmark                Base (ns)  New (ns)                       Improvement
+      ------------------------------------------------------------------------------
+      BM_UFlat/0                   42850     42182  2.3GB/s  html           +1.6%
+      BM_UFlat/1                  525660    515816  1.3GB/s  urls           +1.9%
+      BM_UFlat/2                    7173      7283  16.3GB/s  jpg           -1.5%
+      BM_UFlat/3                      92        91  2.1GB/s  jpg_200        +1.1%
+      BM_UFlat/4                   15147     14872  5.9GB/s  pdf            +1.8%
+      BM_UFlat/5                  199936    192116  2.0GB/s  html4          +4.1%
+      BM_UFlat/6                   12796     12443  1.8GB/s  cp             +2.8%
+      BM_UFlat/7                    6588      6400  1.6GB/s  c              +2.9%
+      BM_UFlat/8                    2010      1951  1.8GB/s  lsp            +3.0%
+      BM_UFlat/9                  761124    763049  1.3GB/s  xls            -0.3%
+      BM_UFlat/10                    186       189  1016.1MB/s  xls_200     -1.6%
+      BM_UFlat/11                 159354    158460  918.6MB/s  txt1         +0.6%
+      BM_UFlat/12                 139732    139950  856.1MB/s  txt2         -0.2%
+      BM_UFlat/13                 429917    425027  961.7MB/s  txt3         +1.2%
+      BM_UFlat/14                 585255    587324  785.8MB/s  txt4         -0.4%
+      BM_UFlat/15                 276186    266173  1.8GB/s  bin            +3.8%
+      BM_UFlat/16                    205       207  925.5MB/s  bin_200      -1.0%
+      BM_UFlat/17                  24925     24935  1.4GB/s  sum            -0.0%
+      BM_UFlat/18                   2632      2576  1.5GB/s  man            +2.2%
+      BM_UFlat/19                  40546     39108  2.8GB/s  pb             +3.7%
+      BM_UFlat/20                 175803    168209  1048.9MB/s  gaviota     +4.5%
+      Sum of all benchmarks      3408117   3368361                          +1.2%
+    
+    
+      Haswell, upstream GCC 4.8.1:
+    
+      Benchmark                Base (ns)  New (ns)                       Improvement
+      ------------------------------------------------------------------------------
+      BM_UFlat/0                   46308     40641  2.3GB/s  html          +13.9%
+      BM_UFlat/1                  513385    514706  1.3GB/s  urls           -0.3%
+      BM_UFlat/2                    6197      6151  19.2GB/s  jpg           +0.7%
+      BM_UFlat/3                      61        61  3.0GB/s  jpg_200        +0.0%
+      BM_UFlat/4                   13551     13429  6.5GB/s  pdf            +0.9%
+      BM_UFlat/5                  198317    190243  2.0GB/s  html4          +4.2%
+      BM_UFlat/6                   14768     12560  1.8GB/s  cp            +17.6%
+      BM_UFlat/7                    6453      6447  1.6GB/s  c              +0.1%
+      BM_UFlat/8                    1991      1980  1.8GB/s  lsp            +0.6%
+      BM_UFlat/9                  766947    770424  1.2GB/s  xls            -0.5%
+      BM_UFlat/10                    170       169  1.1GB/s  xls_200        +0.6%
+      BM_UFlat/11                 164350    163554  888.7MB/s  txt1         +0.5%
+      BM_UFlat/12                 145444    143830  832.1MB/s  txt2         +1.1%
+      BM_UFlat/13                 437849    438413  929.2MB/s  txt3         -0.1%
+      BM_UFlat/14                 603587    605309  759.8MB/s  txt4         -0.3%
+      BM_UFlat/15                 249799    248067  1.9GB/s  bin            +0.7%
+      BM_UFlat/16                    191       188  1011.4MB/s  bin_200     +1.6%
+      BM_UFlat/17                  26064     24778  1.4GB/s  sum            +5.2%
+      BM_UFlat/18                   2620      2601  1.5GB/s  man            +0.7%
+      BM_UFlat/19                  44551     37373  3.0GB/s  pb            +19.2%
+      BM_UFlat/20                 165408    164584  1.0GB/s  gaviota        +0.5%
+      Sum of all benchmarks      3408011   3385508                          +0.7%
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@78 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-  Benchmark            Time(ns)    CPU(ns) Iterations
-  ---------------------------------------------------
-  BM_UFlat/0              74665      74428     182055 1.3GB/s  html      [ +3.1%]
-  BM_UFlat/1             714106     711997      19663 940.4MB/s  urls    [ +4.4%]
-  BM_UFlat/2               9820       9789    1427115 12.1GB/s  jpg      [ -1.2%]
-  BM_UFlat/3              30461      30380     465116 2.9GB/s  pdf       [ +0.8%]
-  BM_UFlat/4             301445     300568      46512 1.3GB/s  html4     [ +2.2%]
-  BM_UFlat/5              29338      29263     479452 801.8MB/s  cp      [ +1.6%]
-  BM_UFlat/6              13004      12970    1000000 819.9MB/s  c       [ +2.1%]
-  BM_UFlat/7               4180       4168    3349282 851.4MB/s  lsp     [ +1.3%]
-  BM_UFlat/8            1026149    1024000      10000 959.0MB/s  xls     [+10.7%]
-  BM_UFlat/9             237441     236830      59072 612.4MB/s  txt1    [ +0.3%]
-  BM_UFlat/10            203966     203298      69307 587.2MB/s  txt2    [ +0.8%]
-  BM_UFlat/11            627230     625000      22400 651.2MB/s  txt3    [ +0.7%]
-  BM_UFlat/12            836188     833979      16787 551.0MB/s  txt4    [ +1.3%]
-  BM_UFlat/13            351904     350750      39886 1.4GB/s  bin       [ +3.8%]
-  BM_UFlat/14             45685      45562     308370 800.4MB/s  sum     [ +5.9%]
-  BM_UFlat/15              5286       5270    2656546 764.9MB/s  man     [ +1.5%]
-  BM_UFlat/16             78774      78544     178117 1.4GB/s  pb        [ +4.3%]
-  BM_UFlat/17            242270     241345      58091 728.3MB/s  gaviota [ +1.2%]
-  BM_UValidate/0          42149      42000     333333 2.3GB/s  html      [ -3.0%]
-  BM_UValidate/1         432741     431303      32483 1.5GB/s  urls      [ +7.8%]
-  BM_UValidate/2            198        197   71428571 600.7GB/s  jpg     [+16.8%]
-  BM_UValidate/3          14560      14521     965517 6.1GB/s  pdf       [ -4.1%]
-  BM_UValidate/4         169065     168671      83832 2.3GB/s  html4     [ -2.9%]
-
-R=jeff
-
-Revision created by MOE tool push_codebase.
-
-------------------------------------------------------------------------
-r40 | snappy.mirrorbot@gmail.com | 2011-06-03 00:57:41 +0200 (Fri, 03 Jun 2011) | 2 lines
+commit 062bf544a61107db730b6d08cb0b159c4dd9b24c
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Jun 14 21:42:26 2013 +0000
 
-Release Snappy 1.0.3.
-
-------------------------------------------------------------------------
-r39 | snappy.mirrorbot@gmail.com | 2011-06-02 20:06:54 +0200 (Thu, 02 Jun 2011) | 11 lines
-
-Remove an unneeded goto in the decompressor; it turns out that the
-state of ip_ after decompression (or attempted decompresion) is
-completely irrelevant, so we don't need the trailer.
-
-Performance is, as expected, mostly flat -- there's a curious ~3–5%
-loss in the “lsp” test, but that test case is so short it is hard to say
-anything definitive about why (most likely, it's some sort of
-unrelated effect).
-
-R=jeff
+    Make the two IncrementalCopy* functions take in an ssize_t instead of a len,
+    in order to avoid having to do 32-to-64-bit signed conversions on a hot path
+    during decompression. (Also fixes some MSVC warnings, mentioned in public
+    issue 75, but more of those remain.) They cannot be size_t because we expect
+    them to go negative and test for that.
+    
+    This saves a few movzwl instructions, yielding ~2% speedup in decompression.
+    
+    
+    Sandy Bridge:
+    
+    Benchmark                          Base (ns)  New (ns)                                Improvement
+    -------------------------------------------------------------------------------------------------
+    BM_UFlat/0                             48009     41283  2.3GB/s  html                   +16.3%
+    BM_UFlat/1                            531274    513419  1.3GB/s  urls                    +3.5%
+    BM_UFlat/2                              7378      7062  16.8GB/s  jpg                    +4.5%
+    BM_UFlat/3                                92        92  2.0GB/s  jpg_200                 +0.0%
+    BM_UFlat/4                             15057     14974  5.9GB/s  pdf                     +0.6%
+    BM_UFlat/5                            204323    193140  2.0GB/s  html4                   +5.8%
+    BM_UFlat/6                             13282     12611  1.8GB/s  cp                      +5.3%
+    BM_UFlat/7                              6511      6504  1.6GB/s  c                       +0.1%
+    BM_UFlat/8                              2014      2030  1.7GB/s  lsp                     -0.8%
+    BM_UFlat/9                            775909    768336  1.3GB/s  xls                     +1.0%
+    BM_UFlat/10                              182       184  1043.2MB/s  xls_200              -1.1%
+    BM_UFlat/11                           167352    161630  901.2MB/s  txt1                  +3.5%
+    BM_UFlat/12                           147393    142246  842.8MB/s  txt2                  +3.6%
+    BM_UFlat/13                           449960    432853  944.4MB/s  txt3                  +4.0%
+    BM_UFlat/14                           620497    594845  775.9MB/s  txt4                  +4.3%
+    BM_UFlat/15                           265610    267356  1.8GB/s  bin                     -0.7%
+    BM_UFlat/16                              206       205  932.7MB/s  bin_200               +0.5%
+    BM_UFlat/17                            25561     24730  1.4GB/s  sum                     +3.4%
+    BM_UFlat/18                             2620      2644  1.5GB/s  man                     -0.9%
+    BM_UFlat/19                            45766     38589  2.9GB/s  pb                     +18.6%
+    BM_UFlat/20                           171107    169832  1039.5MB/s  gaviota              +0.8%
+    Sum of all benchmarks                3500103   3394565                                   +3.1%
+    
+    
+    Westmere:
+    
+    Benchmark                          Base (ns)  New (ns)                                Improvement
+    -------------------------------------------------------------------------------------------------
+    BM_UFlat/0                             72624     71526  1.3GB/s  html                    +1.5%
+    BM_UFlat/1                            735821    722917  930.8MB/s  urls                  +1.8%
+    BM_UFlat/2                             10450     10172  11.7GB/s  jpg                    +2.7%
+    BM_UFlat/3                               117       117  1.6GB/s  jpg_200                 +0.0%
+    BM_UFlat/4                             29817     29648  3.0GB/s  pdf                     +0.6%
+    BM_UFlat/5                            297126    293073  1.3GB/s  html4                   +1.4%
+    BM_UFlat/6                             28252     27994  842.0MB/s  cp                    +0.9%
+    BM_UFlat/7                             12672     12391  862.1MB/s  c                     +2.3%
+    BM_UFlat/8                              3507      3425  1040.9MB/s  lsp                  +2.4%
+    BM_UFlat/9                           1004268    969395  1018.0MB/s  xls                  +3.6%
+    BM_UFlat/10                              233       227  844.8MB/s  xls_200               +2.6%
+    BM_UFlat/11                           230054    224981  647.8MB/s  txt1                  +2.3%
+    BM_UFlat/12                           201229    196447  610.5MB/s  txt2                  +2.4%
+    BM_UFlat/13                           609547    596761  685.3MB/s  txt3                  +2.1%
+    BM_UFlat/14                           824362    804821  573.8MB/s  txt4                  +2.4%
+    BM_UFlat/15                           371095    374899  1.3GB/s  bin                     -1.0%
+    BM_UFlat/16                              267       267  717.8MB/s  bin_200               +0.0%
+    BM_UFlat/17                            44623     43828  835.9MB/s  sum                   +1.8%
+    BM_UFlat/18                             5077      4815  841.0MB/s  man                   +5.4%
+    BM_UFlat/19                            74964     73210  1.5GB/s  pb                      +2.4%
+    BM_UFlat/20                           237987    236745  746.0MB/s  gaviota               +0.5%
+    Sum of all benchmarks                4794092   4697659                                   +2.1%
+    
+    
+    Istanbul:
+    
+    Benchmark                          Base (ns)  New (ns)                                Improvement
+    -------------------------------------------------------------------------------------------------
+    BM_UFlat/0                             98614     96376  1020.4MB/s  html                 +2.3%
+    BM_UFlat/1                            963740    953241  707.2MB/s  urls                  +1.1%
+    BM_UFlat/2                             25042     24769  4.8GB/s  jpg                     +1.1%
+    BM_UFlat/3                               180       180  1065.6MB/s  jpg_200              +0.0%
+    BM_UFlat/4                             45942     45403  1.9GB/s  pdf                     +1.2%
+    BM_UFlat/5                            400135    390226  1008.2MB/s  html4                +2.5%
+    BM_UFlat/6                             37768     37392  631.9MB/s  cp                    +1.0%
+    BM_UFlat/7                             18585     18200  588.2MB/s  c                     +2.1%
+    BM_UFlat/8                              5751      5690  627.7MB/s  lsp                   +1.1%
+    BM_UFlat/9                           1543154   1542209  641.4MB/s  xls                   +0.1%
+    BM_UFlat/10                              381       388  494.6MB/s  xls_200               -1.8%
+    BM_UFlat/11                           339715    331973  440.1MB/s  txt1                  +2.3%
+    BM_UFlat/12                           294807    289418  415.4MB/s  txt2                  +1.9%
+    BM_UFlat/13                           906160    884094  463.3MB/s  txt3                  +2.5%
+    BM_UFlat/14                          1224221   1198435  386.1MB/s  txt4                  +2.2%
+    BM_UFlat/15                           516277    502923  979.5MB/s  bin                   +2.7%
+    BM_UFlat/16                              405       402  477.2MB/s  bin_200               +0.7%
+    BM_UFlat/17                            61640     60621  605.6MB/s  sum                   +1.7%
+    BM_UFlat/18                             7326      7383  549.5MB/s  man                   -0.8%
+    BM_UFlat/19                            94720     92653  1.2GB/s  pb                      +2.2%
+    BM_UFlat/20                           360435    346687  510.6MB/s  gaviota               +4.0%
+    Sum of all benchmarks                6944998   6828663                                   +1.7%
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@77 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r38 | snappy.mirrorbot@gmail.com | 2011-06-02 19:59:40 +0200 (Thu, 02 Jun 2011) | 52 lines
+commit 328aafa1980824a9afdcd50edc30d9d5157e417f
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Jun 13 16:19:52 2013 +0000
 
-Speed up decompression by caching ip_.
+    Add support for uncompressing to iovecs (scatter I/O).
+    Windows does not have struct iovec defined anywhere,
+    so we define our own version that's equal to what UNIX
+    typically has.
+    
+    The bulk of this patch was contributed by Mohit Aron.
+    
+    R=jeff
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@76 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
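
A usage sketch of the new scatter-I/O entry point (the wrapper name and the two destination buffers are illustrative; the RawUncompressToIOVec declaration is the one this change adds to snappy.h):

    #include <sys/uio.h>   // struct iovec on POSIX; Snappy ships its own for Windows
    #include "snappy.h"

    // Scatter the uncompressed output across two caller-provided buffers.
    bool UncompressIntoTwoBuffers(const char* compressed, size_t compressed_length,
                                  char* first, size_t first_len,
                                  char* second, size_t second_len) {
      struct iovec iov[2];
      iov[0].iov_base = first;
      iov[0].iov_len  = first_len;
      iov[1].iov_base = second;
      iov[1].iov_len  = second_len;
      // Returns false on corrupt input or if the iovecs are too small in total.
      return snappy::RawUncompressToIOVec(compressed, compressed_length, iov, 2);
    }
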
+commit cd92eb0852e2339187b693eef3595a07d2276c1d
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Jun 12 19:51:15 2013 +0000
 
-It is seemingly hard for the compiler to understand that ip_, the current input
-pointer into the compressed data stream, can not alias on anything else, and
-thus using it directly will incur memory traffic as it cannot be kept in a
-register. The code already knew about this and cached it into a local
-variable, but since Step() only decoded one tag, it had to move ip_ back into
-place between every tag. This seems to have cost us a significant amount of
-performance, so changing Step() into a function that decodes as much as it can
-before it saves ip_ back and returns. (Note that Step() was already inlined,
-so it is not the manual inlining that buys the performance here.)
+    Some code reorganization needed for an internal change.
+    
+    R=fikes
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@75 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit a3e928d62bbd61b523b988c07b560253950cf73b
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Apr 9 15:33:30 2013 +0000
 
-The wins are about 3–6% for Core 2, 6–13% on Core i7 and 5–12% on Opteron
-(for plain array-to-array decompression, in 64-bit opt mode).
+    Supports truncated test data in zippy benchmark.
+    
+    R=sesse
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@74 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit bde324c0169763688f35ee44630a26ad1f49eec3
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Feb 5 14:36:15 2013 +0000
+
+    Release Snappy 1.1.0.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@73 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 8168446c7eaaa0594e1f4ca923376dcf3a2846fa
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Feb 5 14:30:05 2013 +0000
 
-There is a tiny difference in the behavior here; if an invalid literal is
-encountered (ie., the writer refuses the Append() operation), ip_ will now
-point to the byte past the tag byte, instead of where the literal was
-originally thought to end. However, we don't use ip_ for anything after
-DecompressAllTags() has returned, so this should not change external behavior
-in any way.
-
-Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
+    Make ./snappy_unittest pass without "srcdir" being defined.
+    
+    Previously, snappy_unittests would read from an absolute path /testdata/..;
+    convert it to use a relative path instead.
+    
+    Patch from Marc-Antoine Ruel.
+    
+    R=maruel
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@72 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Benchmark            Time(ns)    CPU(ns) Iterations
----------------------------------------------------
-BM_UFlat/0              79134      79110       8835 1.2GB/s  html      [ +6.2%]
-BM_UFlat/1             786126     786096        891 851.8MB/s  urls    [+10.0%]
-BM_UFlat/2               9948       9948      69125 11.9GB/s  jpg      [ -1.3%]
-BM_UFlat/3              31999      31998      21898 2.7GB/s  pdf       [ +6.5%]
-BM_UFlat/4             318909     318829       2204 1.2GB/s  html4     [ +6.5%]
-BM_UFlat/5              31384      31390      22363 747.5MB/s  cp      [ +9.2%]
-BM_UFlat/6              14037      14034      49858 757.7MB/s  c       [+10.6%]
-BM_UFlat/7               4612       4612     151395 769.5MB/s  lsp     [ +9.5%]
-BM_UFlat/8            1203174    1203007        582 816.3MB/s  xls     [+19.3%]
-BM_UFlat/9             253869     253955       2757 571.1MB/s  txt1    [+11.4%]
-BM_UFlat/10            219292     219290       3194 544.4MB/s  txt2    [+12.1%]
-BM_UFlat/11            672135     672131       1000 605.5MB/s  txt3    [+11.2%]
-BM_UFlat/12            902512     902492        776 509.2MB/s  txt4    [+12.5%]
-BM_UFlat/13            372110     371998       1881 1.3GB/s  bin       [ +5.8%]
-BM_UFlat/14             50407      50407      10000 723.5MB/s  sum     [+13.5%]
-BM_UFlat/15              5699       5701     100000 707.2MB/s  man     [+12.4%]
-BM_UFlat/16             83448      83424       8383 1.3GB/s  pb        [ +5.7%]
-BM_UFlat/17            256958     256963       2723 684.1MB/s  gaviota [ +7.9%]
-BM_UValidate/0          42795      42796      16351 2.2GB/s  html      [+25.8%]
-BM_UValidate/1         490672     490622       1427 1.3GB/s  urls      [+22.7%]
-BM_UValidate/2            237        237    2950297 499.0GB/s  jpg     [+24.9%]
-BM_UValidate/3          14610      14611      47901 6.0GB/s  pdf       [+26.8%]
-BM_UValidate/4         171973     171990       4071 2.2GB/s  html4     [+25.7%]
-
-
-
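
The removed r38 entry above boils down to a simple pointer-caching pattern; a stripped-down sketch of that pattern (names are illustrative, not the real SnappyDecompressor):

    #include <stddef.h>

    struct ExampleDecoder {
      const char* ip_;        // current position in the compressed stream
      const char* ip_limit_;

      // Old shape: decode ONE tag per call, reloading and storing ip_ each time.
      // New shape: cache ip_ in a local, decode as many tags as possible,
      // then store it back exactly once.
      size_t CountTags() {
        const char* ip = ip_;        // keep the hot pointer in a register
        size_t tags = 0;
        while (ip < ip_limit_) {
          ++ip;                      // stand-in for "decode one tag"
          ++tags;
        }
        ip_ = ip;                    // single write-back
        return tags;
      }
    };
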
-------------------------------------------------------------------------
-r37 | snappy.mirrorbot@gmail.com | 2011-05-17 10:48:25 +0200 (Tue, 17 May 2011) | 10 lines
-
-
-Fix the numbering of the headlines in the Snappy format description.
-
-R=csilvers
-DELTA=4  (0 added, 0 deleted, 4 changed)
-
+commit 27a0cc394950ebdad2e8d67322f0862835b10bd9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Jan 18 12:16:36 2013 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1906
-
-------------------------------------------------------------------------
-r36 | snappy.mirrorbot@gmail.com | 2011-05-16 10:59:18 +0200 (Mon, 16 May 2011) | 12 lines
-
-
-Fix public issue #32: Add compressed format documentation for Snappy.
-This text is new, but an earlier version from Zeev Tarantov was used
-as reference.
-
-R=csilvers
-DELTA=112  (111 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1867
-
-------------------------------------------------------------------------
-r35 | snappy.mirrorbot@gmail.com | 2011-05-09 23:29:02 +0200 (Mon, 09 May 2011) | 12 lines
-
-
-Fix public issue #39: Pick out the median runs based on CPU time,
-not real time. Also, use nth_element instead of sort, since we
-only need one element.
+    Increase the Zippy block size from 32 kB to 64 kB, winning ~3% density
+    while being effectively performance neutral.
+    
+    The longer story about density is that we win 3-6% density on the benchmarks
+    where this has any effect at all; many of the benchmarks (cp, c, lsp, man)
+    are smaller than 32 kB and thus will have no effect. Binary data also seems
+    to win little or nothing; of course, the already-compressed data wins nothing.
+    The protobuf benchmark wins as much as ~18% depending on architecture,
+    but I wouldn't be too sure that this is representative of protobuf data in
+    general.
+    
+    As for performance, we lose a tiny amount since we get more tags (e.g., a long
+    literal might be broken up into literal-copy-literal), but we win it back with
+    less clearing of the hash table, and more opportunities to skip incompressible
+    data (e.g. in the jpg benchmark). Decompression seems to get ever so slightly
+    slower, again due to more tags. The total net change is about as close to zero
+    as we can get, so the end effect seems to be simply more density and no
+    real performance change.
+    
+    The comment about not changing kBlockSize, scary as it is, is not really
+    relevant, since we're never going to have a block-level decompressor without
+    explicitly marked blocks. Replace it with something more appropriate.
+    
+    This affects the framing format, but it's okay to change it since it basically
+    has no users yet.
+    
+    
+    Density (note that cp, c, lsp and man are all smaller than 32 kB):
+    
+       Benchmark         Description   Base (%)  New (%)  Improvement
+       --------------------------------------------------------------
+       ZFlat/0           html            22.57    22.31     +5.6%
+       ZFlat/1           urls            50.89    47.77     +6.5%
+       ZFlat/2           jpg             99.88    99.87     +0.0%
+       ZFlat/3           pdf             82.13    82.07     +0.1%
+       ZFlat/4           html4           23.55    22.51     +4.6%
+       ZFlat/5           cp              48.12    48.12     +0.0%
+       ZFlat/6           c               42.40    42.40     +0.0%
+       ZFlat/7           lsp             48.37    48.37     +0.0%
+       ZFlat/8           xls             41.34    41.23     +0.3%
+       ZFlat/9           txt1            59.81    57.87     +3.4%
+       ZFlat/10          txt2            64.07    61.93     +3.5%
+       ZFlat/11          txt3            57.11    54.92     +4.0%
+       ZFlat/12          txt4            68.35    66.22     +3.2%
+       ZFlat/13          bin             18.21    18.11     +0.6%
+       ZFlat/14          sum             51.88    48.96     +6.0%
+       ZFlat/15          man             59.36    59.36     +0.0%
+       ZFlat/16          pb              23.15    19.64    +17.9%
+       ZFlat/17          gaviota         38.27    37.72     +1.5%
+       Geometric mean                    45.51    44.15     +3.1%
+    
+    
+    Microbenchmarks (64-bit, opt):
+    
+    Westmere 2.8 GHz:
+    
+       Benchmark                          Base (ns)  New (ns)                                Improvement
+       -------------------------------------------------------------------------------------------------
+       BM_UFlat/0                             75342     75027  1.3GB/s  html                    +0.4%
+       BM_UFlat/1                            723767    744269  899.6MB/s  urls                  -2.8%
+       BM_UFlat/2                             10072     10072  11.7GB/s  jpg                    +0.0%
+       BM_UFlat/3                             30747     30388  2.9GB/s  pdf                     +1.2%
+       BM_UFlat/4                            307353    306063  1.2GB/s  html4                   +0.4%
+       BM_UFlat/5                             28593     28743  816.3MB/s  cp                    -0.5%
+       BM_UFlat/6                             12958     12998  818.1MB/s  c                     -0.3%
+       BM_UFlat/7                              3700      3792  935.8MB/s  lsp                   -2.4%
+       BM_UFlat/8                            999685    999905  982.1MB/s  xls                   -0.0%
+       BM_UFlat/9                            232954    230079  630.4MB/s  txt1                  +1.2%
+       BM_UFlat/10                           200785    201468  592.6MB/s  txt2                  -0.3%
+       BM_UFlat/11                           617267    610968  666.1MB/s  txt3                  +1.0%
+       BM_UFlat/12                           821595    822475  558.7MB/s  txt4                  -0.1%
+       BM_UFlat/13                           377097    377632  1.3GB/s  bin                     -0.1%
+       BM_UFlat/14                            45476     45260  805.8MB/s  sum                   +0.5%
+       BM_UFlat/15                             4985      5003  805.7MB/s  man                   -0.4%
+       BM_UFlat/16                            80813     77494  1.4GB/s  pb                      +4.3%
+       BM_UFlat/17                           251792    241553  727.7MB/s  gaviota               +4.2%
+       BM_UValidate/0                         40343     40354  2.4GB/s  html                    -0.0%
+       BM_UValidate/1                        426890    451574  1.4GB/s  urls                    -5.5%
+       BM_UValidate/2                           187       179  661.9GB/s  jpg                   +4.5%
+       BM_UValidate/3                         13783     13827  6.4GB/s  pdf                     -0.3%
+       BM_UValidate/4                        162393    163335  2.3GB/s  html4                   -0.6%
+       BM_UDataBuffer/0                       93756     93302  1046.7MB/s  html                 +0.5%
+       BM_UDataBuffer/1                      886714    916292  730.7MB/s  urls                  -3.2%
+       BM_UDataBuffer/2                       15861     16401  7.2GB/s  jpg                     -3.3%
+       BM_UDataBuffer/3                       38934     39224  2.2GB/s  pdf                     -0.7%
+       BM_UDataBuffer/4                      381008    379428  1029.5MB/s  html4                +0.4%
+       BM_UCord/0                             92528     91098  1072.0MB/s  html                 +1.6%
+       BM_UCord/1                            858421    885287  756.3MB/s  urls                  -3.0%
+       BM_UCord/2                             13140     13464  8.8GB/s  jpg                     -2.4%
+       BM_UCord/3                             39012     37773  2.3GB/s  pdf                     +3.3%
+       BM_UCord/4                            376869    371267  1052.1MB/s  html4                +1.5%
+       BM_UCordString/0                       75810     75303  1.3GB/s  html                    +0.7%
+       BM_UCordString/1                      735290    753841  888.2MB/s  urls                  -2.5%
+       BM_UCordString/2                       11945     13113  9.0GB/s  jpg                     -8.9%
+       BM_UCordString/3                       33901     32562  2.7GB/s  pdf                     +4.1%
+       BM_UCordString/4                      310985    309390  1.2GB/s  html4                   +0.5%
+       BM_UCordValidate/0                     40952     40450  2.4GB/s  html                    +1.2%
+       BM_UCordValidate/1                    433842    456531  1.4GB/s  urls                    -5.0%
+       BM_UCordValidate/2                      1179      1173  100.8GB/s  jpg                   +0.5%
+       BM_UCordValidate/3                     14481     14392  6.1GB/s  pdf                     +0.6%
+       BM_UCordValidate/4                    164364    164151  2.3GB/s  html4                   +0.1%
+       BM_ZFlat/0                            160610    156601  623.6MB/s  html (22.31 %)        +2.6%
+       BM_ZFlat/1                           1995238   1993582  335.9MB/s  urls (47.77 %)        +0.1%
+       BM_ZFlat/2                             30133     24983  4.7GB/s  jpg (99.87 %)          +20.6%
+       BM_ZFlat/3                             74453     73128  1.2GB/s  pdf (82.07 %)           +1.8%
+       BM_ZFlat/4                            647674    633729  616.4MB/s  html4 (22.51 %)       +2.2%
+       BM_ZFlat/5                             76259     76090  308.4MB/s  cp (48.12 %)          +0.2%
+       BM_ZFlat/6                             31106     31084  342.1MB/s  c (42.40 %)           +0.1%
+       BM_ZFlat/7                             10507     10443  339.8MB/s  lsp (48.37 %)         +0.6%
+       BM_ZFlat/8                           1811047   1793325  547.6MB/s  xls (41.23 %)         +1.0%
+       BM_ZFlat/9                            597903    581793  249.3MB/s  txt1 (57.87 %)        +2.8%
+       BM_ZFlat/10                           525320    514522  232.0MB/s  txt2 (61.93 %)        +2.1%
+       BM_ZFlat/11                          1596591   1551636  262.3MB/s  txt3 (54.92 %)        +2.9%
+       BM_ZFlat/12                          2134523   2094033  219.5MB/s  txt4 (66.22 %)        +1.9%
+       BM_ZFlat/13                           593024    587869  832.6MB/s  bin (18.11 %)         +0.9%
+       BM_ZFlat/14                           114746    110666  329.5MB/s  sum (48.96 %)         +3.7%
+       BM_ZFlat/15                            14376     14485  278.3MB/s  man (59.36 %)         -0.8%
+       BM_ZFlat/16                           167908    150070  753.6MB/s  pb (19.64 %)         +11.9%
+       BM_ZFlat/17                           460228    442253  397.5MB/s  gaviota (37.72 %)     +4.1%
+       BM_ZCord/0                            164896    160241  609.4MB/s  html                  +2.9%
+       BM_ZCord/1                           2070239   2043492  327.7MB/s  urls                  +1.3%
+       BM_ZCord/2                             54402     47002  2.5GB/s  jpg                    +15.7%
+       BM_ZCord/3                             85871     83832  1073.1MB/s  pdf                  +2.4%
+       BM_ZCord/4                            664078    648825  602.0MB/s  html4                 +2.4%
+       BM_ZDataBuffer/0                      174874    172549  566.0MB/s  html                  +1.3%
+       BM_ZDataBuffer/1                     2134410   2139173  313.0MB/s  urls                  -0.2%
+       BM_ZDataBuffer/2                       71911     69551  1.7GB/s  jpg                     +3.4%
+       BM_ZDataBuffer/3                       98236     99727  902.1MB/s  pdf                   -1.5%
+       BM_ZDataBuffer/4                      710776    699104  558.8MB/s  html4                 +1.7%
+       Sum of all benchmarks               27358908  27200688                                   +0.6%
+    
+    
+    Sandy Bridge 2.6 GHz:
+    
+       Benchmark                          Base (ns)  New (ns)                                Improvement
+       -------------------------------------------------------------------------------------------------
+       BM_UFlat/0                             49356     49018  1.9GB/s  html                    +0.7%
+       BM_UFlat/1                            516764    531955  1.2GB/s  urls                    -2.9%
+       BM_UFlat/2                              6982      7304  16.2GB/s  jpg                    -4.4%
+       BM_UFlat/3                             15285     15598  5.6GB/s  pdf                     -2.0%
+       BM_UFlat/4                            206557    206669  1.8GB/s  html4                   -0.1%
+       BM_UFlat/5                             13681     13567  1.7GB/s  cp                      +0.8%
+       BM_UFlat/6                              6571      6592  1.6GB/s  c                       -0.3%
+       BM_UFlat/7                              2008      1994  1.7GB/s  lsp                     +0.7%
+       BM_UFlat/8                            775700    773286  1.2GB/s  xls                     +0.3%
+       BM_UFlat/9                            165578    164480  881.8MB/s  txt1                  +0.7%
+       BM_UFlat/10                           143707    144139  828.2MB/s  txt2                  -0.3%
+       BM_UFlat/11                           443026    436281  932.8MB/s  txt3                  +1.5%
+       BM_UFlat/12                           603129    595856  771.2MB/s  txt4                  +1.2%
+       BM_UFlat/13                           271682    270450  1.8GB/s  bin                     +0.5%
+       BM_UFlat/14                            26200     25666  1.4GB/s  sum                     +2.1%
+       BM_UFlat/15                             2620      2608  1.5GB/s  man                     +0.5%
+       BM_UFlat/16                            48908     47756  2.3GB/s  pb                      +2.4%
+       BM_UFlat/17                           174638    170346  1031.9MB/s  gaviota              +2.5%
+       BM_UValidate/0                         31922     31898  3.0GB/s  html                    +0.1%
+       BM_UValidate/1                        341265    363554  1.8GB/s  urls                    -6.1%
+       BM_UValidate/2                           160       151  782.8GB/s  jpg                   +6.0%
+       BM_UValidate/3                         10402     10380  8.5GB/s  pdf                     +0.2%
+       BM_UValidate/4                        129490    130587  2.9GB/s  html4                   -0.8%
+       BM_UDataBuffer/0                       59383     58736  1.6GB/s  html                    +1.1%
+       BM_UDataBuffer/1                      619222    637786  1049.8MB/s  urls                 -2.9%
+       BM_UDataBuffer/2                       10775     11941  9.9GB/s  jpg                     -9.8%
+       BM_UDataBuffer/3                       18002     17930  4.9GB/s  pdf                     +0.4%
+       BM_UDataBuffer/4                      259182    259306  1.5GB/s  html4                   -0.0%
+       BM_UCord/0                             59379     57814  1.6GB/s  html                    +2.7%
+       BM_UCord/1                            598456    615162  1088.4MB/s  urls                 -2.7%
+       BM_UCord/2                              8519      8628  13.7GB/s  jpg                    -1.3%
+       BM_UCord/3                             18123     17537  5.0GB/s  pdf                     +3.3%
+       BM_UCord/4                            252375    252331  1.5GB/s  html4                   +0.0%
+       BM_UCordString/0                       49494     49790  1.9GB/s  html                    -0.6%
+       BM_UCordString/1                      524659    541803  1.2GB/s  urls                    -3.2%
+       BM_UCordString/2                        8206      8354  14.2GB/s  jpg                    -1.8%
+       BM_UCordString/3                       17235     16537  5.3GB/s  pdf                     +4.2%
+       BM_UCordString/4                      210188    211072  1.8GB/s  html4                   -0.4%
+       BM_UCordValidate/0                     31956     31587  3.0GB/s  html                    +1.2%
+       BM_UCordValidate/1                    340828    362141  1.8GB/s  urls                    -5.9%
+       BM_UCordValidate/2                       783       744  158.9GB/s  jpg                   +5.2%
+       BM_UCordValidate/3                     10543     10462  8.4GB/s  pdf                     +0.8%
+       BM_UCordValidate/4                    130150    129789  2.9GB/s  html4                   +0.3%
+       BM_ZFlat/0                            113873    111200  878.2MB/s  html (22.31 %)        +2.4%
+       BM_ZFlat/1                           1473023   1489858  449.4MB/s  urls (47.77 %)        -1.1%
+       BM_ZFlat/2                             23569     19486  6.1GB/s  jpg (99.87 %)          +21.0%
+       BM_ZFlat/3                             49178     48046  1.8GB/s  pdf (82.07 %)           +2.4%
+       BM_ZFlat/4                            475063    469394  832.2MB/s  html4 (22.51 %)       +1.2%
+       BM_ZFlat/5                             46910     46816  501.2MB/s  cp (48.12 %)          +0.2%
+       BM_ZFlat/6                             16883     16916  628.6MB/s  c (42.40 %)           -0.2%
+       BM_ZFlat/7                              5381      5447  651.5MB/s  lsp (48.37 %)         -1.2%
+       BM_ZFlat/8                           1466870   1473861  666.3MB/s  xls (41.23 %)         -0.5%
+       BM_ZFlat/9                            468006    464101  312.5MB/s  txt1 (57.87 %)        +0.8%
+       BM_ZFlat/10                           408157    408957  291.9MB/s  txt2 (61.93 %)        -0.2%
+       BM_ZFlat/11                          1253348   1232910  330.1MB/s  txt3 (54.92 %)        +1.7%
+       BM_ZFlat/12                          1702373   1702977  269.8MB/s  txt4 (66.22 %)        -0.0%
+       BM_ZFlat/13                           439792    438557  1116.0MB/s  bin (18.11 %)        +0.3%
+       BM_ZFlat/14                            80766     78851  462.5MB/s  sum (48.96 %)         +2.4%
+       BM_ZFlat/15                             7420      7542  534.5MB/s  man (59.36 %)         -1.6%
+       BM_ZFlat/16                           112043    100126  1.1GB/s  pb (19.64 %)           +11.9%
+       BM_ZFlat/17                           368877    357703  491.4MB/s  gaviota (37.72 %)     +3.1%
+       BM_ZCord/0                            116402    113564  859.9MB/s  html                  +2.5%
+       BM_ZCord/1                           1507156   1519911  440.5MB/s  urls                  -0.8%
+       BM_ZCord/2                             39860     33686  3.5GB/s  jpg                    +18.3%
+       BM_ZCord/3                             56211     54694  1.6GB/s  pdf                     +2.8%
+       BM_ZCord/4                            485594    479212  815.1MB/s  html4                 +1.3%
+       BM_ZDataBuffer/0                      123185    121572  803.3MB/s  html                  +1.3%
+       BM_ZDataBuffer/1                     1569111   1589380  421.3MB/s  urls                  -1.3%
+       BM_ZDataBuffer/2                       53143     49556  2.4GB/s  jpg                     +7.2%
+       BM_ZDataBuffer/3                       65725     66826  1.3GB/s  pdf                     -1.6%
+       BM_ZDataBuffer/4                      517871    514750  758.9MB/s  html4                 +0.6%
+       Sum of all benchmarks               20258879  20315484                                   -0.3%
+    
+    
+    AMD Istanbul 2.4 GHz:
+    
+       Benchmark                          Base (ns)  New (ns)                                Improvement
+       -------------------------------------------------------------------------------------------------
+       BM_UFlat/0                             97120     96585  1011.1MB/s  html                 +0.6%
+       BM_UFlat/1                            917473    948016  706.3MB/s  urls                  -3.2%
+       BM_UFlat/2                             21496     23938  4.9GB/s  jpg                    -10.2%
+       BM_UFlat/3                             44751     45639  1.9GB/s  pdf                     -1.9%
+       BM_UFlat/4                            391950    391413  998.0MB/s  html4                 +0.1%
+       BM_UFlat/5                             37366     37201  630.7MB/s  cp                    +0.4%
+       BM_UFlat/6                             18350     18318  580.5MB/s  c                     +0.2%
+       BM_UFlat/7                              5672      5661  626.9MB/s  lsp                   +0.2%
+       BM_UFlat/8                           1533390   1529441  642.1MB/s  xls                   +0.3%
+       BM_UFlat/9                            335477    336553  431.0MB/s  txt1                  -0.3%
+       BM_UFlat/10                           285140    292080  408.7MB/s  txt2                  -2.4%
+       BM_UFlat/11                           888507    894758  454.9MB/s  txt3                  -0.7%
+       BM_UFlat/12                          1187643   1210928  379.5MB/s  txt4                  -1.9%
+       BM_UFlat/13                           493717    507447  964.5MB/s  bin                   -2.7%
+       BM_UFlat/14                            61740     60870  599.1MB/s  sum                   +1.4%
+       BM_UFlat/15                             7211      7187  560.9MB/s  man                   +0.3%
+       BM_UFlat/16                            97435     93100  1.2GB/s  pb                      +4.7%
+       BM_UFlat/17                           362662    356395  493.2MB/s  gaviota               +1.8%
+       BM_UValidate/0                         47475     47118  2.0GB/s  html                    +0.8%
+       BM_UValidate/1                        501304    529741  1.2GB/s  urls                    -5.4%
+       BM_UValidate/2                           276       243  486.2GB/s  jpg                  +13.6%
+       BM_UValidate/3                         16361     16261  5.4GB/s  pdf                     +0.6%
+       BM_UValidate/4                        190741    190353  2.0GB/s  html4                   +0.2%
+       BM_UDataBuffer/0                      111080    109771  889.6MB/s  html                  +1.2%
+       BM_UDataBuffer/1                     1051035   1085999  616.5MB/s  urls                  -3.2%
+       BM_UDataBuffer/2                       25801     25463  4.6GB/s  jpg                     +1.3%
+       BM_UDataBuffer/3                       50493     49946  1.8GB/s  pdf                     +1.1%
+       BM_UDataBuffer/4                      447258    444138  879.5MB/s  html4                 +0.7%
+       BM_UCord/0                            109350    107909  905.0MB/s  html                  +1.3%
+       BM_UCord/1                           1023396   1054964  634.7MB/s  urls                  -3.0%
+       BM_UCord/2                             25292     24371  4.9GB/s  jpg                     +3.8%
+       BM_UCord/3                             48955     49736  1.8GB/s  pdf                     -1.6%
+       BM_UCord/4                            440452    437331  893.2MB/s  html4                 +0.7%
+       BM_UCordString/0                       98511     98031  996.2MB/s  html                  +0.5%
+       BM_UCordString/1                      933230    963495  694.9MB/s  urls                  -3.1%
+       BM_UCordString/2                       23311     24076  4.9GB/s  jpg                     -3.2%
+       BM_UCordString/3                       45568     46196  1.9GB/s  pdf                     -1.4%
+       BM_UCordString/4                      397791    396934  984.1MB/s  html4                 +0.2%
+       BM_UCordValidate/0                     47537     46921  2.0GB/s  html                    +1.3%
+       BM_UCordValidate/1                    505071    532716  1.2GB/s  urls                    -5.2%
+       BM_UCordValidate/2                      1663      1621  72.9GB/s  jpg                    +2.6%
+       BM_UCordValidate/3                     16890     16926  5.2GB/s  pdf                     -0.2%
+       BM_UCordValidate/4                    192365    191984  2.0GB/s  html4                   +0.2%
+       BM_ZFlat/0                            184708    179103  545.3MB/s  html (22.31 %)        +3.1%
+       BM_ZFlat/1                           2293864   2302950  290.7MB/s  urls (47.77 %)        -0.4%
+       BM_ZFlat/2                             52852     47618  2.5GB/s  jpg (99.87 %)          +11.0%
+       BM_ZFlat/3                            100766     96179  935.3MB/s  pdf (82.07 %)         +4.8%
+       BM_ZFlat/4                            741220    727977  536.6MB/s  html4 (22.51 %)       +1.8%
+       BM_ZFlat/5                             85402     85418  274.7MB/s  cp (48.12 %)          -0.0%
+       BM_ZFlat/6                             36558     36494  291.4MB/s  c (42.40 %)           +0.2%
+       BM_ZFlat/7                             12706     12507  283.7MB/s  lsp (48.37 %)         +1.6%
+       BM_ZFlat/8                           2336823   2335688  420.5MB/s  xls (41.23 %)         +0.0%
+       BM_ZFlat/9                            701804    681153  212.9MB/s  txt1 (57.87 %)        +3.0%
+       BM_ZFlat/10                           606700    597194  199.9MB/s  txt2 (61.93 %)        +1.6%
+       BM_ZFlat/11                          1852283   1803238  225.7MB/s  txt3 (54.92 %)        +2.7%
+       BM_ZFlat/12                          2475527   2443354  188.1MB/s  txt4 (66.22 %)        +1.3%
+       BM_ZFlat/13                           694497    696654  702.6MB/s  bin (18.11 %)         -0.3%
+       BM_ZFlat/14                           136929    129855  280.8MB/s  sum (48.96 %)         +5.4%
+       BM_ZFlat/15                            17172     17124  235.4MB/s  man (59.36 %)         +0.3%
+       BM_ZFlat/16                           190364    171763  658.4MB/s  pb (19.64 %)         +10.8%
+       BM_ZFlat/17                           567285    555190  316.6MB/s  gaviota (37.72 %)     +2.2%
+       BM_ZCord/0                            193490    187031  522.1MB/s  html                  +3.5%
+       BM_ZCord/1                           2427537   2415315  277.2MB/s  urls                  +0.5%
+       BM_ZCord/2                             85378     81412  1.5GB/s  jpg                     +4.9%
+       BM_ZCord/3                            121898    119419  753.3MB/s  pdf                   +2.1%
+       BM_ZCord/4                            779564    762961  512.0MB/s  html4                 +2.2%
+       BM_ZDataBuffer/0                      213820    207272  471.1MB/s  html                  +3.2%
+       BM_ZDataBuffer/1                     2589010   2586495  258.9MB/s  urls                  +0.1%
+       BM_ZDataBuffer/2                      121871    118885  1018.4MB/s  jpg                  +2.5%
+       BM_ZDataBuffer/3                      145382    145986  616.2MB/s  pdf                   -0.4%
+       BM_ZDataBuffer/4                      868117    852754  458.1MB/s  html4                 +1.8%
+       Sum of all benchmarks               33771833  33744763                                   +0.1%
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@71 03e5f5b5-db94-4691-08a0-1a8bf15f6143
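
The constants involved, roughly as they read in snappy.h after the bump from 32 kB to 64 kB (surrounding comments and code omitted):

    #include <stddef.h>

    static const int kBlockLog = 16;                  // was 15 (32 kB blocks)
    static const size_t kBlockSize = 1 << kBlockLog;  // now 64 kB
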
 
-R=csilvers
-DELTA=5  (3 added, 0 deleted, 2 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1799
-
-------------------------------------------------------------------------
-r34 | snappy.mirrorbot@gmail.com | 2011-05-09 23:28:45 +0200 (Mon, 09 May 2011) | 19 lines
-
-
-Fix public issue #38: Make the microbenchmark framework handle
-properly cases where gettimeofday() can return the same
-result twice (as sometimes on GNU/Hurd) or go backwards
-(as when the user adjusts the clock). We avoid a division-by-zero,
-and put a lower bound on the number of iterations -- the same
-amount as we use to calibrate.
-
-We should probably use CLOCK_MONOTONIC for platforms that support
-it, to be robust against clock adjustments; we already use Windows'
-monotonic timers. However, that's for a later changelist.
-
-R=csilvers
-DELTA=7  (5 added, 0 deleted, 2 changed)
-
+commit 81f34784b7b812dcda956ee489dfdc74ec2da990
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sun Jan 6 19:21:26 2013 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1798
+    Adjust the Snappy open-source distribution for the changes in Google's
+    internal file API.
+    
+    R=sanjay
+    
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@70 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 698af469b47fe809905e2ed173ad84241de5800f
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Jan 4 11:54:20 2013 +0000
 
-------------------------------------------------------------------------
-r33 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:52 +0200 (Wed, 04 May 2011) | 11 lines
-
-
-Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
-libraries, not libsnappy.so (which doesn't need any such dependency).
+    Change a few ORs to additions where they don't matter. This helps the compiler
+    use the LEA instruction more efficiently, since e.g. a + (b << 2) can be encoded
+    as one instruction. Even more importantly, it can constant-fold the
+    COPY_* enums together with the shifted negative constants, which also saves
+    some instructions. (We don't need it for LITERAL, since it happens to be 0.)
+    
+    I am unsure why the compiler couldn't do this itself, but the theory is that
+    it cannot prove that len-1 and len-4 cannot underflow/wrap, and thus can't
+    do the optimization safely.
+    
+    The gains are small but measurable; 0.5-1.0% over the BM_Z* benchmarks
+    (measured on Westmere, Sandy Bridge and Istanbul).
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@69 03e5f5b5-db94-4691-08a0-1a8bf15f6143
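
A sketch of the kind of rewrite meant here, using the one-byte copy tag (enum values as in snappy.cc; the emit helpers below are illustrative):

    #include <stdint.h>

    enum { LITERAL = 0, COPY_1_BYTE_OFFSET = 1, COPY_2_BYTE_OFFSET = 2, COPY_4_BYTE_OFFSET = 3 };

    // Before: bitwise OR of tag type, length and offset-high bits.
    static inline uint8_t EmitTagOr(int len, int offset) {
      return COPY_1_BYTE_OFFSET | ((len - 4) << 2) | ((offset >> 8) << 5);
    }

    // After: plain addition. For in-range len (4..11) and offset (< 2048) the
    // bit fields never overlap, so the value is identical, but the compiler can
    // now fold COPY_1_BYTE_OFFSET + ((len - 4) << 2) into a single LEA-style
    // address computation on x86.
    static inline uint8_t EmitTagAdd(int len, int offset) {
      return COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 8) << 5);
    }
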
 
-R=csilvers
-DELTA=20  (14 added, 0 deleted, 6 changed)
+commit 55209f9b92efd97e0a61be28ed94210de04c3bfc
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon Oct 8 11:37:16 2012 +0000
 
+    Stop giving -Werror to automake, due to an incompatibility between current
+    versions of libtool and automake on non-GNU platforms (e.g. Mac OS X).
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@68 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1710
+commit b86e81c8b3426a62d8ab3a7674c2506e9e678740
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Aug 17 13:54:47 2012 +0000
 
-------------------------------------------------------------------------
-r32 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:33 +0200 (Wed, 04 May 2011) | 11 lines
+    Fix public issue 66: Document GetUncompressedLength better, in particular that
+    it leaves the source in a state that's not appropriate for RawUncompress.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@67 03e5f5b5-db94-4691-08a0-1a8bf15f6143
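
The caveat matters because the two overloads behave differently; a usage sketch for the flat-buffer variant (the wrapper below is illustrative):

    #include <vector>
    #include "snappy.h"

    // The char*-based GetUncompressedLength() only peeks at the length prefix,
    // so the same buffer can be handed straight to RawUncompress() afterwards.
    bool UncompressToVector(const char* data, size_t n, std::vector<char>* out) {
      size_t uncompressed_len = 0;
      if (!snappy::GetUncompressedLength(data, n, &uncompressed_len)) return false;
      out->resize(uncompressed_len);
      if (uncompressed_len == 0) return true;
      return snappy::RawUncompress(data, n, &(*out)[0]);
    }

The Source-based overload, by contrast, consumes the length prefix from the Source, which is exactly the state the updated documentation warns about.
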
 
+commit 2e225ba821b420ae28e1d427075d5589c1e892d9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Jul 31 11:44:44 2012 +0000
 
-Release Snappy 1.0.2, to get the license change and various other fixes into
-a release.
+    Fix public issue 64: Check for <sys/time.h> at configure time,
+    since MSVC seemingly does not have it.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@66 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-R=csilvers
-DELTA=239  (236 added, 0 deleted, 3 changed)
+commit e89f20ab46ee11050760c6d57f05c2a3825a911c
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Jul 4 09:34:48 2012 +0000
 
+    Handle the case where gettimeofday() goes backwards or returns the same value
+    twice; it could cause division by zero in the unit test framework.
+    (We already had one fix for this in place, but it was incomplete.)
+    
+    This could in theory happen on any system, since there are few guarantees
+    about gettimeofday(), but seems to only happen in practice on GNU/Hurd, where
+    gettimeofday() is cached and only updated ever so often.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@65 03e5f5b5-db94-4691-08a0-1a8bf15f6143
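
One defensive shape for this (the actual fix lives in the benchmarking code in snappy-test.h and may differ in detail): never let an elapsed-time delta reach zero or go negative.

    #include <stdint.h>
    #include <sys/time.h>

    static int64_t SafeElapsedUsec(const struct timeval& start,
                                   const struct timeval& end) {
      int64_t delta = (int64_t)(end.tv_sec - start.tv_sec) * 1000000 +
                      (end.tv_usec - start.tv_usec);
      // Clamp: a stalled or stepped-back clock must not yield 0 (division by
      // zero in the rate math) or a negative duration.
      return delta > 0 ? delta : 1;
    }
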
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1709
+commit 3ec60ac9878de5d0317ad38fc545080a4bfaa74f
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Jul 4 09:28:33 2012 +0000
 
-------------------------------------------------------------------------
-r31 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:55 +0200 (Tue, 26 Apr 2011) | 15 lines
+    Mark ARMv4 as not supporting unaligned accesses (not just ARMv5 and ARMv6);
+    apparently Debian still targets these by default, giving us segfaults on
+    armel.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@64 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit be80d6f74f9d82220e952a54f3f129aae1f13f95
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue May 22 09:46:05 2012 +0000
 
-Fix public issue #30: Stop using gettimeofday() altogether on Win32,
-as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
-which is monotonic and probably reasonably high-resolution.
-(Some machines have traditionally had bugs in QPC, but they should
-be relatively rare these days, and there's really not much better
-alternative that I know of.)
+    Fix public bug #62: Remove an extraneous comma at the end of an enum list,
+    causing compile errors when embedded in Mozilla on OpenBSD.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@63 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
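
For readers wondering what the offending construct looks like: a trailing comma after the last enumerator is a C++11/GNU extension that strict C++98 builds (such as the OpenBSD setup mentioned) can reject. Illustrative names only:

    // Rejected under -pedantic C++98:
    //   enum ExampleStatus { kOk = 0, kError = 1, };
    // Accepted everywhere:
    enum ExampleStatus { kOk = 0, kError = 1 };
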
+commit 8b95464146dddab1c7068f879162db9a885cdafe
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue May 22 09:32:50 2012 +0000
 
-R=csilvers
-DELTA=74  (55 added, 19 deleted, 0 changed)
+    Snappy library no longer depends on iostream.
+    
+    Achieved by moving logging macro definitions to a test-only
+    header file, and by changing non-test code to use assert,
+    fprintf, and abort instead of LOG/CHECK macros.
+    
+    R=sesse
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@62 03e5f5b5-db94-4691-08a0-1a8bf15f6143
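
The flavour of replacement described above, sketched as a single macro (illustrative; most non-test call sites simply use assert directly):

    #include <stdio.h>
    #include <stdlib.h>

    // A CHECK-style macro that needs only <stdio.h>/<stdlib.h>, not <iostream>.
    #define SNAPPY_CHECK_EXAMPLE(cond)                                              \
      do {                                                                          \
        if (!(cond)) {                                                              \
          fprintf(stderr, "Check failed: %s (%s:%d)\n", #cond, __FILE__, __LINE__); \
          abort();                                                                  \
        }                                                                           \
      } while (0)
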
 
+commit fc723b212d6972af7051261754770b3f70a7dc03
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Feb 24 15:46:37 2012 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1556
+    Release Snappy 1.0.5.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@61 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit dc63e0ad9693e13390ba31b00d92ecccaf7605c3
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Feb 23 17:00:36 2012 +0000
 
-------------------------------------------------------------------------
-r30 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:37 +0200 (Tue, 26 Apr 2011) | 11 lines
-
-
-Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
-we need for our own build system internally.
-
-R=csilvers
-DELTA=16  (13 added, 1 deleted, 2 changed)
-
+    For 32-bit platforms, do not try to accelerate multiple neighboring
+    32-bit loads with a 64-bit load during compression (it's not a win).
+    
+    The main target for this optimization is ARM, but 32-bit x86 gets
+    a small gain, too, although there is noise in the microbenchmarks.
+    It's a no-op for 64-bit x86. It does not affect decompression.
+    
+    Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from
+    Ubuntu/Linaro), -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9
+    -mthumb-interwork, minimum 1000 iterations:
+    
+      Benchmark            Time(ns)    CPU(ns) Iterations
+      ---------------------------------------------------
+      BM_ZFlat/0            1158277    1160000       1000 84.2MB/s  html (23.57 %)    [ +4.3%]
+      BM_ZFlat/1           14861782   14860000       1000 45.1MB/s  urls (50.89 %)    [ +1.1%]
+      BM_ZFlat/2             393595     390000       1000 310.5MB/s  jpg (99.88 %)    [ +0.0%]
+      BM_ZFlat/3             650583     650000       1000 138.4MB/s  pdf (82.13 %)    [ +3.1%]
+      BM_ZFlat/4            4661480    4660000       1000 83.8MB/s  html4 (23.55 %)   [ +4.3%]
+      BM_ZFlat/5             491973     490000       1000 47.9MB/s  cp (48.12 %)      [ +2.0%]
+      BM_ZFlat/6             193575     192678       1038 55.2MB/s  c (42.40 %)       [ +9.0%]
+      BM_ZFlat/7              62343      62754       3187 56.5MB/s  lsp (48.37 %)     [ +2.6%]
+      BM_ZFlat/8           17708468   17710000       1000 55.5MB/s  xls (41.34 %)     [ -0.3%]
+      BM_ZFlat/9            3755345    3760000       1000 38.6MB/s  txt1 (59.81 %)    [ +8.2%]
+      BM_ZFlat/10           3324217    3320000       1000 36.0MB/s  txt2 (64.07 %)    [ +4.2%]
+      BM_ZFlat/11          10139932   10140000       1000 40.1MB/s  txt3 (57.11 %)    [ +6.4%]
+      BM_ZFlat/12          13532109   13530000       1000 34.0MB/s  txt4 (68.35 %)    [ +5.0%]
+      BM_ZFlat/13           4690847    4690000       1000 104.4MB/s  bin (18.21 %)    [ +4.1%]
+      BM_ZFlat/14            830682     830000       1000 43.9MB/s  sum (51.88 %)     [ +1.2%]
+      BM_ZFlat/15             84784      85011       2235 47.4MB/s  man (59.36 %)     [ +1.1%]
+      BM_ZFlat/16           1293254    1290000       1000 87.7MB/s  pb (23.15 %)      [ +2.3%]
+      BM_ZFlat/17           2775155    2780000       1000 63.2MB/s  gaviota (38.27 %) [+12.2%]
+    
+    Core i7 in 32-bit mode (only one run and 100 iterations, though, so noisy):
+    
+      Benchmark            Time(ns)    CPU(ns) Iterations
+      ---------------------------------------------------
+      BM_ZFlat/0             227582     223464       3043 437.0MB/s  html (23.57 %)    [ +7.4%]
+      BM_ZFlat/1            2982430    2918455        233 229.4MB/s  urls (50.89 %)    [ +2.9%]
+      BM_ZFlat/2              46967      46658      15217 2.5GB/s  jpg (99.88 %)       [ +0.0%]
+      BM_ZFlat/3             115298     114864       5833 783.2MB/s  pdf (82.13 %)     [ +1.5%]
+      BM_ZFlat/4             913440     899743        778 434.2MB/s  html4 (23.55 %)   [ +0.3%]
+      BM_ZFlat/5             110302     108571       7000 216.1MB/s  cp (48.12 %)      [ +0.0%]
+      BM_ZFlat/6              44409      43372      15909 245.2MB/s  c (42.40 %)       [ +0.8%]
+      BM_ZFlat/7              15713      15643      46667 226.9MB/s  lsp (48.37 %)     [ +2.7%]
+      BM_ZFlat/8            2625539    2602230        269 377.4MB/s  xls (41.34 %)     [ +1.4%]
+      BM_ZFlat/9             808884     811429        875 178.8MB/s  txt1 (59.81 %)    [ -3.9%]
+      BM_ZFlat/10            709532     700000       1000 170.5MB/s  txt2 (64.07 %)    [ +0.0%]
+      BM_ZFlat/11           2177682    2162162        333 188.2MB/s  txt3 (57.11 %)    [ -1.4%]
+      BM_ZFlat/12           2849640    2840000        250 161.8MB/s  txt4 (68.35 %)    [ -1.4%]
+      BM_ZFlat/13            849760     835476        778 585.8MB/s  bin (18.21 %)     [ +1.2%]
+      BM_ZFlat/14            165940     164571       4375 221.6MB/s  sum (51.88 %)     [ +1.4%]
+      BM_ZFlat/15             20939      20571      35000 196.0MB/s  man (59.36 %)     [ +2.1%]
+      BM_ZFlat/16            239209     236544       2917 478.1MB/s  pb (23.15 %)      [ +4.2%]
+      BM_ZFlat/17            616206     610000       1000 288.2MB/s  gaviota (38.27 %) [ -1.6%]
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@60 03e5f5b5-db94-4691-08a0-1a8bf15f6143
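
A sketch of the shape of this change (UNALIGNED_LOAD32/UNALIGNED_LOAD64, the uint32/uint64 typedefs and ARCH_K8 come from snappy-stubs-internal.h; the real compression loop is more involved, and little-endian layout is assumed here for brevity):

    #include "snappy-stubs-internal.h"

    namespace snappy {

    // Read the current and the next 4-byte window starting at 'ip'.
    static inline void LoadTwoWindows(const char* ip, uint32* cur, uint32* next) {
    #if defined(ARCH_K8)
      // 64-bit x86: one wide unaligned load covers both windows.
      uint64 dword = UNALIGNED_LOAD64(ip);
      *cur  = static_cast<uint32>(dword);
      *next = static_cast<uint32>(dword >> 32);
    #else
      // 32-bit targets (ARM, 32-bit x86): two plain 32-bit loads are cheaper.
      *cur  = UNALIGNED_LOAD32(ip);
      *next = UNALIGNED_LOAD32(ip + 4);
    #endif
    }

    }  // namespace snappy
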
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1555
-
-------------------------------------------------------------------------
-r29 | snappy.mirrorbot@gmail.com | 2011-04-16 00:55:56 +0200 (Sat, 16 Apr 2011) | 12 lines
-
-
-When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
-so we won't pull in macro definitions of things like min() and max(),
-which can conflict with <algorithm>.
-
-R=csilvers
-DELTA=1  (1 added, 0 deleted, 0 changed)
-
+commit f8829ea39d51432ba4e6a26ddaec57acea779f4c
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Feb 21 17:02:17 2012 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1485
-
-------------------------------------------------------------------------
-r28 | snappy.mirrorbot@gmail.com | 2011-04-11 11:07:01 +0200 (Mon, 11 Apr 2011) | 15 lines
-
-
-Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
-instead of getrusage().
+    Enable the use of unaligned loads and stores for ARM-based architectures
+    where they are available (ARMv7 and higher). This gives a significant
+    speed boost on ARM, both for compression and decompression.
+    It should not affect x86 at all.
+    
+    There are more changes possible to speed up ARM, but it might not be
+    that easy to do without hurting x86 or making the code uglier.
+    Also, we do not try to use NEON yet.
+    
+    Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from Ubuntu/Linaro),
+    -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9 -mthumb-interwork:
+    
+    Benchmark            Time(ns)    CPU(ns) Iterations
+    ---------------------------------------------------
+    BM_UFlat/0             524806     529100        378 184.6MB/s  html            [+33.6%]
+    BM_UFlat/1            5139790    5200000        100 128.8MB/s  urls            [+28.8%]
+    BM_UFlat/2              86540      84166       1901 1.4GB/s  jpg               [ +0.6%]
+    BM_UFlat/3             215351     210176        904 428.0MB/s  pdf             [+29.8%]
+    BM_UFlat/4            2144490    2100000        100 186.0MB/s  html4           [+33.3%]
+    BM_UFlat/5             194482     190000       1000 123.5MB/s  cp              [+36.2%]
+    BM_UFlat/6              91843      90175       2107 117.9MB/s  c               [+38.6%]
+    BM_UFlat/7              28535      28426       6684 124.8MB/s  lsp             [+34.7%]
+    BM_UFlat/8            9206600    9200000        100 106.7MB/s  xls             [+42.4%]
+    BM_UFlat/9            1865273    1886792        106 76.9MB/s  txt1             [+32.5%]
+    BM_UFlat/10           1576809    1587301        126 75.2MB/s  txt2             [+32.3%]
+    BM_UFlat/11           4968450    4900000        100 83.1MB/s  txt3             [+32.7%]
+    BM_UFlat/12           6673970    6700000        100 68.6MB/s  txt4             [+32.8%]
+    BM_UFlat/13           2391470    2400000        100 203.9MB/s  bin             [+29.2%]
+    BM_UFlat/14            334601     344827        522 105.8MB/s  sum             [+30.6%]
+    BM_UFlat/15             37404      38080       5252 105.9MB/s  man             [+33.8%]
+    BM_UFlat/16            535470     540540        370 209.2MB/s  pb              [+31.2%]
+    BM_UFlat/17           1875245    1886792        106 93.2MB/s  gaviota          [+37.8%]
+    BM_UValidate/0         178425     179533       1114 543.9MB/s  html            [ +2.7%]
+    BM_UValidate/1        2100450    2000000        100 334.8MB/s  urls            [ +5.0%]
+    BM_UValidate/2           1039       1044     172413 113.3GB/s  jpg             [ +3.4%]
+    BM_UValidate/3          59423      59470       3363 1.5GB/s  pdf               [ +7.8%]
+    BM_UValidate/4         760716     766283        261 509.8MB/s  html4           [ +6.5%]
+    BM_ZFlat/0            1204632    1204819        166 81.1MB/s  html (23.57 %)   [+32.8%]
+    BM_ZFlat/1           15656190   15600000        100 42.9MB/s  urls (50.89 %)   [+27.6%]
+    BM_ZFlat/2             403336     410677        487 294.8MB/s  jpg (99.88 %)   [+16.5%]
+    BM_ZFlat/3             664073     671140        298 134.0MB/s  pdf (82.13 %)   [+28.4%]
+    BM_ZFlat/4            4961940    4900000        100 79.7MB/s  html4 (23.55 %)  [+30.6%]
+    BM_ZFlat/5             500664     501253        399 46.8MB/s  cp (48.12 %)     [+33.4%]
+    BM_ZFlat/6             217276     215982        926 49.2MB/s  c (42.40 %)      [+25.0%]
+    BM_ZFlat/7              64122      65487       3054 54.2MB/s  lsp (48.37 %)    [+36.1%]
+    BM_ZFlat/8           18045730   18000000        100 54.6MB/s  xls (41.34 %)    [+34.4%]
+    BM_ZFlat/9            4051530    4000000        100 36.3MB/s  txt1 (59.81 %)   [+25.0%]
+    BM_ZFlat/10           3451800    3500000        100 34.1MB/s  txt2 (64.07 %)   [+25.7%]
+    BM_ZFlat/11          11052340   11100000        100 36.7MB/s  txt3 (57.11 %)   [+24.3%]
+    BM_ZFlat/12          14538690   14600000        100 31.5MB/s  txt4 (68.35 %)   [+24.7%]
+    BM_ZFlat/13           5041850    5000000        100 97.9MB/s  bin (18.21 %)    [+32.0%]
+    BM_ZFlat/14            908840     909090        220 40.1MB/s  sum (51.88 %)    [+22.2%]
+    BM_ZFlat/15             86921      86206       1972 46.8MB/s  man (59.36 %)    [+42.2%]
+    BM_ZFlat/16           1312315    1315789        152 86.0MB/s  pb (23.15 %)     [+34.5%]
+    BM_ZFlat/17           3173120    3200000        100 54.9MB/s  gaviota (38.27%) [+28.1%]
+    
+    
+    The move from 64-bit to 32-bit operations for the copies also affected 32-bit x86;
+    positive on the decompression side, and slightly negative on the compression side
+    (unless that is noise; I only ran once):
+    
+    Benchmark              Time(ns)    CPU(ns) Iterations
+    -----------------------------------------------------
+    BM_UFlat/0                86279      86140       7778 1.1GB/s  html             [ +7.5%]
+    BM_UFlat/1               839265     822622        778 813.9MB/s  urls           [ +9.4%]
+    BM_UFlat/2                 9180       9143      87500 12.9GB/s  jpg             [ +1.2%]
+    BM_UFlat/3                35080      35000      20000 2.5GB/s  pdf              [+10.1%]
+    BM_UFlat/4               350318     345000       2000 1.1GB/s  html4            [ +7.0%]
+    BM_UFlat/5                33808      33472      21212 701.0MB/s  cp             [ +9.0%]
+    BM_UFlat/6                15201      15214      46667 698.9MB/s  c              [+14.9%]
+    BM_UFlat/7                 4652       4651     159091 762.9MB/s  lsp            [ +7.5%]
+    BM_UFlat/8              1285551    1282528        538 765.7MB/s  xls            [+10.7%]
+    BM_UFlat/9               282510     281690       2414 514.9MB/s  txt1           [+13.6%]
+    BM_UFlat/10              243494     239286       2800 498.9MB/s  txt2           [+14.4%]
+    BM_UFlat/11              743625     740000       1000 550.0MB/s  txt3           [+14.3%]
+    BM_UFlat/12              999441     989717        778 464.3MB/s  txt4           [+16.1%]
+    BM_UFlat/13              412402     410076       1707 1.2GB/s  bin              [ +7.3%]
+    BM_UFlat/14               54876      54000      10000 675.3MB/s  sum            [+13.0%]
+    BM_UFlat/15                6146       6100     100000 660.8MB/s  man            [+14.8%]
+    BM_UFlat/16               90496      90286       8750 1.2GB/s  pb               [ +4.0%]
+    BM_UFlat/17              292650     292000       2500 602.0MB/s  gaviota        [+18.1%]
+    BM_UValidate/0            49620      49699      14286 1.9GB/s  html             [ +0.0%]
+    BM_UValidate/1           501371     500000       1000 1.3GB/s  urls             [ +0.0%]
+    BM_UValidate/2              232        227    3043478 521.5GB/s  jpg            [ +1.3%]
+    BM_UValidate/3            17250      17143      43750 5.1GB/s  pdf              [ -1.3%]
+    BM_UValidate/4           198643     200000       3500 1.9GB/s  html4            [ -0.9%]
+    BM_ZFlat/0               227128     229415       3182 425.7MB/s  html (23.57 %) [ -1.4%]
+    BM_ZFlat/1              2970089    2960000        250 226.2MB/s  urls (50.89 %) [ -1.9%]
+    BM_ZFlat/2                45683      44999      15556 2.6GB/s  jpg (99.88 %)    [ +2.2%]
+    BM_ZFlat/3               114661     113136       6364 795.1MB/s  pdf (82.13 %)  [ -1.5%]
+    BM_ZFlat/4               919702     914286        875 427.2MB/s  html4 (23.55%) [ -1.3%]
+    BM_ZFlat/5               108189     108422       6364 216.4MB/s  cp (48.12 %)   [ -1.2%]
+    BM_ZFlat/6                44525      44000      15909 241.7MB/s  c (42.40 %)    [ -2.9%]
+    BM_ZFlat/7                15973      15857      46667 223.8MB/s  lsp (48.37 %)  [ +0.0%]
+    BM_ZFlat/8              2677888    2639405        269 372.1MB/s  xls (41.34 %)  [ -1.4%]
+    BM_ZFlat/9               800715     780000       1000 186.0MB/s  txt1 (59.81 %) [ -0.4%]
+    BM_ZFlat/10              700089     700000       1000 170.5MB/s  txt2 (64.07 %) [ -2.9%]
+    BM_ZFlat/11             2159356    2138365        318 190.3MB/s  txt3 (57.11 %) [ -0.3%]
+    BM_ZFlat/12             2796143    2779923        259 165.3MB/s  txt4 (68.35 %) [ -1.4%]
+    BM_ZFlat/13              856458     835476        778 585.8MB/s  bin (18.21 %)  [ -0.1%]
+    BM_ZFlat/14              166908     166857       4375 218.6MB/s  sum (51.88 %)  [ -1.4%]
+    BM_ZFlat/15               21181      20857      35000 193.3MB/s  man (59.36 %)  [ -0.8%]
+    BM_ZFlat/16              244009     239973       2917 471.3MB/s  pb (23.15 %)   [ -1.4%]
+    BM_ZFlat/17              596362     590000       1000 297.9MB/s  gaviota (38.27%) [ +0.0%]
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@59 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-I thought I'd already committed this patch, so that the 1.0.1 release already
-would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
-instead, so this is a reconstruction.
-
-R=csilvers
-DELTA=43  (39 added, 3 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1295
-
-------------------------------------------------------------------------
-r27 | snappy.mirrorbot@gmail.com | 2011-04-08 11:51:53 +0200 (Fri, 08 Apr 2011) | 22 lines
-
-
-Include C bindings of Snappy, contributed by Martin Gieseking.
+commit f2e184f638bdc7905f26c24faaf10fc0f5d33403
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sat Feb 11 22:11:22 2012 +0000
 
-I've made a few changes since Martin's version; mostly style nits, but also
-a semantic change -- most functions that return bool in the C++ version now
-return an enum, to better match typical C (and zlib) semantics.
+    Lower the size allocated in the "corrupted input" unit test from 256 MB
+    to 2 MB. This fixes issues with running the unit test on platforms with
+    little RAM (e.g. some ARM boards).
+    
+    Also, reactivate the 2 MB test for 64-bit platforms; there's no good
+    reason why it shouldn't be.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@58 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-I've kept the copyright notice, since Martin is obviously the author here;
-he has signed the contributor license agreement, though, so this should not
-hinder Google's use in the future.
+commit e750dc0f054ba74b0ce76dd2013e6728cc7a41c5
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sun Jan 8 17:55:48 2012 +0000
+
+    Minor refactoring to accommodate changes in Google's internal code tree.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@57 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit d9068ee301bdf893a4d8cb7c6518eacc44c4c1f2
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Jan 4 13:10:46 2012 +0000
 
-We'll need to update the libtool version number to match the added interface,
-but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
-I'm going to wait until public release.
-
-R=csilvers
-DELTA=238  (233 added, 0 deleted, 5 changed)
+    Fix public issue #57: Fix most warnings with -Wall, mostly signed/unsigned
+    warnings. There are still some in the unit test, but the main .cc file should
+    be clean. We haven't enabled -Wall for the default build, since the unit test
+    is still not clean.
+    
+    This also fixes a real bug in the open-source implementation of
+    ReadFileToStringOrDie(); it would not detect errors correctly.
+    
+    I had to go through some pains to avoid performance loss as the types
+    were changed; I think there might still be some with 32-bit if and only if LFS
+    is enabled (ie., size_t is 64-bit), but for regular 32-bit and 64-bit I can't
+    see any losses, and I've diffed the generated GCC assembler between the old and
+    new code without seeing any significant differences. If anything, it's ever so
+    slightly faster.
+    
+    This may or may not enable compression of very large blocks (>2^32 bytes)
+    when size_t is 64-bit, but I haven't checked, and it is still not a supported
+    case.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@56 03e5f5b5-db94-4691-08a0-1a8bf15f6143
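
A small, invented example of the kind of signed/unsigned fix that -Wall asks for (not code from this change; names are made up):

    #include <cstddef>
    #include <vector>

    // Indexing with std::size_t instead of int avoids the classic
    // "comparison between signed and unsigned integer expressions" warning.
    bool Contains(const std::vector<int>& values, int needle) {
      for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] == needle) return true;
      }
      return false;
    }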
 
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1294
+commit 0755c815197dacc77d8971ae917c86d7aa96bf8e
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Jan 4 10:46:39 2012 +0000
 
-------------------------------------------------------------------------
-r26 | snappy.mirrorbot@gmail.com | 2011-04-07 18:36:43 +0200 (Thu, 07 Apr 2011) | 13 lines
+    Add a framing format description. We do not have any implementation of this at
+    the current point, but there seems to be enough of a general interest in the
+    topic (cf. public bug #34).
+    
+    R=csilvers,sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@55 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit d7eb2dc4133794b62cba691f9be40d1549bc32e2
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon Dec 5 21:27:26 2011 +0000
 
-Replace geo.protodata with a newer version.
-
-The data compresses/decompresses slightly faster than the old data, and has
-similar density.
-
-R=lookingbill
-DELTA=1  (0 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1288
+    Speed up decompression by moving the refill check to the end of the loop.
+    
+    This seems to work because in most of the branches, the compiler can evaluate
+    “ip_limit_ - ip” in a more efficient way than reloading ip_limit_ from memory
+    (either by already having the entire expression in a register, or reconstructing
+    it from “avail”, or something else). Memory loads, even from L1, are seemingly
+    costly in the big picture at the current decompression speeds.
+    
+    Microbenchmarks (64-bit, opt mode):
+    
+    Westmere (Intel Core i7):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0       74492      74491     187894 1.3GB/s  html      [ +5.9%]
+      BM_UFlat/1      712268     712263      19644 940.0MB/s  urls    [ +3.8%]
+      BM_UFlat/2       10591      10590    1000000 11.2GB/s  jpg      [ -6.8%]
+      BM_UFlat/3       29643      29643     469915 3.0GB/s  pdf       [ +7.9%]
+      BM_UFlat/4      304669     304667      45930 1.3GB/s  html4     [ +4.8%]
+      BM_UFlat/5       28508      28507     490077 823.1MB/s  cp      [ +4.0%]
+      BM_UFlat/6       12415      12415    1000000 856.5MB/s  c       [ +8.6%]
+      BM_UFlat/7        3415       3415    4084723 1039.0MB/s  lsp    [+18.0%]
+      BM_UFlat/8      979569     979563      14261 1002.5MB/s  xls    [ +5.8%]
+      BM_UFlat/9      230150     230148      60934 630.2MB/s  txt1    [ +5.2%]
+      BM_UFlat/10     197167     197166      71135 605.5MB/s  txt2    [ +4.7%]
+      BM_UFlat/11     607394     607390      23041 670.1MB/s  txt3    [ +5.6%]
+      BM_UFlat/12     808502     808496      17316 568.4MB/s  txt4    [ +5.0%]
+      BM_UFlat/13     372791     372788      37564 1.3GB/s  bin       [ +3.3%]
+      BM_UFlat/14      44541      44541     313969 818.8MB/s  sum     [ +5.7%]
+      BM_UFlat/15       4833       4833    2898697 834.1MB/s  man     [ +4.8%]
+      BM_UFlat/16      79855      79855     175356 1.4GB/s  pb        [ +4.8%]
+      BM_UFlat/17     245845     245843      56838 715.0MB/s  gaviota [ +5.8%]
+    
+    Clovertown (Intel Core 2):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0      107911     107890     100000 905.1MB/s  html    [ +2.2%]
+      BM_UFlat/1     1011237    1011041      10000 662.3MB/s  urls    [ +2.5%]
+      BM_UFlat/2       26775      26770     523089 4.4GB/s  jpg       [ +0.0%]
+      BM_UFlat/3       48103      48095     290618 1.8GB/s  pdf       [ +3.4%]
+      BM_UFlat/4      437724     437644      31937 892.6MB/s  html4   [ +2.1%]
+      BM_UFlat/5       39607      39600     358284 592.5MB/s  cp      [ +2.4%]
+      BM_UFlat/6       18227      18224     768191 583.5MB/s  c       [ +2.7%]
+      BM_UFlat/7        5171       5170    2709437 686.4MB/s  lsp     [ +3.9%]
+      BM_UFlat/8     1560291    1559989       8970 629.5MB/s  xls     [ +3.6%]
+      BM_UFlat/9      335401     335343      41731 432.5MB/s  txt1    [ +3.0%]
+      BM_UFlat/10     287014     286963      48758 416.0MB/s  txt2    [ +2.8%]
+      BM_UFlat/11     888522     888356      15752 458.1MB/s  txt3    [ +2.9%]
+      BM_UFlat/12    1186600    1186378      10000 387.3MB/s  txt4    [ +3.1%]
+      BM_UFlat/13     572295     572188      24468 855.4MB/s  bin     [ +2.1%]
+      BM_UFlat/14      64060      64049     218401 569.4MB/s  sum     [ +4.1%]
+      BM_UFlat/15       7264       7263    1916168 555.0MB/s  man     [ +1.4%]
+      BM_UFlat/16     108853     108836     100000 1039.1MB/s  pb     [ +1.7%]
+      BM_UFlat/17     364289     364223      38419 482.6MB/s  gaviota [ +4.9%]
+    
+    Barcelona (AMD Opteron):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0      103900     103871     100000 940.2MB/s  html    [ +8.3%]
+      BM_UFlat/1     1000435    1000107      10000 669.5MB/s  urls    [ +6.6%]
+      BM_UFlat/2       24659      24652     567362 4.8GB/s  jpg       [ +0.1%]
+      BM_UFlat/3       48206      48193     291121 1.8GB/s  pdf       [ +5.0%]
+      BM_UFlat/4      421980     421850      33174 926.0MB/s  html4   [ +7.3%]
+      BM_UFlat/5       40368      40357     346994 581.4MB/s  cp      [ +8.7%]
+      BM_UFlat/6       19836      19830     708695 536.2MB/s  c       [ +8.0%]
+      BM_UFlat/7        6100       6098    2292774 581.9MB/s  lsp     [ +9.0%]
+      BM_UFlat/8     1693093    1692514       8261 580.2MB/s  xls     [ +8.0%]
+      BM_UFlat/9      365991     365886      38225 396.4MB/s  txt1    [ +7.1%]
+      BM_UFlat/10     311330     311238      44950 383.6MB/s  txt2    [ +7.6%]
+      BM_UFlat/11     975037     974737      14376 417.5MB/s  txt3    [ +6.9%]
+      BM_UFlat/12    1303558    1303175      10000 352.6MB/s  txt4    [ +7.3%]
+      BM_UFlat/13     517448     517290      27144 946.2MB/s  bin     [ +5.5%]
+      BM_UFlat/14      66537      66518     210352 548.3MB/s  sum     [ +7.5%]
+      BM_UFlat/15       7976       7974    1760383 505.6MB/s  man     [ +5.6%]
+      BM_UFlat/16     103121     103092     100000 1097.0MB/s  pb     [ +8.7%]
+      BM_UFlat/17     391431     391314      35733 449.2MB/s  gaviota [ +6.5%]
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@54 03e5f5b5-db94-4691-08a0-1a8bf15f6143
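
A schematic C++ sketch of the loop shape described in the entry above, with invented names (Input, Refill, DecodeOneTag); it only illustrates where the refill check sits, not Snappy's real decoder:

    #include <cstdint>

    struct Input {
      const uint8_t* ip;        // current read position
      const uint8_t* ip_limit;  // end of the buffered chunk
      bool Refill() { return false; }  // stub: a real reader pulls more bytes
    };

    void DecodeOneTag(Input* in) { ++in->ip; }  // stand-in for the real work

    void DecodeLoop(Input* in) {
      if (in->ip >= in->ip_limit && !in->Refill()) return;  // prime the buffer
      for (;;) {
        DecodeOneTag(in);
        // Refill check at the *end* of the iteration: "ip_limit - ip" is
        // usually already known from the work above, so no extra memory load.
        if (in->ip >= in->ip_limit && !in->Refill()) break;
      }
    }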
 
-------------------------------------------------------------------------
-r25 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:53 +0200 (Wed, 30 Mar 2011) | 12 lines
-
-
-Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
-inclusion in snappy-stubs-internal.h, which eases compiling outside the
-automake/autoconf framework.
-
-R=csilvers
-DELTA=5  (4 added, 1 deleted, 0 changed)
-
+commit 5ed51ce15fc4ff8d2f7235704eb6b0c3f762fb88
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Nov 23 11:14:17 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1152
-
-------------------------------------------------------------------------
-r24 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:39 +0200 (Wed, 30 Mar 2011) | 13 lines
-
-
-Fix public issue #26: Take memory allocation and reallocation entirely out of the
-Measure() loop. This gives all algorithms a small speed boost, except Snappy which
-already didn't do reallocation (so the measurements were slightly biased in its
-favor).
-
-R=csilvers
-DELTA=92  (69 added, 9 deleted, 14 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1151
-
-------------------------------------------------------------------------
-r23 | snappy.mirrorbot@gmail.com | 2011-03-30 22:25:09 +0200 (Wed, 30 Mar 2011) | 18 lines
-
+    Speed up decompression by making the fast path for literals faster.
+    
+    We do the fast-path step as soon as possible; in fact, as soon as we know the
+    literal length. Since we usually hit the fast path, we can then skip the checks
+    for long literals and available input space (beyond what the fast path check
+    already does).
+    
+    Note that this changes the decompression Writer API; however, it does not
+    change the ABI, since writers are always templatized and as such never
+    cross compilation units. The new API is slightly more general, in that it
+    doesn't hard-code the value 16. Note that we also take care to check
+    for len <= 16 first, since the other two checks almost always succeed
+    (so we don't want to waste time checking for them until we have to).
+    
+    The improvements are most marked on Nehalem, but are generally positive
+    on other platforms as well. All microbenchmarks are 64-bit, opt.
+    
+    Clovertown (Core 2):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0      110226     110224     100000 886.0MB/s  html    [ +1.5%]
+      BM_UFlat/1     1036523    1036508      10000 646.0MB/s  urls    [ -0.8%]
+      BM_UFlat/2       26775      26775     522570 4.4GB/s  jpg       [ +0.0%]
+      BM_UFlat/3       49738      49737     280974 1.8GB/s  pdf       [ +0.3%]
+      BM_UFlat/4      446790     446792      31334 874.3MB/s  html4   [ +0.8%]
+      BM_UFlat/5       40561      40562     350424 578.5MB/s  cp      [ +1.3%]
+      BM_UFlat/6       18722      18722     746903 568.0MB/s  c       [ +1.4%]
+      BM_UFlat/7        5373       5373    2608632 660.5MB/s  lsp     [ +8.3%]
+      BM_UFlat/8     1615716    1615718       8670 607.8MB/s  xls     [ +2.0%]
+      BM_UFlat/9      345278     345281      40481 420.1MB/s  txt1    [ +1.4%]
+      BM_UFlat/10     294855     294855      47452 404.9MB/s  txt2    [ +1.6%]
+      BM_UFlat/11     914263     914263      15316 445.2MB/s  txt3    [ +1.1%]
+      BM_UFlat/12    1222694    1222691      10000 375.8MB/s  txt4    [ +1.4%]
+      BM_UFlat/13     584495     584489      23954 837.4MB/s  bin     [ -0.6%]
+      BM_UFlat/14      66662      66662     210123 547.1MB/s  sum     [ +1.2%]
+      BM_UFlat/15       7368       7368    1881856 547.1MB/s  man     [ +4.0%]
+      BM_UFlat/16     110727     110726     100000 1021.4MB/s  pb     [ +2.3%]
+      BM_UFlat/17     382138     382141      36616 460.0MB/s  gaviota [ -0.7%]
+    
+    Westmere (Core i7):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0       78861      78853     177703 1.2GB/s  html      [ +2.1%]
+      BM_UFlat/1      739560     739491      18912 905.4MB/s  urls    [ +3.4%]
+      BM_UFlat/2        9867       9866    1419014 12.0GB/s  jpg      [ +3.4%]
+      BM_UFlat/3       31989      31986     438385 2.7GB/s  pdf       [ +0.2%]
+      BM_UFlat/4      319406     319380      43771 1.2GB/s  html4     [ +1.9%]
+      BM_UFlat/5       29639      29636     472862 791.7MB/s  cp      [ +5.2%]
+      BM_UFlat/6       13478      13477    1000000 789.0MB/s  c       [ +2.3%]
+      BM_UFlat/7        4030       4029    3475364 880.7MB/s  lsp     [ +8.7%]
+      BM_UFlat/8     1036585    1036492      10000 947.5MB/s  xls     [ +6.9%]
+      BM_UFlat/9      242127     242105      57838 599.1MB/s  txt1    [ +3.0%]
+      BM_UFlat/10     206499     206480      67595 578.2MB/s  txt2    [ +3.4%]
+      BM_UFlat/11     641635     641570      21811 634.4MB/s  txt3    [ +2.4%]
+      BM_UFlat/12     848847     848769      16443 541.4MB/s  txt4    [ +3.1%]
+      BM_UFlat/13     384968     384938      36366 1.2GB/s  bin       [ +0.3%]
+      BM_UFlat/14      47106      47101     297770 774.3MB/s  sum     [ +4.4%]
+      BM_UFlat/15       5063       5063    2772202 796.2MB/s  man     [ +7.7%]
+      BM_UFlat/16      83663      83656     167697 1.3GB/s  pb        [ +1.8%]
+      BM_UFlat/17     260224     260198      53823 675.6MB/s  gaviota [ -0.5%]
+    
+    Barcelona (Opteron):
+    
+      Benchmark     Time(ns)    CPU(ns) Iterations
+      --------------------------------------------
+      BM_UFlat/0      112490     112457     100000 868.4MB/s  html    [ -0.4%]
+      BM_UFlat/1     1066719    1066339      10000 627.9MB/s  urls    [ +1.0%]
+      BM_UFlat/2       24679      24672     563802 4.8GB/s  jpg       [ +0.7%]
+      BM_UFlat/3       50603      50589     277285 1.7GB/s  pdf       [ +2.6%]
+      BM_UFlat/4      452982     452849      30900 862.6MB/s  html4   [ -0.2%]
+      BM_UFlat/5       43860      43848     319554 535.1MB/s  cp      [ +1.2%]
+      BM_UFlat/6       21419      21413     653573 496.6MB/s  c       [ +1.0%]
+      BM_UFlat/7        6646       6645    2105405 534.1MB/s  lsp     [ +0.3%]
+      BM_UFlat/8     1828487    1827886       7658 537.3MB/s  xls     [ +2.6%]
+      BM_UFlat/9      391824     391714      35708 370.3MB/s  txt1    [ +2.2%]
+      BM_UFlat/10     334913     334816      41885 356.6MB/s  txt2    [ +1.7%]
+      BM_UFlat/11    1042062    1041674      10000 390.7MB/s  txt3    [ +1.1%]
+      BM_UFlat/12    1398902    1398456      10000 328.6MB/s  txt4    [ +1.7%]
+      BM_UFlat/13     545706     545530      25669 897.2MB/s  bin     [ -0.4%]
+      BM_UFlat/14      71512      71505     196035 510.0MB/s  sum     [ +1.4%]
+      BM_UFlat/15       8422       8421    1665036 478.7MB/s  man     [ +2.6%]
+      BM_UFlat/16     112053     112048     100000 1009.3MB/s  pb     [ -0.4%]
+      BM_UFlat/17     416723     416713      33612 421.8MB/s  gaviota [ -2.0%]
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@53 03e5f5b5-db94-4691-08a0-1a8bf15f6143
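
A sketch of the "cheapest, usually-true check first" ordering described above; the function name, parameters, and the 16-byte copy are invented for illustration, not Snappy's real Writer API:

    #include <cstdint>
    #include <cstring>

    // Returns true if the literal was handled by the fast path.
    bool TryFastLiteral(uint8_t* op, const uint8_t* op_limit,
                        const uint8_t* ip, const uint8_t* ip_limit,
                        std::size_t len) {
      // len <= 16 is tested first: it is the cheapest condition and, like the
      // two space checks, it almost always holds, so the slow path is rare.
      if (len <= 16 && op_limit - op >= 16 && ip_limit - ip >= 16) {
        std::memcpy(op, ip, 16);  // may copy past len; caller advances by len only
        return true;
      }
      return false;  // long literal or not enough slack: take the careful path
    }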
 
-Renamed "namespace zippy" to "namespace snappy" to reduce
-the differences from the opensource code.  Will make it easier
-in the future to mix-and-match third-party code that uses
-snappy with google code.
+commit 0c1b9c3904430f5b399bd057d76de4bc36b7a123
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Nov 8 14:46:39 2011 +0000
+
+    Fix public issue #53: Update the README to the API we actually open-sourced
+    with.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@52 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Currently, csearch shows that the only external user of
-"namespace zippy" is some bigtable code that accesses
-a TEST variable, which is temporarily kept in the zippy
-namespace.
+commit b61134bc0a6a904b41522b4e5c9e80874c730cef
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Oct 5 12:27:12 2011 +0000
 
-R=sesse
-DELTA=123  (18 added, 3 deleted, 102 changed)
+    In the format description, use a clearer example to emphasize that varints are
+    stored in little-endian. Patch from Christian von Roques.
+    
+    R=csilvers
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@51 03e5f5b5-db94-4691-08a0-1a8bf15f6143
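
To make the little-endian point concrete, a minimal varint decoder sketch (invented name; not the library's actual routine): each byte carries seven payload bits, least-significant group first, and the top bit flags continuation.

    #include <cstdint>

    const uint8_t* DecodeVarint32(const uint8_t* p, uint32_t* value) {
      uint32_t result = 0;
      for (int shift = 0; shift <= 28; shift += 7) {
        const uint32_t byte = *p++;
        result |= (byte & 0x7f) << shift;   // low-order group arrives first
        if ((byte & 0x80) == 0) break;      // high bit clear: last byte
      }
      *value = result;
      return p;
    }
    // Example: bytes 0xAC 0x02 decode as 0x2C | (0x02 << 7) = 300.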
 
+commit 21a2e4f55758e759302cd84ad0f3580affcba7d9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Sep 15 19:34:06 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1150
+    Release Snappy 1.0.4.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@50 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r22 | snappy.mirrorbot@gmail.com | 2011-03-29 00:17:04 +0200 (Tue, 29 Mar 2011) | 11 lines
-
+commit e2e303286813c759c5b1cdb46dad63c494f0a061
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Sep 15 09:50:05 2011 +0000
 
-Put back the final few lines of what was truncated during the
-license header change.
+    Fix public issue #50: Include generic byteswap macros.
+    Also include Solaris 10 and FreeBSD versions.
+    
+    R=csilvers
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@49 03e5f5b5-db94-4691-08a0-1a8bf15f6143
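
A portable sketch of generic byte-swap helpers of the kind this entry refers to (the real header also picks up platform intrinsics where available; these fallbacks are only illustrative):

    #include <cstdint>

    uint16_t ByteSwap16(uint16_t v) {
      return static_cast<uint16_t>((v >> 8) | (v << 8));
    }

    uint32_t ByteSwap32(uint32_t v) {
      return ((v & 0x000000ffu) << 24) |
             ((v & 0x0000ff00u) << 8)  |
             ((v & 0x00ff0000u) >> 8)  |
             ((v & 0xff000000u) >> 24);
    }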
+
+commit 593002da3c051f4721312869f816b41485bad3b7
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Aug 10 18:57:27 2011 +0000
 
-R=csilvers
-DELTA=5  (4 added, 0 deleted, 1 changed)
+    Partially fix public issue 50: Remove an extra comma from the end of some
+    enum declarations, as it seems the Sun compiler does not like it.
+    
+    Based on patch by Travis Vitek.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@48 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit f1063a5dc43891eed37f0586bfea57b84dddd756
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Aug 10 18:44:16 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1094
+    Use the right #ifdef test for sys/mman.h.
+    
+    Based on patch by Travis Vitek.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@47 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 41c827a2fa9ce048202d941187f211180feadde4
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Aug 10 01:22:09 2011 +0000
 
-------------------------------------------------------------------------
-r21 | snappy.mirrorbot@gmail.com | 2011-03-26 03:34:34 +0100 (Sat, 26 Mar 2011) | 20 lines
+    Fix public issue #47: Small comment cleanups in the unit test.
+    
+    Originally based on a patch by Patrick Pelletier.
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@46 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-
-Change on 2011-03-25 19:18:00-07:00 by sesse
+commit 59aeffa6049b5c2a3a467e7602c1f93630b870e7
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Aug 10 01:14:43 2011 +0000
 
-	Replace the Apache 2.0 license header by the BSD-type license header;
-	somehow a lot of the files were missed in the last round.
+    Fix public issue #46: Format description said "3-byte offset"
+    instead of "4-byte offset" for the longest copies.
+    
+    Also fix an inconsistency in the heading for section 2.2.3.
+    Both patches by Patrick Pelletier.
+    
+    R=csilvers
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@45 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-	R=dannyb,csilvers
-	DELTA=147  (74 added, 2 deleted, 71 changed)
+commit 57e7cd72559cb022ef32856f2252a4c4585e562e
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Jun 28 11:40:25 2011 +0000
 
-Change on 2011-03-25 19:25:07-07:00 by sesse
+    Fix public issue #44: Make the definition and declaration of CompressFragment
+    identical, even regarding cv-qualifiers.
+    
+    This is required to work around a bug in the Solaris Studio C++ compiler
+    (it does not properly disregard cv-qualifiers when doing name mangling).
+    
+    R=sanjay
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@44 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-	Unbreak the build; the relicensing removed a bit too much (only comments
-	were intended, but I also accidentially removed some of the top lines of
-	the actual source).
-
-
+commit 13c4a449a8ea22139c9aa441e8024eebc9dbdf6e
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sat Jun 4 10:19:05 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1072
-
-------------------------------------------------------------------------
-r20 | snappy.mirrorbot@gmail.com | 2011-03-25 17:14:41 +0100 (Fri, 25 Mar 2011) | 10 lines
+    Correct an inaccuracy in the Snappy format description.
+    (I stumbled into this when changing the way we decompress literals.)
+    
+    R=csilvers
+    
+    Revision created by MOE tool push_codebase.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@43 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-
-Change Snappy from the Apache 2.0 to a BSD-type license.
-
-R=dannyb
-DELTA=328  (80 added, 184 deleted, 64 changed)
-
+commit f5406737403119e1483a71d2084d17728663a114
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Jun 3 20:53:06 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1061
-
-------------------------------------------------------------------------
-r19 | snappy.mirrorbot@gmail.com | 2011-03-25 01:39:01 +0100 (Fri, 25 Mar 2011) | 11 lines
-
+    Speed up decompression by removing a fast-path attempt.
+    
+    Whenever we try to enter a copy fast-path, there is a certain cost in checking
+    that all the preconditions are in place, but it's normally offset by the fact
+    that we can usually take the cheaper path. However, in a certain path we've
+    already established that "avail < literal_length", which usually means that
+    either the available space is small, or the literal is big. Both will disqualify
+    us from taking the fast path, and thus we take the hit from the precondition
+    checking without gaining much from having a fast path. Thus, simply don't try
+    the fast path in this situation -- we're already on a slow path anyway
+    (one where we need to refill more data from the reader).
+    
+    I'm a bit surprised at how much this gained; it could be that this path is
+    more common than I thought, or that the simpler structure somehow makes the
+    compiler happier. I haven't looked at the assembler, but it's a win across
+    the board on both Core 2, Core i7 and Opteron, at least for the cases we
+    typically care about. The gains seem to be the largest on Core i7, though.
+    Results from my Core i7 workstation:
+    
+    
+      Benchmark            Time(ns)    CPU(ns) Iterations
+      ---------------------------------------------------
+      BM_UFlat/0              73337      73091     190996 1.3GB/s  html      [ +1.7%]
+      BM_UFlat/1             696379     693501      20173 965.5MB/s  urls    [ +2.7%]
+      BM_UFlat/2               9765       9734    1472135 12.1GB/s  jpg      [ +0.7%]
+      BM_UFlat/3              29720      29621     472973 3.0GB/s  pdf       [ +1.8%]
+      BM_UFlat/4             294636     293834      47782 1.3GB/s  html4     [ +2.3%]
+      BM_UFlat/5              28399      28320     494700 828.5MB/s  cp      [ +3.5%]
+      BM_UFlat/6              12795      12760    1000000 833.3MB/s  c       [ +1.2%]
+      BM_UFlat/7               3984       3973    3526448 893.2MB/s  lsp     [ +5.7%]
+      BM_UFlat/8             991996     989322      14141 992.6MB/s  xls     [ +3.3%]
+      BM_UFlat/9             228620     227835      61404 636.6MB/s  txt1    [ +4.0%]
+      BM_UFlat/10            197114     196494      72165 607.5MB/s  txt2    [ +3.5%]
+      BM_UFlat/11            605240     603437      23217 674.4MB/s  txt3    [ +3.7%]
+      BM_UFlat/12            804157     802016      17456 573.0MB/s  txt4    [ +3.9%]
+      BM_UFlat/13            347860     346998      40346 1.4GB/s  bin       [ +1.2%]
+      BM_UFlat/14             44684      44559     315315 818.4MB/s  sum     [ +2.3%]
+      BM_UFlat/15              5120       5106    2739726 789.4MB/s  man     [ +3.3%]
+      BM_UFlat/16             76591      76355     183486 1.4GB/s  pb        [ +2.8%]
+      BM_UFlat/17            238564     237828      58824 739.1MB/s  gaviota [ +1.6%]
+      BM_UValidate/0          42194      42060     333333 2.3GB/s  html      [ -0.1%]
+      BM_UValidate/1         433182     432005      32407 1.5GB/s  urls      [ -0.1%]
+      BM_UValidate/2            197        196   71428571 603.3GB/s  jpg     [ +0.5%]
+      BM_UValidate/3          14494      14462     972222 6.1GB/s  pdf       [ +0.5%]
+      BM_UValidate/4         168444     167836      83832 2.3GB/s  html4     [ +0.1%]
+    
+    R=jeff
+    
+    Revision created by MOE tool push_codebase.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@42 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Release Snappy 1.0.1, to soup up all the various small changes
-that have been made since release.
-
-R=csilvers
-DELTA=266  (260 added, 0 deleted, 6 changed)
-
+commit 197f3ee9f9397e98c9abf07f9da875fbcb725dba
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Jun 3 20:47:14 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1057
-
-------------------------------------------------------------------------
-r18 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:54 +0100 (Thu, 24 Mar 2011) | 11 lines
-
-
-Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
-supported on Windows, and %I64d is recommended instead.
+    Speed up decompression by not needing a lookup table for literal items.
+    
+    Looking up into and decoding the values from char_table has long shown up as a
+    hotspot in the decompressor. While it turns out that it's hard to make a more
+    efficient decoder for the copy ops, the literals are simple enough that we can
+    decode them without needing a table lookup. (This means that 1/4 of the table
+    is now unused, although that in itself doesn't buy us anything.)
+    
+    The gains are small, but definitely present; some tests win as much as 10%,
+    but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
+    Core 2 and Opteron show similar results. (I've run with more iterations
+    than unusual to make sure the smaller gains don't drown entirely in noise.)
+    
+      Benchmark            Time(ns)    CPU(ns) Iterations
+      ---------------------------------------------------
+      BM_UFlat/0              74665      74428     182055 1.3GB/s  html      [ +3.1%]
+      BM_UFlat/1             714106     711997      19663 940.4MB/s  urls    [ +4.4%]
+      BM_UFlat/2               9820       9789    1427115 12.1GB/s  jpg      [ -1.2%]
+      BM_UFlat/3              30461      30380     465116 2.9GB/s  pdf       [ +0.8%]
+      BM_UFlat/4             301445     300568      46512 1.3GB/s  html4     [ +2.2%]
+      BM_UFlat/5              29338      29263     479452 801.8MB/s  cp      [ +1.6%]
+      BM_UFlat/6              13004      12970    1000000 819.9MB/s  c       [ +2.1%]
+      BM_UFlat/7               4180       4168    3349282 851.4MB/s  lsp     [ +1.3%]
+      BM_UFlat/8            1026149    1024000      10000 959.0MB/s  xls     [+10.7%]
+      BM_UFlat/9             237441     236830      59072 612.4MB/s  txt1    [ +0.3%]
+      BM_UFlat/10            203966     203298      69307 587.2MB/s  txt2    [ +0.8%]
+      BM_UFlat/11            627230     625000      22400 651.2MB/s  txt3    [ +0.7%]
+      BM_UFlat/12            836188     833979      16787 551.0MB/s  txt4    [ +1.3%]
+      BM_UFlat/13            351904     350750      39886 1.4GB/s  bin       [ +3.8%]
+      BM_UFlat/14             45685      45562     308370 800.4MB/s  sum     [ +5.9%]
+      BM_UFlat/15              5286       5270    2656546 764.9MB/s  man     [ +1.5%]
+      BM_UFlat/16             78774      78544     178117 1.4GB/s  pb        [ +4.3%]
+      BM_UFlat/17            242270     241345      58091 728.3MB/s  gaviota [ +1.2%]
+      BM_UValidate/0          42149      42000     333333 2.3GB/s  html      [ -3.0%]
+      BM_UValidate/1         432741     431303      32483 1.5GB/s  urls      [ +7.8%]
+      BM_UValidate/2            198        197   71428571 600.7GB/s  jpg     [+16.8%]
+      BM_UValidate/3          14560      14521     965517 6.1GB/s  pdf       [ -4.1%]
+      BM_UValidate/4         169065     168671      83832 2.3GB/s  html4     [ -2.9%]
+    
+    R=jeff
+    
+    Revision created by MOE tool push_codebase.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@41 03e5f5b5-db94-4691-08a0-1a8bf15f6143
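
A sketch of decoding a literal length directly from the tag byte, as described above (the helper name is invented, and the unaligned load assumes a little-endian host). In the Snappy format the low two bits of a tag byte select the element type; for literals the upper six bits hold length-1, with values 60-63 meaning the length is stored in the following 1-4 bytes.

    #include <cstdint>
    #include <cstring>

    // Assumes the caller already knows this tag is a literal (low two bits 00).
    uint32_t DecodeLiteralLength(const uint8_t** ip) {
      const uint8_t* p = *ip;
      uint32_t len = (*p++ >> 2) + 1;       // common case: lengths 1..60
      if (len > 60) {                        // encoded 60..63 => 1..4 extra bytes
        const uint32_t extra = len - 60;
        uint32_t stored = 0;
        std::memcpy(&stored, p, extra);      // little-endian load (illustrative)
        len = stored + 1;
        p += extra;
      }
      *ip = p;
      return len;
    }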
 
-R=csilvers
-DELTA=6  (5 added, 0 deleted, 1 changed)
+commit 8efa2639e885ac467e7b11c662975c5844019fb9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Jun 2 22:57:41 2011 +0000
 
+    Release Snappy 1.0.3.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@40 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1034
+commit 2e12124bd87f39296709decc65195fa5bfced538
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Jun 2 18:06:54 2011 +0000
 
-------------------------------------------------------------------------
-r17 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:27 +0100 (Thu, 24 Mar 2011) | 13 lines
+    Remove an unneeded goto in the decompressor; it turns out that the
+    state of ip_ after decompression (or attempted decompresion) is
+    completely irrelevant, so we don't need the trailer.
+    
+    Performance is, as expected, mostly flat -- there's a curious ~3-5%
+    loss in the "lsp" test, but that test case is so short it is hard to say
+    anything definitive about why (most likely, it's some sort of
+    unrelated effect).
+    
+    R=jeff
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@39 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-
-Fix public issue #19: Fix unit test when Google Test is installed but the
-gflags package isn't (Google Test is not properly initialized).
+commit c266bbf32103f8ed4a83e2272ed3d8828d5b8b34
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Jun 2 17:59:40 2011 +0000
 
-Patch by Martin Gieseking.
-
-R=csilvers
-DELTA=2  (1 added, 0 deleted, 1 changed)
-
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1033
-
-------------------------------------------------------------------------
-r16 | snappy.mirrorbot@gmail.com | 2011-03-24 20:13:57 +0100 (Thu, 24 Mar 2011) | 15 lines
-
+    Speed up decompression by caching ip_.
+    
+    It is seemingly hard for the compiler to understand that ip_, the current input
+    pointer into the compressed data stream, can not alias on anything else, and
+    thus using it directly will incur memory traffic as it cannot be kept in a
+    register. The code already knew about this and cached it into a local
+    variable, but since Step() only decoded one tag, it had to move ip_ back into
+    place between every tag. This seems to have cost us a significant amount of
+    performance, so changing Step() into a function that decodes as much as it can
+    before it saves ip_ back and returns. (Note that Step() was already inlined,
+    so it is not the manual inlining that buys the performance here.)
+    
+    The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron
+    (for plain array-to-array decompression, in 64-bit opt mode).
+    
+    There is a tiny difference in the behavior here; if an invalid literal is
+    encountered (ie., the writer refuses the Append() operation), ip_ will now
+    point to the byte past the tag byte, instead of where the literal was
+    originally thought to end. However, we don't use ip_ for anything after
+    DecompressAllTags() has returned, so this should not change external behavior
+    in any way.
+    
+    Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
+    
+    Benchmark            Time(ns)    CPU(ns) Iterations
+    ---------------------------------------------------
+    BM_UFlat/0              79134      79110       8835 1.2GB/s  html      [ +6.2%]
+    BM_UFlat/1             786126     786096        891 851.8MB/s  urls    [+10.0%]
+    BM_UFlat/2               9948       9948      69125 11.9GB/s  jpg      [ -1.3%]
+    BM_UFlat/3              31999      31998      21898 2.7GB/s  pdf       [ +6.5%]
+    BM_UFlat/4             318909     318829       2204 1.2GB/s  html4     [ +6.5%]
+    BM_UFlat/5              31384      31390      22363 747.5MB/s  cp      [ +9.2%]
+    BM_UFlat/6              14037      14034      49858 757.7MB/s  c       [+10.6%]
+    BM_UFlat/7               4612       4612     151395 769.5MB/s  lsp     [ +9.5%]
+    BM_UFlat/8            1203174    1203007        582 816.3MB/s  xls     [+19.3%]
+    BM_UFlat/9             253869     253955       2757 571.1MB/s  txt1    [+11.4%]
+    BM_UFlat/10            219292     219290       3194 544.4MB/s  txt2    [+12.1%]
+    BM_UFlat/11            672135     672131       1000 605.5MB/s  txt3    [+11.2%]
+    BM_UFlat/12            902512     902492        776 509.2MB/s  txt4    [+12.5%]
+    BM_UFlat/13            372110     371998       1881 1.3GB/s  bin       [ +5.8%]
+    BM_UFlat/14             50407      50407      10000 723.5MB/s  sum     [+13.5%]
+    BM_UFlat/15              5699       5701     100000 707.2MB/s  man     [+12.4%]
+    BM_UFlat/16             83448      83424       8383 1.3GB/s  pb        [ +5.7%]
+    BM_UFlat/17            256958     256963       2723 684.1MB/s  gaviota [ +7.9%]
+    BM_UValidate/0          42795      42796      16351 2.2GB/s  html      [+25.8%]
+    BM_UValidate/1         490672     490622       1427 1.3GB/s  urls      [+22.7%]
+    BM_UValidate/2            237        237    2950297 499.0GB/s  jpg     [+24.9%]
+    BM_UValidate/3          14610      14611      47901 6.0GB/s  pdf       [+26.8%]
+    BM_UValidate/4         171973     171990       4071 2.2GB/s  html4     [+25.7%]
+    
+    
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@38 03e5f5b5-db94-4691-08a0-1a8bf15f6143
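
A schematic sketch of the "decode many tags per local copy of ip_" shape described above (invented struct; not the real DecompressAllTags):

    #include <cstdint>

    struct Decoder {
      const uint8_t* ip_;
      const uint8_t* ip_limit_;

      void DecompressAllTags() {
        const uint8_t* ip = ip_;      // local copy can live in a register
        while (ip < ip_limit_) {
          const uint8_t tag = *ip++;  // stand-in for decoding one tag
          (void)tag;
        }
        ip_ = ip;                     // write the member back once, at the end
      }
    };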
 
-Make the unit test work on systems without mmap(). This is required for,
-among others, Windows support. For Windows in specific, we could have used
-CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
-to compiling, and is of course also relevant for embedded systems with no MMU.
+commit d0ee043bc50c62c5b5ff3da044f0b5567257407d
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue May 17 08:48:25 2011 +0000
 
-(Part 2/2)
+    Fix the numbering of the headlines in the Snappy format description.
+    
+    R=csilvers
+    DELTA=4  (0 added, 0 deleted, 4 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1906
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@37 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 6c7053871fbdb459c9c14287a138d7f82d6d84a1
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon May 16 08:59:18 2011 +0000
 
-R=csilvers
-DELTA=15  (12 added, 3 deleted, 0 changed)
+    Fix public issue #32: Add compressed format documentation for Snappy.
+    This text is new, but an earlier version from Zeev Tarantov was used
+    as reference.
+    
+    R=csilvers
+    DELTA=112  (111 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1867
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@36 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit a1f9f9973d127992f341d442969c86fd9a0847c9
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon May 9 21:29:02 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1032
+    Fix public issue #39: Pick out the median runs based on CPU time,
+    not real time. Also, use nth_element instead of sort, since we
+    only need one element.
+    
+    R=csilvers
+    DELTA=5  (3 added, 0 deleted, 2 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1799
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@35 03e5f5b5-db94-4691-08a0-1a8bf15f6143
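
A sketch of median selection with std::nth_element, as the entry describes (the function name is invented and a non-empty vector of per-run CPU times is assumed):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    double MedianCpuTime(std::vector<double> cpu_times) {
      const std::size_t mid = cpu_times.size() / 2;
      // Partially orders the data so the element at `mid` is what a full sort
      // would place there: O(n) on average instead of O(n log n).
      std::nth_element(cpu_times.begin(), cpu_times.begin() + mid, cpu_times.end());
      return cpu_times[mid];
    }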
 
-------------------------------------------------------------------------
-r15 | snappy.mirrorbot@gmail.com | 2011-03-24 20:12:27 +0100 (Thu, 24 Mar 2011) | 15 lines
-
+commit f7b105683c074cdf233740089e245e43f63e7e55
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon May 9 21:28:45 2011 +0000
 
-Make the unit test work on systems without mmap(). This is required for,
-among others, Windows support. For Windows in specific, we could have used
-CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
-to compiling, and is of course also relevant for embedded systems with no MMU.
+    Fix public issue #38: Make the microbenchmark framework properly
+    handle cases where gettimeofday() can stand still (return the same
+    result twice, as sometimes on GNU/Hurd) or go backwards
+    (as when the user adjusts the clock). We avoid a division-by-zero,
+    and put a lower bound on the number of iterations -- the same
+    amount as we use to calibrate.
+    
+    We should probably use CLOCK_MONOTONIC for platforms that support
+    it, to be robust against clock adjustments; we already use Windows'
+    monotonic timers. However, that's for a later changelist.
+    
+    R=csilvers
+    DELTA=7  (5 added, 0 deleted, 2 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1798
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@34 03e5f5b5-db94-4691-08a0-1a8bf15f6143
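
A sketch of the two guards mentioned above: never divide by a non-positive elapsed time, and never run fewer iterations than the calibration amount (the names and the calibration constant are invented):

    #include <algorithm>
    #include <cstdint>

    const int64_t kCalibrateIterations = 100;  // same floor used to calibrate

    // 0.0 signals "clock stood still or went backwards"; callers skip the run.
    double IterationsPerSecond(int64_t iterations, double elapsed_seconds) {
      if (elapsed_seconds <= 0.0) return 0.0;  // avoids the division by zero
      return static_cast<double>(iterations) / elapsed_seconds;
    }

    int64_t ClampIterationCount(int64_t proposed) {
      return std::max(proposed, kCalibrateIterations);  // lower bound
    }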
 
-(Part 1/2)
+commit d8d481427a05b88cdb0810c29bf400153595c423
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue May 3 23:22:52 2011 +0000
 
-R=csilvers
-DELTA=9  (8 added, 0 deleted, 1 changed)
+    Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
+    libraries, not libsnappy.so (which doesn't need any such dependency).
+    
+    R=csilvers
+    DELTA=20  (14 added, 0 deleted, 6 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1710
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@33 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit bcecf195c0aeb2c98144d3d54b4d8d228774f50d
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue May 3 23:22:33 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1031
+    Release Snappy 1.0.2, to get the license change and various other fixes into
+    a release.
+    
+    R=csilvers
+    DELTA=239  (236 added, 0 deleted, 3 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1709
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@32 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r14 | snappy.mirrorbot@gmail.com | 2011-03-24 00:17:36 +0100 (Thu, 24 Mar 2011) | 14 lines
-
+commit 84d9f642025cda672dda0d94a8008f094500aaa6
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Apr 26 12:34:55 2011 +0000
 
-Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
-it causes problems with others sending patches etc..
-
-We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
-so we can just as well go cleanly in the other direction.
+    Fix public issue #30: Stop using gettimeofday() altogether on Win32,
+    as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
+    which is monotonic and probably reasonably high-resolution.
+    (Some machines have traditionally had bugs in QPC, but they should
+    be relatively rare these days, and there's really no better
+    alternative that I know of.)
+    
+    R=csilvers
+    DELTA=74  (55 added, 19 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1556
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@31 03e5f5b5-db94-4691-08a0-1a8bf15f6143
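
A sketch of a QueryPerformanceCounter-based timer of the kind described (Win32 only; the wrapper name is invented):

    #if defined(_WIN32)
    #include <windows.h>

    // Seconds since an arbitrary but monotonic origin.
    double MonotonicSeconds() {
      LARGE_INTEGER freq, now;
      QueryPerformanceFrequency(&freq);  // ticks per second
      QueryPerformanceCounter(&now);     // current tick count
      return static_cast<double>(now.QuadPart) /
             static_cast<double>(freq.QuadPart);
    }
    #endif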
 
-R=csilvers
-DELTA=21038  (0 added, 21036 deleted, 2 changed)
+commit 3d8e71df8d30f980d71d4c784ebfc5ff62d5b0cb
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Tue Apr 26 12:34:37 2011 +0000
 
+    Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
+    we need for our own build system internally.
+    
+    R=csilvers
+    DELTA=16  (13 added, 1 deleted, 2 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1555
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@30 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=1012
+commit 73987351de54c88e2fc3f5dcdeceb47708df3585
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Apr 15 22:55:56 2011 +0000
 
-------------------------------------------------------------------------
-r13 | snappy.mirrorbot@gmail.com | 2011-03-23 18:50:49 +0100 (Wed, 23 Mar 2011) | 11 lines
+    When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
+    so we won't pull in macro definitions of things like min() and max(),
+    which can conflict with <algorithm>.
+    
+    R=csilvers
+    DELTA=1  (1 added, 0 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1485
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@29 03e5f5b5-db94-4691-08a0-1a8bf15f6143
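
The include pattern the entry describes, sketched; NOMINMAX is an extra, commonly paired guard that is not part of this change:

    #if defined(_WIN32)
    #ifndef WIN32_LEAN_AND_MEAN
    #define WIN32_LEAN_AND_MEAN  // trims rarely used parts of <windows.h>
    #endif
    #ifndef NOMINMAX
    #define NOMINMAX             // keeps min()/max() from being macros
    #endif
    #include <windows.h>
    #endif
    #include <algorithm>         // std::min / std::max resolve as usual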
 
-
-Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
-to rebuild libtool in Makefile.am won't work.
+commit fb7e0eade471a20b009720a84fea0af1552791d5
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon Apr 11 09:07:01 2011 +0000
 
-R=csilvers
-DELTA=1  (1 added, 0 deleted, 0 changed)
+    Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
+    instead of getrusage().
+    
+    I thought I'd already committed this patch, so that the 1.0.1 release already
+    would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
+    instead, so this is a reconstruction.
+    
+    R=csilvers
+    DELTA=43  (39 added, 3 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1295
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@28 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=997
+commit c67fa0c755a329000da5546fff79089d62ac2f82
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Apr 8 09:51:53 2011 +0000
 
-------------------------------------------------------------------------
-r12 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:39 +0100 (Wed, 23 Mar 2011) | 11 lines
-
+    Include C bindings of Snappy, contributed by Martin Gieseking.
+    
+    I've made a few changes since Martin's version; mostly style nits, but also
+    a semantic change -- most functions that return bool in the C++ version now
+    return an enum, to better match typical C (and zlib) semantics.
+    
+    I've kept the copyright notice, since Martin is obviously the author here;
+    he has signed the contributor license agreement, though, so this should not
+    hinder Google's use in the future.
+    
+    We'll need to update the libtool version number to match the added interface,
+    but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
+    I'm going to wait until public release.
+    
+    R=csilvers
+    DELTA=238  (233 added, 0 deleted, 5 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1294
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@27 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
-it's not needed (CPPFLAGS are always included when compiling).
-
-R=csilvers
-DELTA=1  (0 added, 1 deleted, 0 changed)
-
+commit 56be85cb9ae06f2e92180ae2575bdd10c012ab73
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Apr 7 16:36:43 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=994
+    Replace geo.protodata with a newer version.
+    
+    The data compresses/decompresses slightly faster than the old data, and has
+    similar density.
+    
+    R=lookingbill
+    DELTA=1  (0 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1288
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@26 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r11 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:18 +0100 (Wed, 23 Mar 2011) | 11 lines
-
+commit 3dd93f3ec74df54a37f68bffabb058ac757bbe72
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 30 20:27:53 2011 +0000
 
-Fix public issue #9: Add -Wall -Werror to automake flags.
-(This concerns automake itself, not the C++ compiler.)
+    Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
+    inclusion in snappy-stubs-internal.h, which eases compiling outside the
+    automake/autoconf framework.
+    
+    R=csilvers
+    DELTA=5  (4 added, 1 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1152
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@25 03e5f5b5-db94-4691-08a0-1a8bf15f6143
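
The guard this entry adds is roughly the usual autoconf idiom, sketched:

    #ifdef HAVE_CONFIG_H
    #include "config.h"  // present only when building under autoconf/automake
    #endif
    // Builds that bypass autoconf simply leave HAVE_CONFIG_H undefined and
    // supply the few required settings by other means.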
 
-R=csilvers
-DELTA=4  (3 added, 0 deleted, 1 changed)
+commit f67bcaa61006da8b325a7ed9909a782590971815
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 30 20:27:39 2011 +0000
 
+    Fix public issue #26: Take memory allocation and reallocation entirely out of the
+    Measure() loop. This gives all algorithms a small speed boost, except Snappy which
+    already didn't do reallocation (so the measurements were slightly biased in its
+    favor).
+    
+    R=csilvers
+    DELTA=92  (69 added, 9 deleted, 14 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1151
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@24 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=993
-
-------------------------------------------------------------------------
-r10 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:37 +0100 (Wed, 23 Mar 2011) | 10 lines
+commit cc333c1c5cc4eabceceb9848ff3cac6c604ecbc6
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 30 20:25:09 2011 +0000
 
-
-Fix a typo in the Snappy README file.
+    Renamed "namespace zippy" to "namespace snappy" to reduce
+    the differences from the opensource code.  Will make it easier
+    in the future to mix-and-match third-party code that uses
+    snappy with google code.
+    
+    Currently, csearch shows that the only external user of
+    "namespace zippy" is some bigtable code that accesses
+    a TEST variable, which is temporarily kept in the zippy
+    namespace.
+    
+    R=sesse
+    DELTA=123  (18 added, 3 deleted, 102 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1150
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@23 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-R=csilvers
-DELTA=1  (0 added, 0 deleted, 1 changed)
+commit f19fb07e6dc79d6857e37df572dba25ff30fc8f3
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Mon Mar 28 22:17:04 2011 +0000
 
+    Put back the final few lines of what was truncated during the
+    license header change.
+    
+    R=csilvers
+    DELTA=5  (4 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1094
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@22 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=992
-
-------------------------------------------------------------------------
-r9 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:13 +0100 (Wed, 23 Mar 2011) | 11 lines
-
+commit 7e8ca8f8315fc2ecb4eea19db695039ab2ca43a0
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Sat Mar 26 02:34:34 2011 +0000
 
-Fix public issue #6: Add a --with-gflags for disabling gflags autodetection
-and using a manually given setting (use/don't use) instead.
+    Change on 2011-03-25 19:18:00-07:00 by sesse
+    
+    	Replace the Apache 2.0 license header by the BSD-type license header;
+    	somehow a lot of the files were missed in the last round.
+    
+    	R=dannyb,csilvers
+    	DELTA=147  (74 added, 2 deleted, 71 changed)
+    
+    Change on 2011-03-25 19:25:07-07:00 by sesse
+    
+    	Unbreak the build; the relicensing removed a bit too much (only comments
+    	were intended, but I also accidentally removed some of the top lines of
+    	the actual source).
+    
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1072
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@21 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-R=csilvers
-DELTA=16  (13 added, 0 deleted, 3 changed)
+commit b4bbc1041b35d844ec26fbae25f2864995361fd8
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Mar 25 16:14:41 2011 +0000
 
+    Change Snappy from the Apache 2.0 to a BSD-type license.
+    
+    R=dannyb
+    DELTA=328  (80 added, 184 deleted, 64 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1061
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@20 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit c47640c510eb11cf8913edfa34f667bceb3a4401
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Mar 25 00:39:01 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=991
+    Release Snappy 1.0.1, to soup up all the various small changes
+    that have been made since release.
+    
+    R=csilvers
+    DELTA=266  (260 added, 0 deleted, 6 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1057
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@19 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit b1dc1f643eaff897a5ce135f525799b99687b118
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Mar 24 19:15:54 2011 +0000
 
-------------------------------------------------------------------------
-r8 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:44 +0100 (Wed, 23 Mar 2011) | 12 lines
-
+    Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
+    supported on Windows, and %I64d is recommended instead.
+    
+    R=csilvers
+    DELTA=6  (5 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1034
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@18 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
-slightly more standard, that also doesn't leak libtool command-line into
-configure.ac.
+commit 98004ca9afc62a3279dfe9d9a359083f61db437f
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Mar 24 19:15:27 2011 +0000
 
-R=csilvers
-DELTA=7  (0 added, 4 deleted, 3 changed)
+    Fix public issue #19: Fix unit test when Google Test is installed but the
+    gflags package isn't (Google Test is not properly initialized).
+    
+    Patch by Martin Gieseking.
+    
+    R=csilvers
+    DELTA=2  (1 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1033
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@17 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit 444a6c5f72d6f8d8f7213a5bcc08b26606eb9934
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Mar 24 19:13:57 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=990
+    Make the unit test work on systems without mmap(). This is required for,
+    among others, Windows support. For Windows in specific, we could have used
+    CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+    to compiling, and is of course also relevant for embedded systems with no MMU.
+    
+    (Part 2/2)
+    
+    R=csilvers
+    DELTA=15  (12 added, 3 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1032
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@16 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r7 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:22 +0100 (Wed, 23 Mar 2011) | 10 lines
-
-
-Fix public issue #4: Properly quote all macro arguments in configure.ac.
+commit 2e182e9bb840737f9cd8817e859dc17a82f2c16b
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Thu Mar 24 19:12:27 2011 +0000
 
-R=csilvers
-DELTA=16  (0 added, 0 deleted, 16 changed)
-
+    Make the unit test work on systems without mmap(). This is required for,
+    among others, Windows support. For Windows in specific, we could have used
+    CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+    to compiling, and is of course also relevant for embedded systems with no MMU.
+    
+    (Part 1/2)
+    
+    R=csilvers
+    DELTA=9  (8 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1031
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@15 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=989
+commit 48662cbb7f81533977334629790d346220084527
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 23:17:36 2011 +0000
 
-------------------------------------------------------------------------
-r6 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:54 +0100 (Wed, 23 Mar 2011) | 11 lines
-
+    Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
+    it causes problems with others sending patches etc..
+    
+    We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
+    so we can just as well go cleanly in the other direction.
+    
+    R=csilvers
+    DELTA=21038  (0 added, 21036 deleted, 2 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=1012
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@14 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Fix public issue #7: Don't use internal variables named ac_*, as those belong
-to autoconf's namespace.
+commit 9e4717a586149c9538b353400312bab5ab5458c4
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 17:50:49 2011 +0000
 
-R=csilvers
-DELTA=6  (0 added, 0 deleted, 6 changed)
-
+    Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
+    to rebuild libtool in Makefile.am won't work.
+    
+    R=csilvers
+    DELTA=1  (1 added, 0 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=997
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@13 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=988
+commit 519c822a34a91a0c0eb32d98e9686ee7d9cd6651
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:16:39 2011 +0000
 
-------------------------------------------------------------------------
-r5 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:09 +0100 (Wed, 23 Mar 2011) | 10 lines
+    Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
+    it's not needed (CPPFLAGS are always included when compiling).
+    
+    R=csilvers
+    DELTA=1  (0 added, 1 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=994
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@12 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit ea6b936378583cba730c33c8a53776edc1782208
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:16:18 2011 +0000
 
-Add missing licensing headers to a few files. (Part 2/2.)
-
-R=csilvers
-DELTA=12  (12 added, 0 deleted, 0 changed)
-
+    Fix public issue #9: Add -Wall -Werror to automake flags.
+    (This concerns automake itself, not the C++ compiler.)
+    
+    R=csilvers
+    DELTA=4  (3 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=993
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@11 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=987
+commit e3ca06af253094b1c3a8eae508cd97accf077535
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:13:37 2011 +0000
 
-------------------------------------------------------------------------
-r4 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:39 +0100 (Wed, 23 Mar 2011) | 10 lines
+    Fix a typo in the Snappy README file.
+    
+    R=csilvers
+    DELTA=1  (0 added, 0 deleted, 1 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=992
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@10 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit 39d27bea23873abaa663e884261386b17b058f20
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:13:13 2011 +0000
 
-Add mising licensing headers to a few files. (Part 1/2.)
+    Fix public issue #6: Add a --with-gflags for disabling gflags autodetection
+    and using a manually given setting (use/don't use) instead.
+    
+    R=csilvers
+    DELTA=16  (13 added, 0 deleted, 3 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=991
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@9 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-R=csilvers
-DELTA=24  (24 added, 0 deleted, 0 changed)
-
+commit 60add43d99c1c31aeecd895cb555ad6f6520608e
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:12:44 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=986
+    Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
+    slightly more standard, that also doesn't leak libtool command-line into
+    configure.ac.
+    
+    R=csilvers
+    DELTA=7  (0 added, 4 deleted, 3 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=990
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@8 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r3 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:04 +0100 (Wed, 23 Mar 2011) | 11 lines
+commit a8dd1700879ad646106742aa0e9c3a48dc07b01d
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:12:22 2011 +0000
 
+    Fix public issue #4: Properly quote all macro arguments in configure.ac.
+    
+    R=csilvers
+    DELTA=16  (0 added, 0 deleted, 16 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=989
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@7 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Use the correct license file for the Apache 2.0 license;
-spotted by Florian Weimer.
+commit 79752dd7033658e28dc894de55012bdf2c9afca3
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:11:54 2011 +0000
 
-R=csilvers
-DELTA=202  (174 added, 0 deleted, 28 changed)
+    Fix public issue #7: Don't use internal variables named ac_*, as those belong
+    to autoconf's namespace.
+    
+    R=csilvers
+    DELTA=6  (0 added, 0 deleted, 6 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=988
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@6 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
+commit 46e39fb20c297129494b969ac4ea64fcd04b4fa0
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:11:09 2011 +0000
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=985
+    Add missing licensing headers to a few files. (Part 2/2.)
+    
+    R=csilvers
+    DELTA=12  (12 added, 0 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=987
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@5 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-------------------------------------------------------------------------
-r2 | snappy.mirrorbot@gmail.com | 2011-03-18 18:14:15 +0100 (Fri, 18 Mar 2011) | 6 lines
+commit 3e764216fc8edaafca480443b90e55c14eaae2c2
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:10:39 2011 +0000
 
-
-
+    Add mising licensing headers to a few files. (Part 1/2.)
+    
+    R=csilvers
+    DELTA=24  (24 added, 0 deleted, 0 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=986
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@4 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Revision created by MOE tool push_codebase.
-MOE_MIGRATION=
+commit 9a59f183c8ffec62dcdabd3499d0d515e44e4ef0
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Wed Mar 23 11:10:04 2011 +0000
 
-------------------------------------------------------------------------
-r1 | sesse@google.com | 2011-03-18 18:13:52 +0100 (Fri, 18 Mar 2011) | 2 lines
+    Use the correct license file for the Apache 2.0 license;
+    spotted by Florian Weimer.
+    
+    R=csilvers
+    DELTA=202  (174 added, 0 deleted, 28 changed)
+    
+    
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=985
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@3 03e5f5b5-db94-4691-08a0-1a8bf15f6143
 
-Create trunk directory.
+commit 28a64402392c791905d6e1384ea1b48a5cb0b281
+Author: snappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Mar 18 17:14:15 2011 +0000
 
-------------------------------------------------------------------------
+    Revision created by MOE tool push_codebase.
+    MOE_MIGRATION=
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@2 03e5f5b5-db94-4691-08a0-1a8bf15f6143
+
+commit 7c3c6077b72b4ae2237267a20f640b55e9a90569
+Author: sesse@google.com <sesse@google.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>
+Date:   Fri Mar 18 17:13:52 2011 +0000
+
+    Create trunk directory.
+    
+    
+    git-svn-id: https://snappy.googlecode.com/svn/trunk@1 03e5f5b5-db94-4691-08a0-1a8bf15f6143
--- a/other-licenses/snappy/src/NEWS
+++ b/other-licenses/snappy/src/NEWS
@@ -1,8 +1,90 @@
+Snappy v1.1.3, July 6th 2015:
+
+This is the first release to be done from GitHub, which means that
+some minor things like the ChangeLog format has changed (git log
+format instead of svn log).
+
+  * Add support for Uncompress() from a Source to a Sink.
+
+  * Various minor changes to improve MSVC support; in particular,
+    the unit tests now compile and run under MSVC.
+
+
+Snappy v1.1.2, February 28th 2014:
+
+This is a maintenance release with no changes to the actual library
+source code.
+
+  * Stop distributing benchmark data files that have unclear
+    or unsuitable licensing.
+
+  * Add support for padding chunks in the framing format.
+
+
+Snappy v1.1.1, October 15th 2013:
+
+  * Add support for uncompressing to iovecs (scatter I/O).
+    The bulk of this patch was contributed by Mohit Aron.
+
+  * Speed up decompression by ~2%; much more so (~13-20%) on
+    a few benchmarks on given compilers and CPUs.
+
+  * Fix a few issues with MSVC compilation.
+
+  * Support truncated test data in the benchmark.
+
+
+Snappy v1.1.0, January 18th 2013:
+
+  * Snappy now uses 64 kB block size instead of 32 kB. On average,
+    this means it compresses about 3% denser (more so for some
+    inputs), at the same or better speeds.
+
+  * libsnappy no longer depends on iostream.
+
+  * Some small performance improvements in compression on x86
+    (0.5–1%).
+
+  * Various portability fixes for ARM-based platforms, for MSVC,
+    and for GNU/Hurd.
+
+
+Snappy v1.0.5, February 24th 2012:
+
+  * More speed improvements. Exactly how big will depend on
+    the architecture:
+
+    - 3–10% faster decompression for the base case (x86-64).
+
+    - ARMv7 and higher can now use unaligned accesses,
+      and will see about 30% faster decompression and
+      20–40% faster compression.
+
+    - 32-bit platforms (ARM and 32-bit x86) will see 2–5%
+      faster compression.
+
+    These are all cumulative (e.g., ARM gets all three speedups).
+
+  * Fixed an issue where the unit test would crash on system
+    with less than 256 MB address space available,
+    e.g. some embedded platforms.
+
+  * Added a framing format description, for use over e.g. HTTP,
+    or for a command-line compressor. We do not have any
+    implementations of this at the current point, but there seems
+    to be enough of a general interest in the topic.
+    Also make the format description slightly clearer.
+
+  * Remove some compile-time warnings in -Wall
+    (mostly signed/unsigned comparisons), for easier embedding
+    into projects that use -Wall -Werror.
+
+
 Snappy v1.0.4, September 15th 2011:
 
   * Speeded up the decompressor somewhat; typically about 2–8%
     for Core i7, in 64-bit mode (comparable for Opteron).
     Somewhat more for some tests, almost no gain for others.
   
   * Make Snappy compile on certain platforms it didn't before
     (Solaris with SunPro C++, HP-UX, AIX).
--- a/other-licenses/snappy/src/README
+++ b/other-licenses/snappy/src/README
@@ -24,17 +24,17 @@ Snappy has the following properties:
    For more information, see the included COPYING file.
 
 Snappy has previously been called "Zippy" in some Google presentations
 and the like.
 
 
 Performance
 ===========
- 
+
 Snappy is intended to be fast. On a single core of a Core i7 processor
 in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
 about 500 MB/sec or more. (These numbers are for the slowest inputs in our
 benchmark suite; others are much faster.) In our tests, Snappy usually
 is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
 etc.) while achieving comparable compression ratios.
 
 Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
@@ -62,17 +62,17 @@ Performance optimizations, whether for 6
 are of course most welcome; see "Contact", below.
 
 
 Usage
 =====
 
 Note that Snappy, both the implementation and the main interface,
 is written in C++. However, several third-party bindings to other languages
-are available; see the Google Code page at http://code.google.com/p/snappy/
+are available; see the home page at http://google.github.io/snappy/
 for more information. Also, if you want to use Snappy from C code, you can
 use the included C bindings in snappy-c.h.
 
 To use Snappy from your own C++ program, include the file "snappy.h" from
 your calling file, and link against the compiled library.
 
 There are many ways to call Snappy, but the simplest possible is
 
@@ -97,22 +97,22 @@ library itself. You do not need it to us
 but it contains several useful components for Snappy development.
 
 First of all, it contains unit tests, verifying correctness on your machine in
 various scenarios. If you want to change or optimize Snappy, please run the
 tests to verify you have not broken anything. Note that if you have the
 Google Test library installed, unit test behavior (especially failures) will be
 significantly more user-friendly. You can find Google Test at
 
-  http://code.google.com/p/googletest/
+  http://github.com/google/googletest
 
 You probably also want the gflags library for handling of command-line flags;
 you can find it at
 
-  http://code.google.com/p/google-gflags/
+  http://gflags.github.io/gflags/
 
 In addition to the unit tests, snappy contains microbenchmarks used to
 tune compression and decompression performance. These are automatically run
 before the unit tests, but you can disable them using the flag
 --run_microbenchmarks=false if you have gflags installed (otherwise you will
 need to edit the source).
 
 Finally, snappy can benchmark Snappy against a few other compression libraries
@@ -124,12 +124,16 @@ microbenchmark, which should provide a r
 benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
 are used to verify correctness in the presence of corrupted data in the unit
 test.)
 
 
 Contact
 =======
 
-Snappy is distributed through Google Code. For the latest version, a bug tracker,
+Snappy is distributed through GitHub. For the latest version, a bug tracker,
 and other information, see
 
-  http://code.google.com/p/snappy/
+  http://google.github.io/snappy/
+
+or the repository at
+
+  https://github.com/google/snappy
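
A minimal sketch of the string-based calls that the Usage section above refers to, assuming only the snappy::Compress and snappy::Uncompress declarations in snappy.h (the round-trip check is illustrative):

    #include <string>
    #include "snappy.h"

    int main() {
      std::string input(100000, 'a');      // highly compressible sample data
      std::string compressed, roundtrip;

      // Compress the whole buffer into a std::string.
      snappy::Compress(input.data(), input.size(), &compressed);

      // Uncompress() returns false if the input is corrupt.
      if (!snappy::Uncompress(compressed.data(), compressed.size(), &roundtrip))
        return 1;
      return roundtrip == input ? 0 : 1;
    }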
--- a/other-licenses/snappy/src/framing_format.txt
+++ b/other-licenses/snappy/src/framing_format.txt
@@ -1,28 +1,28 @@
 Snappy framing format description
-Last revised: 2011-12-15
+Last revised: 2013-10-25
 
 This format decribes a framing format for Snappy, allowing compressing to
 files or streams that can then more easily be decompressed without having
 to hold the entire stream in memory. It also provides data checksums to
 help verify integrity. It does not provide metadata checksums, so it does
 not protect against e.g. all forms of truncations.
 
 Implementation of the framing format is optional for Snappy compressors and
 decompressor; it is not part of the Snappy core specification.
 
 
 1. General structure
 
 The file consists solely of chunks, lying back-to-back with no padding
 in between. Each chunk consists first a single byte of chunk identifier,
-then a two-byte little-endian length of the chunk in bytes (from 0 to 65535,
-inclusive), and then the data if any. The three bytes of chunk header is not
-counted in the data length.
+then a three-byte little-endian length of the chunk in bytes (from 0 to
+16777215, inclusive), and then the data if any. The four bytes of chunk
+header is not counted in the data length.
 
 The different chunk types are listed below. The first chunk must always
 be the stream identifier chunk (see section 4.1, below). The stream
 ends when the file ends -- there is no explicit end-of-file marker.
 
 
 2. File type identification
 
@@ -66,59 +66,70 @@ be extended in the future.
 
 
 4.1. Stream identifier (chunk type 0xff)
 
 The stream identifier is always the first element in the stream.
 It is exactly six bytes long and contains "sNaPpY" in ASCII. This means that
 a valid Snappy framed stream always starts with the bytes
 
-  0xff 0x06 0x00 0x73 0x4e 0x61 0x50 0x70 0x59
+  0xff 0x06 0x00 0x00 0x73 0x4e 0x61 0x50 0x70 0x59
 
 The stream identifier chunk can come multiple times in the stream besides
 the first; if such a chunk shows up, it should simply be ignored, assuming
 it has the right length and contents. This allows for easy concatenation of
 compressed files without the need for re-framing.
 
 
 4.2. Compressed data (chunk type 0x00)
 
 Compressed data chunks contain a normal Snappy compressed bitstream;
 see the compressed format specification. The compressed data is preceded by
 the CRC-32C (see section 3) of the _uncompressed_ data.
 
 Note that the data portion of the chunk, i.e., the compressed contents,
-can be at most 65531 bytes (2^16 - 1, minus the checksum).
+can be at most 16777211 bytes (2^24 - 1, minus the checksum).
 However, we place an additional restriction that the uncompressed data
-in a chunk must be no longer than 32768 bytes. This allows consumers to
+in a chunk must be no longer than 65536 bytes. This allows consumers to
 easily use small fixed-size buffers.
 
 
 4.3. Uncompressed data (chunk type 0x01)
 
 Uncompressed data chunks allow a compressor to send uncompressed,
 raw data; this is useful if, for instance, uncompressible or
 near-incompressible data is detected, and faster decompression is desired.
 
 As in the compressed chunks, the data is preceded by its own masked
 CRC-32C (see section 3).
 
 An uncompressed data chunk, like compressed data chunks, should contain
-no more than 32768 data bytes, so the maximum legal chunk length with the
-checksum is 32772.
+no more than 65536 data bytes, so the maximum legal chunk length with the
+checksum is 65540.
 
 
-4.4. Reserved unskippable chunks (chunk types 0x02-0x7f)
+4.4. Padding (chunk type 0xfe)
+
+Padding chunks allow a compressor to increase the size of the data stream
+so that it complies with external demands, e.g. that the total number of
+bytes is a multiple of some value.
+
+All bytes of the padding chunk, except the chunk byte itself and the length,
+should be zero, but decompressors must not try to interpret or verify the
+padding data in any way.
+
+
+4.5. Reserved unskippable chunks (chunk types 0x02-0x7f)
 
 These are reserved for future expansion. A decoder that sees such a chunk
 should immediately return an error, as it must assume it cannot decode the
 stream correctly.
 
 Future versions of this specification may define meanings for these chunks.
 
 
-4.5. Reserved skippable chunks (chunk types 0x80-0xfe)
+4.6. Reserved skippable chunks (chunk types 0x80-0xfd)
 
 These are also reserved for future expansion, but unlike the chunks
-described in 4.4, a decoder seeing these must skip them and continue
+described in 4.5, a decoder seeing these must skip them and continue
 decoding.
 
 Future versions of this specification may define meanings for these chunks.
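
A minimal sketch of how a reader would parse the revised chunk header described above; the helper names are illustrative, not part of the format specification:

    #include <stdint.h>
    #include <string.h>

    // One chunk header is a single type byte followed by a three-byte
    // little-endian payload length; the four header bytes are not counted
    // in that length.
    static bool ReadChunkHeader(const uint8_t* p, size_t avail,
                                uint8_t* type, uint32_t* len) {
      if (avail < 4) return false;
      *type = p[0];
      *len = static_cast<uint32_t>(p[1]) |
             (static_cast<uint32_t>(p[2]) << 8) |
             (static_cast<uint32_t>(p[3]) << 16);
      return true;
    }

    // A valid framed stream starts with the stream identifier chunk:
    // type 0xff, length 6, payload "sNaPpY".
    static bool IsStreamIdentifier(const uint8_t* p, size_t avail) {
      uint8_t type;
      uint32_t len;
      return ReadChunkHeader(p, avail, &type, &len) &&
             type == 0xff && len == 6 && avail >= 10 &&
             memcmp(p + 4, "sNaPpY", 6) == 0;
    }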
--- a/other-licenses/snappy/src/snappy-c.h
+++ b/other-licenses/snappy/src/snappy-c.h
@@ -25,18 +25,18 @@
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Plain C interface (a wrapper around the C++ implementation).
  */
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #include <stddef.h>
 
 /*
@@ -130,9 +130,9 @@ snappy_status snappy_uncompressed_length
  */
 snappy_status snappy_validate_compressed_buffer(const char* compressed,
                                                 size_t compressed_length);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  /* UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */
+#endif  /* THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */
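
For reference, a sketch of the plain C wrapper in use, assuming the snappy_max_compressed_length, snappy_compress, snappy_uncompressed_length and snappy_uncompress entry points declared in snappy-c.h (roundtrip is an illustrative helper, with abbreviated error handling):

    #include <stdlib.h>
    #include <string.h>
    #include "snappy-c.h"

    int roundtrip(const char* input, size_t input_length) {
      size_t compressed_length = snappy_max_compressed_length(input_length);
      char* compressed = (char*)malloc(compressed_length);

      /* snappy_compress updates compressed_length to the size actually used. */
      if (snappy_compress(input, input_length,
                          compressed, &compressed_length) != SNAPPY_OK) {
        free(compressed);
        return -1;
      }

      size_t uncompressed_length;
      if (snappy_uncompressed_length(compressed, compressed_length,
                                     &uncompressed_length) != SNAPPY_OK) {
        free(compressed);
        return -1;
      }

      char* output = (char*)malloc(uncompressed_length);
      int ok = snappy_uncompress(compressed, compressed_length,
                                 output, &uncompressed_length) == SNAPPY_OK &&
               uncompressed_length == input_length &&
               memcmp(output, input, input_length) == 0;

      free(compressed);
      free(output);
      return ok ? 0 : -1;
    }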
--- a/other-licenses/snappy/src/snappy-internal.h
+++ b/other-licenses/snappy/src/snappy-internal.h
@@ -23,18 +23,18 @@
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Internals shared between the Snappy implementation and its unittest.
 
-#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
 
 #include "snappy-stubs-internal.h"
 
 namespace snappy {
 namespace internal {
 
 class WorkingMemory {
  public:
@@ -80,54 +80,54 @@ char* CompressFragment(const char* input
 // Requires that s2_limit >= s2.
 //
 // Separate implementation for x86_64, for speed.  Uses the fact that
 // x86_64 is little endian.
 #if defined(ARCH_K8)
 static inline int FindMatchLength(const char* s1,
                                   const char* s2,
                                   const char* s2_limit) {
-  DCHECK_GE(s2_limit, s2);
+  assert(s2_limit >= s2);
   int matched = 0;
 
   // Find out how long the match is. We loop over the data 64 bits at a
   // time until we find a 64-bit block that doesn't match; then we find
   // the first non-matching bit and use that to calculate the total
   // length of the match.
   while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
-    if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+    if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
       s2 += 8;
       matched += 8;
     } else {
       // On current (mid-2008) Opteron models there is a 3% more
       // efficient code sequence to find the first non-matching byte.
       // However, what follows is ~10% better on Intel Core 2 and newer,
       // and we expect AMD's bsf instruction to improve.
       uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
       int matching_bits = Bits::FindLSBSetNonZero64(x);
       matched += matching_bits >> 3;
       return matched;
     }
   }
   while (PREDICT_TRUE(s2 < s2_limit)) {
-    if (PREDICT_TRUE(s1[matched] == *s2)) {
+    if (s1[matched] == *s2) {
       ++s2;
       ++matched;
     } else {
       return matched;
     }
   }
   return matched;
 }
 #else
 static inline int FindMatchLength(const char* s1,
                                   const char* s2,
                                   const char* s2_limit) {
   // Implementation based on the x86-64 version, above.
-  DCHECK_GE(s2_limit, s2);
+  assert(s2_limit >= s2);
   int matched = 0;
 
   while (s2 <= s2_limit - 4 &&
          UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
     s2 += 4;
     matched += 4;
   }
   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
@@ -139,12 +139,74 @@ static inline int FindMatchLength(const 
       ++s2;
       ++matched;
     }
   }
   return matched;
 }
 #endif
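
To illustrate the bit trick in the x86-64 FindMatchLength above: on a little-endian machine the first differing byte contributes the lowest-order set bits of the XOR, so the index of the least-significant set bit, divided by 8, is the number of leading bytes that match. A standalone sketch, using the GCC/Clang builtin in place of Bits::FindLSBSetNonZero64:

    #include <stdint.h>
    #include <string.h>

    static inline int MatchedBytes(const char* a, const char* b) {
      uint64_t wa, wb;
      memcpy(&wa, a, 8);
      memcpy(&wb, b, 8);
      uint64_t x = wa ^ wb;            // zero wherever the bytes agree
      if (x == 0) return 8;            // all eight bytes match
      return __builtin_ctzll(x) >> 3;  // "abcdefgh" vs "abcdXfgh" -> 4
    }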
 
+// Lookup tables for decompression code.  Give --snappy_dump_decompression_table
+// to the unit test to recompute char_table.
+
+enum {
+  LITERAL = 0,
+  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
+  COPY_2_BYTE_OFFSET = 2,
+  COPY_4_BYTE_OFFSET = 3
+};
+static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
+
+// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+static const uint32 wordmask[] = {
+  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
+
+// Data stored per entry in lookup table:
+//      Range   Bits-used       Description
+//      ------------------------------------
+//      1..64   0..7            Literal/copy length encoded in opcode byte
+//      0..7    8..10           Copy offset encoded in opcode byte / 256
+//      0..4    11..13          Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed
+// because of efficiency reasons:
+//      (1) Extracting a byte is faster than a bit-field
+//      (2) It properly aligns copy offset so we do not need a <<8
+static const uint16 char_table[256] = {
+  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
+
 }  // end namespace internal
 }  // end namespace snappy
 
-#endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
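
The char_table layout documented above packs three fields per opcode byte; a small sketch of pulling them back apart (DecodeTag is an illustrative helper, assuming snappy-internal.h is included):

    #include "snappy-internal.h"

    struct TagInfo {
      unsigned length;       // bits 0..7: literal/copy length from the opcode
      unsigned offset_high;  // bits 8..10: copy offset encoded in the opcode, /256
      unsigned extra_bytes;  // bits 11..13: extra bytes following the opcode
    };

    inline TagInfo DecodeTag(unsigned char opcode) {
      const snappy::uint16 entry = snappy::internal::char_table[opcode];
      TagInfo info;
      info.length = entry & 0xff;
      info.offset_high = (entry >> 8) & 0x7;
      info.extra_bytes = (entry >> 11) & 0x7;
      return info;
    }

For instance, DecodeTag(0x01) gives length 4, offset_high 0 and one extra byte, matching a COPY_1_BYTE_OFFSET tag.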
--- a/other-licenses/snappy/src/snappy-sinksource.cc
+++ b/other-licenses/snappy/src/snappy-sinksource.cc
@@ -35,16 +35,31 @@ namespace snappy {
 Source::~Source() { }
 
 Sink::~Sink() { }
 
 char* Sink::GetAppendBuffer(size_t length, char* scratch) {
   return scratch;
 }
 
+char* Sink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = scratch_size;
+  return scratch;
+}
+
+void Sink::AppendAndTakeOwnership(
+    char* bytes, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  Append(bytes, n);
+  (*deleter)(deleter_arg, bytes, n);
+}
+
 ByteArraySource::~ByteArraySource() { }
 
 size_t ByteArraySource::Available() const { return left_; }
 
 const char* ByteArraySource::Peek(size_t* len) {
   *len = left_;
   return ptr_;
 }
@@ -63,10 +78,27 @@ void UncheckedByteArraySink::Append(cons
   }
   dest_ += n;
 }
 
 char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
   return dest_;
 }
 
+void UncheckedByteArraySink::AppendAndTakeOwnership(
+    char* data, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  if (data != dest_) {
+    memcpy(dest_, data, n);
+    (*deleter)(deleter_arg, data, n);
+  }
+  dest_ += n;
+}
 
-} // namespace snappy
+char* UncheckedByteArraySink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = desired_size_hint;
+  return dest_;
+}
+
+}  // namespace snappy
--- a/other-licenses/snappy/src/snappy-sinksource.h
+++ b/other-licenses/snappy/src/snappy-sinksource.h
@@ -21,22 +21,21 @@
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
 
 #include <stddef.h>
 
-
 namespace snappy {
 
 // A Sink is an interface that consumes a sequence of bytes.
 class Sink {
  public:
   Sink() { }
   virtual ~Sink();
 
@@ -55,16 +54,58 @@ class Sink {
   //
   // If a non-scratch buffer is returned, the caller may only pass a
   // prefix of it to Append().  That is, it is not correct to pass an
   // interior pointer of the returned array to Append().
   //
   // The default implementation always returns the scratch buffer.
   virtual char* GetAppendBuffer(size_t length, char* scratch);
 
+  // For higher performance, Sink implementations can provide custom
+  // AppendAndTakeOwnership() and GetAppendBufferVariable() methods.
+  // These methods can reduce the number of copies done during
+  // compression/decompression.
+
+  // Append "bytes[0,n-1] to the sink. Takes ownership of "bytes"
+  // and calls the deleter function as (*deleter)(deleter_arg, bytes, n)
+  // to free the buffer. deleter function must be non NULL.
+  //
+  // The default implementation just calls Append and frees "bytes".
+  // Other implementations may avoid a copy while appending the buffer.
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
+
+  // Returns a writable buffer for appending and writes the buffer's capacity to
+  // *allocated_size. Guarantees *allocated_size >= min_size.
+  // May return a pointer to the caller-owned scratch buffer which must have
+  // scratch_size >= min_size.
+  //
+  // The returned buffer is only valid until the next operation
+  // on this ByteSink.
+  //
+  // After writing at most *allocated_size bytes, call Append() with the
+  // pointer returned from this function and the number of bytes written.
+  // Many Append() implementations will avoid copying bytes if this function
+  // returned an internal buffer.
+  //
+  // If the sink implementation allocates or reallocates an internal buffer,
+  // it should use the desired_size_hint if appropriate. If a caller cannot
+  // provide a reasonable guess at the desired capacity, it should set
+  // desired_size_hint = 0.
+  //
+  // If a non-scratch buffer is returned, the caller may only pass
+  // a prefix to it to Append(). That is, it is not correct to pass an
+  // interior pointer to Append().
+  //
+  // The default implementation always returns the scratch buffer.
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
+
  private:
   // No copying
   Sink(const Sink&);
   void operator=(const Sink&);
 };
 
 // A Source is an interface that yields a sequence of bytes
 class Source {
@@ -116,21 +157,26 @@ class ByteArraySource : public Source {
 
 // A Sink implementation that writes to a flat array without any bound checks.
 class UncheckedByteArraySink : public Sink {
  public:
   explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
   virtual ~UncheckedByteArraySink();
   virtual void Append(const char* data, size_t n);
   virtual char* GetAppendBuffer(size_t len, char* scratch);
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
 
   // Return the current output pointer so that a caller can see how
   // many bytes were produced.
   // Note: this is not a Sink method.
   char* CurrentDestination() const { return dest_; }
  private:
   char* dest_;
 };
 
+}  // namespace snappy
 
-} // namespace snappy
-
-#endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
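
A sketch of a Sink built on the interface above, overriding the new ownership-taking hook; StringSink is illustrative, and only the base-class signatures come from snappy-sinksource.h:

    #include <string>
    #include "snappy-sinksource.h"

    class StringSink : public snappy::Sink {
     public:
      explicit StringSink(std::string* dest) : dest_(dest) { }

      virtual void Append(const char* data, size_t n) {
        dest_->append(data, n);
      }

      // Take ownership of "bytes": append it, then release it through the
      // caller-supplied deleter, as the contract above requires.
      virtual void AppendAndTakeOwnership(
          char* bytes, size_t n,
          void (*deleter)(void*, const char*, size_t),
          void* deleter_arg) {
        dest_->append(bytes, n);
        (*deleter)(deleter_arg, bytes, n);
      }

     private:
      std::string* dest_;
    };

snappy::Compress(Source*, Sink*) — and, per the NEWS entry earlier in this patch, Uncompress() to a Sink as of v1.1.3 — can then write straight into the backing string.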
--- a/other-licenses/snappy/src/snappy-stubs-internal.h
+++ b/other-licenses/snappy/src/snappy-stubs-internal.h
@@ -23,24 +23,23 @@
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Various stubs for the open-source version of Snappy.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif
 
-#include <iostream>
 #include <string>
 
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
 
 #ifdef HAVE_SYS_MMAN_H
 #include <sys/mman.h>
@@ -90,109 +89,110 @@ using namespace std;
 #define DECLARE_bool(flag_name) \
   extern bool FLAGS_ ## flag_name
 
 namespace snappy {
 
 static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
 static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 
-// Logging.
-
-#define LOG(level) LogMessage()
-#define VLOG(level) true ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessage()
-
-class LogMessage {
- public:
-  LogMessage() { }
-  ~LogMessage() {
-    cerr << endl;
-  }
-
-  LogMessage& operator<<(const std::string& msg) {
-    cerr << msg;
-    return *this;
-  }
-  LogMessage& operator<<(int x) {
-    cerr << x;
-    return *this;
-  }
-};
-
-// Asserts, both versions activated in debug mode only,
-// and ones that are always active.
-
-#define CRASH_UNLESS(condition) \
-    PREDICT_TRUE(condition) ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
-
-class LogMessageCrash : public LogMessage {
- public:
-  LogMessageCrash() { }
-  ~LogMessageCrash() {
-    cerr << endl;
-    abort();
-  }
-};
+// Potentially unaligned loads and stores.
 
-// This class is used to explicitly ignore values in the conditional
-// logging macros.  This avoids compiler warnings like "value computed
-// is not used" and "statement has no effect".
-
-class LogMessageVoidify {
- public:
-  LogMessageVoidify() { }
-  // This has to be an operator with a precedence lower than << but
-  // higher than ?:
-  void operator&(const LogMessage&) { }
-};
-
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) CRASH_UNLESS(true)
-#define DCHECK_LE(a, b) CRASH_UNLESS(true)
-#define DCHECK_GE(a, b) CRASH_UNLESS(true)
-#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
-#define DCHECK_NE(a, b) CRASH_UNLESS(true)
-#define DCHECK_LT(a, b) CRASH_UNLESS(true)
-#define DCHECK_GT(a, b) CRASH_UNLESS(true)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
-// Potentially unaligned loads and stores.
+// x86 and PowerPC can simply do these loads and stores native.
 
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
 
 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
 #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
 
 #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
 #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
 #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
 
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+// sub-architectures.
+//
+// This is a mess, but there's not much we can do about it.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+// explicitly tell the compiler that these accesses can be unaligned, it can and
+// will combine accesses. On armcc, the way to signal this is done by accessing
+// through the type (uint32 __packed *), but GCC has no such attribute
+// (it ignores __attribute__((packed)) on individual variables). However,
+// we can tell it that a _struct_ is unaligned, which has the same effect,
+// so we do that.
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_4__) && \
+      !defined(__ARM_ARCH_4T__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+#if __GNUC__
+#define ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ATTRIBUTE_PACKED
+#endif
+
+namespace base {
+namespace internal {
+
+struct Unaligned16Struct {
+  uint16 value;
+  uint8 dummy;  // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
+struct Unaligned32Struct {
+  uint32 value;
+  uint8 dummy;  // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
+}  // namespace internal
+}  // namespace base
+
+#define UNALIGNED_LOAD16(_p) \
+    ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
+#define UNALIGNED_LOAD32(_p) \
+    ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
+
+#define UNALIGNED_STORE16(_p, _val) \
+    ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
+         (_val))
+#define UNALIGNED_STORE32(_p, _val) \
+    ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
+         (_val))
+
+// TODO(user): NEON supports unaligned 64-bit loads and stores.
+// See if that would be more efficient on platforms supporting it,
+// at least for copies.
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+  uint64 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+  memcpy(p, &v, sizeof v);
+}
+
 #else
 
 // These functions are provided for architectures that don't support
 // unaligned loads and stores.
 
 inline uint16 UNALIGNED_LOAD16(const void *p) {
   uint16 t;
   memcpy(&t, p, sizeof t);
@@ -220,16 +220,30 @@ inline void UNALIGNED_STORE32(void *p, u
 }
 
 inline void UNALIGNED_STORE64(void *p, uint64 v) {
   memcpy(p, &v, sizeof v);
 }
 
 #endif
 
+// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+// on some platforms, in particular ARM.
+inline void UnalignedCopy64(const void *src, void *dst) {
+  if (sizeof(void *) == 8) {
+    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+  } else {
+    const char *src_char = reinterpret_cast<const char *>(src);
+    char *dst_char = reinterpret_cast<char *>(dst);
+
+    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+  }
+}
+
 // The following guarantees declaration of the byte swap functions.
 #ifdef WORDS_BIGENDIAN
 
 #ifdef HAVE_SYS_BYTEORDER_H
 #include <sys/byteorder.h>
 #endif
 
 #ifdef HAVE_SYS_ENDIAN_H
@@ -506,9 +520,9 @@ inline void STLStringResizeUninitialized
 // proposes this as the method. It will officially be part of the standard
 // for C++0x. This should already work on all current implementations.
 inline char* string_as_array(string* str) {
   return str->empty() ? NULL : &*str->begin();
 }
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
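
A minimal standalone sketch of the two techniques the ARM branch above combines: a packed struct, so GCC knows a 32-bit load may be misaligned and emits a single unaligned-capable load, and memcpy for 64-bit loads, which the compiler lowers to accesses that avoid the LDRD/LDM alignment restriction. The real code additionally pads the structs to a non-power-of-two size:

    #include <stdint.h>
    #include <string.h>

    struct Unaligned32 {
      uint32_t value;
    } __attribute__((__packed__));

    static inline uint32_t LoadU32(const void* p) {
      // The packed type tells GCC the pointer may be misaligned.
      return reinterpret_cast<const Unaligned32*>(p)->value;
    }

    static inline uint64_t LoadU64(const void* p) {
      uint64_t v;
      memcpy(&v, p, sizeof v);  // alignment-safe; no LDRD/LDM is forced
      return v;
    }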
--- a/other-licenses/snappy/src/snappy-stubs-public.h.in
+++ b/other-licenses/snappy/src/snappy-stubs-public.h.in
@@ -28,27 +28,31 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Various type stubs for the open-source version of Snappy.
 //
 // This file cannot include config.h, as it is included from snappy.h,
 // which is a public header. Instead, snappy-stubs-public.h is generated by
 // from snappy-stubs-public.h.in at configure time.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 
 #if @ac_cv_have_stdint_h@
 #include <stdint.h>
 #endif
 
 #if @ac_cv_have_stddef_h@
 #include <stddef.h>
 #endif
 
+#if @ac_cv_have_sys_uio_h@
+#include <sys/uio.h>
+#endif
+
 #define SNAPPY_MAJOR @SNAPPY_MAJOR@
 #define SNAPPY_MINOR @SNAPPY_MINOR@
 #define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
 #define SNAPPY_VERSION \
     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
 
 #include <string>
 
@@ -71,15 +75,26 @@ typedef unsigned short uint16;
 typedef int int32;
 typedef unsigned int uint32;
 typedef long long int64;
 typedef unsigned long long uint64;
 #endif
 
 typedef std::string string;
 
+#ifndef DISALLOW_COPY_AND_ASSIGN
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
   TypeName(const TypeName&);               \
   void operator=(const TypeName&)
+#endif
+
+#if !@ac_cv_have_sys_uio_h@
+// Windows does not have an iovec type, yet the concept is universally useful.
+// It is simple to define it ourselves, so we put it inside our own namespace.
+struct iovec {
+	void* iov_base;
+	size_t iov_len;
+};
+#endif
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
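
The iovec definition above backs the scatter-I/O decompression added in v1.1.1 (see the NEWS entries earlier in this patch); a sketch, assuming the RawUncompressToIOVec entry point in snappy.h (UncompressInto is an illustrative wrapper):

    #include <string>
    #include "snappy.h"

    // Scatter the uncompressed output across two caller-owned buffers.
    bool UncompressInto(const std::string& compressed,
                        char* head, size_t head_len,
                        char* tail, size_t tail_len) {
      struct iovec iov[2];
      iov[0].iov_base = head;
      iov[0].iov_len = head_len;
      iov[1].iov_base = tail;
      iov[1].iov_len = tail_len;
      return snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
                                          iov, 2);
    }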
--- a/other-licenses/snappy/src/snappy-test.cc
+++ b/other-licenses/snappy/src/snappy-test.cc
@@ -23,42 +23,52 @@
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Various stubs for the unit tests for the open-source version of Snappy.
 
-#include "snappy-test.h"
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
 
 #ifdef HAVE_WINDOWS_H
-#define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #endif
 
+#include "snappy-test.h"
+
 #include <algorithm>
 
 DEFINE_bool(run_microbenchmarks, true,
             "Run microbenchmarks before doing anything else.");
 
 namespace snappy {
 
-string ReadTestDataFile(const string& base) {
+string ReadTestDataFile(const string& base, size_t size_limit) {
   string contents;
   const char* srcdir = getenv("srcdir");  // This is set by Automake.
+  string prefix;
   if (srcdir) {
-    File::ReadFileToStringOrDie(
-        string(srcdir) + "/testdata/" + base, &contents);
-  } else {
-    File::ReadFileToStringOrDie("testdata/" + base, &contents);
+    prefix = string(srcdir) + "/";
+  }
+  file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+      ).CheckSuccess();
+  if (size_limit > 0) {
+    contents = contents.substr(0, size_limit);
   }
   return contents;
 }
 
+string ReadTestDataFile(const string& base) {
+  return ReadTestDataFile(base, 0);
+}
+
 string StringPrintf(const char* format, ...) {
   char buf[4096];
   va_list ap;
   va_start(ap, format);
   vsnprintf(buf, sizeof(buf), format, ap);
   va_end(ap);
   return buf;
 }
@@ -199,37 +209,42 @@ void Benchmark::Run() {
       StartBenchmarkTiming();
       (*function_)(num_iterations, test_case_num);
       StopBenchmarkTiming();
 
       benchmark_runs[run].real_time_us = benchmark_real_time_us;
       benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
     }
 
+    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
+    string human_readable_speed;
+
     nth_element(benchmark_runs,
                 benchmark_runs + kMedianPos,
                 benchmark_runs + kNumRuns,
                 BenchmarkCompareCPUTime());
     int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
     int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
-    int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;
-
-    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
-    string human_readable_speed;
-    if (bytes_per_second < 1024) {
-      human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
-    } else if (bytes_per_second < 1024 * 1024) {
-      human_readable_speed = StringPrintf(
-          "%.1fkB/s", bytes_per_second / 1024.0f);
-    } else if (bytes_per_second < 1024 * 1024 * 1024) {
-      human_readable_speed = StringPrintf(
-          "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+    if (cpu_time_us <= 0) {
+      human_readable_speed = "?";
     } else {
-      human_readable_speed = StringPrintf(
-          "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+      int64 bytes_per_second =
+          benchmark_bytes_processed * 1000000 / cpu_time_us;
+      if (bytes_per_second < 1024) {
+        human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
+      } else if (bytes_per_second < 1024 * 1024) {
+        human_readable_speed = StringPrintf(
+            "%.1fkB/s", bytes_per_second / 1024.0f);
+      } else if (bytes_per_second < 1024 * 1024 * 1024) {
+        human_readable_speed = StringPrintf(
+            "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+      } else {
+        human_readable_speed = StringPrintf(
+            "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+      }
     }
 
     fprintf(stderr,
 #ifdef WIN32
             "%-18s %10I64d %10I64d %10d %s  %s\n",
 #else
             "%-18s %10lld %10lld %10d %s  %s\n",
 #endif
--- a/other-licenses/snappy/src/snappy-test.h
+++ b/other-licenses/snappy/src/snappy-test.h
@@ -23,36 +23,40 @@
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Various stubs for the unit tests for the open-source version of Snappy.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+
+#include <iostream>
+#include <string>
 
 #include "snappy-stubs-internal.h"
 
 #include <stdio.h>
 #include <stdarg.h>
 
 #ifdef HAVE_SYS_MMAN_H
 #include <sys/mman.h>
 #endif
 
 #ifdef HAVE_SYS_RESOURCE_H
 #include <sys/resource.h>
 #endif
 
+#ifdef HAVE_SYS_TIME_H
 #include <sys/time.h>
+#endif
 
 #ifdef HAVE_WINDOWS_H
-#define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #endif
 
 #include <string>
 
 #ifdef HAVE_GTEST
 
 #include <gtest/gtest.h>
@@ -116,78 +120,94 @@ extern "C" {
 #include "fastlz.h"
 #endif
 
 #ifdef HAVE_LIBQUICKLZ
 #include "quicklz.h"
 #endif
 
 namespace {
+
 namespace File {
   void Init() { }
+}  // namespace File
 
-  void ReadFileToStringOrDie(const char* filename, string* data) {
-    FILE* fp = fopen(filename, "rb");
+namespace file {
+  int Defaults() { return 0; }
+
+  class DummyStatus {
+   public:
+    void CheckSuccess() { }
+  };
+
+  DummyStatus GetContents(const string& filename, string* data, int unused) {
+    FILE* fp = fopen(filename.c_str(), "rb");
     if (fp == NULL) {
-      perror(filename);
+      perror(filename.c_str());
       exit(1);
     }
 
     data->clear();
     while (!feof(fp)) {
       char buf[4096];
       size_t ret = fread(buf, 1, 4096, fp);
       if (ret == 0 && ferror(fp)) {
         perror("fread");
         exit(1);
       }
       data->append(string(buf, ret));
     }
 
     fclose(fp);
+
+    return DummyStatus();
   }
 
-  void ReadFileToStringOrDie(const string& filename, string* data) {
-    ReadFileToStringOrDie(filename.c_str(), data);
-  }
-
-  void WriteStringToFileOrDie(const string& str, const char* filename) {
-    FILE* fp = fopen(filename, "wb");
+  DummyStatus SetContents(const string& filename,
+                          const string& str,
+                          int unused) {
+    FILE* fp = fopen(filename.c_str(), "wb");
     if (fp == NULL) {
-      perror(filename);
+      perror(filename.c_str());
       exit(1);
     }
 
     int ret = fwrite(str.data(), str.size(), 1, fp);
     if (ret != 1) {
       perror("fwrite");
       exit(1);
     }
 
     fclose(fp);
+
+    return DummyStatus();
   }
-}  // namespace File
+}  // namespace file
+
 }  // namespace
 
 namespace snappy {
 
 #define FLAGS_test_random_seed 301
 typedef string TypeParam;
 
 void Test_CorruptedTest_VerifyCorrupted();
 void Test_Snappy_SimpleTests();
 void Test_Snappy_MaxBlowup();
 void Test_Snappy_RandomData();
 void Test_Snappy_FourByteOffset();
 void Test_SnappyCorruption_TruncatedVarint();
 void Test_SnappyCorruption_UnterminatedVarint();
+void Test_SnappyCorruption_OverflowingVarint();
 void Test_Snappy_ReadPastEndOfBuffer();
 void Test_Snappy_FindMatchLength();
 void Test_Snappy_FindMatchLengthRandom();
 
+string ReadTestDataFile(const string& base, size_t size_limit);
+
 string ReadTestDataFile(const string& base);
 
 // A sprintf() variant that returns a std::string.
 // Not safe for general use due to truncation issues.
 string StringPrintf(const char* format, ...);
 
 // A simple, non-cryptographically-secure random generator.
 class ACMRandom {
@@ -307,16 +327,17 @@ class Benchmark {
   const BenchmarkFunction function_;
   int start_, stop_;
 };
 #define BENCHMARK(benchmark_name) \
   Benchmark* Benchmark_ ## benchmark_name = \
           (new Benchmark(#benchmark_name, benchmark_name))
 
 extern Benchmark* Benchmark_BM_UFlat;
+extern Benchmark* Benchmark_BM_UIOVec;
 extern Benchmark* Benchmark_BM_UValidate;
 extern Benchmark* Benchmark_BM_ZFlat;
 
 void ResetBenchmarkTiming();
 void StartBenchmarkTiming();
 void StopBenchmarkTiming();
 void SetBenchmarkLabel(const string& str);
 void SetBenchmarkBytesProcessed(int64 bytes);
@@ -457,16 +478,17 @@ static void RunSpecifiedBenchmarks() {
 #endif
 #ifndef __OPTIMIZE__
   fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
 #endif
   fprintf(stderr, "Benchmark            Time(ns)    CPU(ns) Iterations\n");
   fprintf(stderr, "---------------------------------------------------\n");
 
   snappy::Benchmark_BM_UFlat->Run();
+  snappy::Benchmark_BM_UIOVec->Run();
   snappy::Benchmark_BM_UValidate->Run();
   snappy::Benchmark_BM_ZFlat->Run();
 
   fprintf(stderr, "\n");
 }
 
 #ifndef HAVE_GTEST
 
@@ -474,16 +496,17 @@ static inline int RUN_ALL_TESTS() {
   fprintf(stderr, "Running correctness tests.\n");
   snappy::Test_CorruptedTest_VerifyCorrupted();
   snappy::Test_Snappy_SimpleTests();
   snappy::Test_Snappy_MaxBlowup();
   snappy::Test_Snappy_RandomData();
   snappy::Test_Snappy_FourByteOffset();
   snappy::Test_SnappyCorruption_TruncatedVarint();
   snappy::Test_SnappyCorruption_UnterminatedVarint();
+  snappy::Test_SnappyCorruption_OverflowingVarint();
   snappy::Test_Snappy_ReadPastEndOfBuffer();
   snappy::Test_Snappy_FindMatchLength();
   snappy::Test_Snappy_FindMatchLengthRandom();
   fprintf(stderr, "All tests passed.\n");
 
   return 0;
 }
 
@@ -491,15 +514,86 @@ static inline int RUN_ALL_TESTS() {
 
 // For main().
 namespace snappy {
 
 static void CompressFile(const char* fname);
 static void UncompressFile(const char* fname);
 static void MeasureFile(const char* fname);
 
+// Logging.
+
+#define LOG(level) LogMessage()
+#define VLOG(level) true ? (void)0 : \
+    snappy::LogMessageVoidify() & snappy::LogMessage()
+
+class LogMessage {
+ public:
+  LogMessage() { }
+  ~LogMessage() {
+    cerr << endl;
+  }
+
+  LogMessage& operator<<(const std::string& msg) {
+    cerr << msg;
+    return *this;
+  }
+  LogMessage& operator<<(int x) {
+    cerr << x;
+    return *this;
+  }
+};
+
+// Asserts: both versions that are activated in debug mode only,
+// and ones that are always active.
+
+#define CRASH_UNLESS(condition) \
+    PREDICT_TRUE(condition) ? (void)0 : \
+    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+
+#ifdef _MSC_VER
+// ~LogMessageCrash calls abort() and therefore never returns. This is by
+// design, so temporarily disable warning C4722.
+#pragma warning(push)
+#pragma warning(disable:4722)
+#endif
+
+class LogMessageCrash : public LogMessage {
+ public:
+  LogMessageCrash() { }
+  ~LogMessageCrash() {
+    cerr << endl;
+    abort();
+  }
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class LogMessageVoidify {
+ public:
+  LogMessageVoidify() { }
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(const LogMessage&) { }
+};
+
+#define CHECK(cond) CRASH_UNLESS(cond)
+#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+#define CHECK_OK(cond) (cond).CheckSuccess()
+
 }  // namespace
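
The CHECK/CRASH_UNLESS macros above rely on an operator-precedence trick that is easy to miss: operator& binds more loosely than operator<< but more tightly than ?:, so the streamed message is fully built and then discarded as void, keeping both branches of the conditional the same type. A minimal standalone sketch of the pattern (the Tiny* names and the TINY_CHECK macro are invented for illustration; this is not the harness code itself):

    #include <cstdlib>
    #include <iostream>

    // Streams to stderr and terminates the line when the full expression ends.
    class TinyLog {
     public:
      ~TinyLog() { std::cerr << std::endl; }
      TinyLog& operator<<(const char* s) { std::cerr << s; return *this; }
    };

    // Same, but aborts once the message has been emitted.
    class TinyCrash : public TinyLog {
     public:
      ~TinyCrash() { std::cerr << std::endl; std::abort(); }
    };

    // Swallows the logger; returning void keeps both ?: branches void-typed.
    class TinyVoidify {
     public:
      void operator&(const TinyLog&) {}
    };

    #define TINY_CHECK(cond) \
      (cond) ? (void)0 : TinyVoidify() & TinyCrash() << "Check failed: " << #cond

    int main() {
      TINY_CHECK(2 + 2 == 4);   // true: evaluates to (void)0, nothing is logged
      // TINY_CHECK(1 > 2);     // false: would print the message, then abort()
      return 0;
    }
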
 
 using snappy::CompressFile;
 using snappy::UncompressFile;
 using snappy::MeasureFile;
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
--- a/other-licenses/snappy/src/snappy.cc
+++ b/other-licenses/snappy/src/snappy.cc
@@ -34,16 +34,24 @@
 
 #include <algorithm>
 #include <string>
 #include <vector>
 
 
 namespace snappy {
 
+using internal::COPY_1_BYTE_OFFSET;
+using internal::COPY_2_BYTE_OFFSET;
+using internal::COPY_4_BYTE_OFFSET;
+using internal::LITERAL;
+using internal::char_table;
+using internal::kMaximumTagLength;
+using internal::wordmask;
+
 // Any hash function will produce a valid compressed bitstream, but a good
 // hash function reduces the number of collisions and thus yields better
 // compression for compressible input, and more speed for incompressible
 // input. Of course, it doesn't hurt if the hash function is reasonably fast
 // either, as it gets called a lot.
 static inline uint32 HashBytes(uint32 bytes, int shift) {
   uint32 kMul = 0x1e35a7bd;
   return (bytes * kMul) >> shift;
@@ -71,36 +79,29 @@ size_t MaxCompressedLength(size_t source
   // enough, it will take 5 bytes to encode the copy op.  Therefore the
   // worst case here is a one-byte literal followed by a five-byte copy.
   // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
   //
   // This last factor dominates the blowup, so the final estimate is:
   return 32 + source_len + source_len/6;
 }
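
A quick worked instance of the bound just computed (the 1 MiB block size is only an illustrative number):

    #include <cassert>
    #include <cstddef>

    int main() {
      size_t source_len = 1048576;                       // 1 MiB input block
      size_t bound = 32 + source_len + source_len / 6;   // same formula as above
      assert(bound == 1223370);   // ~16.7% worst-case expansion plus 32 bytes
      return 0;
    }
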
 
-enum {
-  LITERAL = 0,
-  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
-  COPY_2_BYTE_OFFSET = 2,
-  COPY_4_BYTE_OFFSET = 3
-};
-
 // Copy "len" bytes from "src" to "op", one byte at a time.  Used for
 // handling COPY operations where the input and output regions may
 // overlap.  For example, suppose:
 //    src    == "ab"
 //    op     == src + 2
 //    len    == 20
 // After IncrementalCopy(src, op, len), the result will have
 // eleven copies of "ab"
 //    ababababababababababab
 // Note that this does not match the semantics of either memcpy()
 // or memmove().
-static inline void IncrementalCopy(const char* src, char* op, int len) {
-  DCHECK_GT(len, 0);
+static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
+  assert(len > 0);
   do {
     *op++ = *src++;
   } while (--len > 0);
 }
 
 // Equivalent to IncrementalCopy except that it can write up to ten extra
 // bytes after the end of the copy, and that it is faster.
 //
@@ -131,32 +132,32 @@ static inline void IncrementalCopy(const
 // op - src == 1 and len == 1; the last copy will read from byte positions
 // [0..7] and write to [4..11], whereas it was only supposed to write to
 // position 1. Thus, ten excess bytes.
 
 namespace {
 
 const int kMaxIncrementCopyOverflow = 10;
 
-}  // namespace
-
-static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
-  while (op - src < 8) {
-    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
+  while (PREDICT_FALSE(op - src < 8)) {
+    UnalignedCopy64(src, op);
     len -= op - src;
     op += op - src;
   }
   while (len > 0) {
-    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+    UnalignedCopy64(src, op);
     src += 8;
     op += 8;
     len -= 8;
   }
 }
 
+}  // namespace
+
 static inline char* EmitLiteral(char* op,
                                 const char* literal,
                                 int len,
                                 bool allow_fast_path) {
   int n = len - 1;      // Zero-length literals are disallowed
   if (n < 60) {
     // Fits in tag byte
     *op++ = LITERAL | (n << 2);
@@ -167,18 +168,18 @@ static inline char* EmitLiteral(char* op
     // main loop, since we have a bit to go on for both sides:
     //
     //   - The input will always have kInputMarginBytes = 15 extra
     //     available bytes, as long as we're in the main loop, and
     //     if not, allow_fast_path = false.
     //   - The output will always have 32 spare bytes (see
     //     MaxCompressedLength).
     if (allow_fast_path && len <= 16) {
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
+      UnalignedCopy64(literal, op);
+      UnalignedCopy64(literal + 8, op + 8);
       return op + len;
     }
   } else {
     // Encode in upcoming bytes
     char* base = op;
     int count = 0;
     op++;
     while (n > 0) {
@@ -190,36 +191,36 @@ static inline char* EmitLiteral(char* op
     assert(count <= 4);
     *base = LITERAL | ((59+count) << 2);
   }
   memcpy(op, literal, len);
   return op + len;
 }
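
To make the literal tag layout concrete, here is a small decoding sketch that inverts what EmitLiteral writes (ParseLiteralTag is a hypothetical helper for illustration; the real decoder below drives this through char_table and wordmask):

    #include <cstddef>

    // Assumes (tag & 3) == LITERAL.  Returns a pointer to the literal payload
    // and stores the decoded length in *len.
    static inline const char* ParseLiteralTag(const unsigned char* tag,
                                              size_t* len) {
      unsigned n = *tag >> 2;                    // upper six bits of the tag
      if (n < 60) {
        *len = n + 1;                            // short form: length inline
        return reinterpret_cast<const char*>(tag + 1);
      }
      unsigned extra = n - 59;                   // 1..4 little-endian bytes
      size_t value = 0;
      for (unsigned i = 0; i < extra; ++i) {
        value |= static_cast<size_t>(tag[1 + i]) << (8 * i);
      }
      *len = value + 1;
      return reinterpret_cast<const char*>(tag + 1 + extra);
    }
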
 
 static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
-  DCHECK_LE(len, 64);
-  DCHECK_GE(len, 4);
-  DCHECK_LT(offset, 65536);
+  assert(len <= 64);
+  assert(len >= 4);
+  assert(offset < 65536);
 
   if ((len < 12) && (offset < 2048)) {
     size_t len_minus_4 = len - 4;
     assert(len_minus_4 < 8);            // Must fit in 3 bits
-    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+    *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
     *op++ = offset & 0xff;
   } else {
-    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+    *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
     LittleEndian::Store16(op, offset);
     op += 2;
   }
   return op;
 }
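
And the matching sketch for the one-byte-offset copy tag emitted above (again hypothetical; the production decoder uses the precomputed char_table entries instead):

    // Decodes a COPY_1_BYTE_OFFSET tag plus its trailing offset byte.
    static inline void ParseCopy1ByteOffset(unsigned char tag,
                                            unsigned char offset_low,
                                            int* len, unsigned int* offset) {
      *len = 4 + ((tag >> 2) & 0x7);                        // 3 bits of len-4
      *offset = (static_cast<unsigned int>(tag >> 5) << 8) | offset_low;
    }
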
 
 static inline char* EmitCopy(char* op, size_t offset, int len) {
   // Emit 64 byte copies but make sure to keep at least four bytes reserved
-  while (len >= 68) {
+  while (PREDICT_FALSE(len >= 68)) {
     op = EmitCopyLessThan64(op, offset, 64);
     len -= 64;
   }
 
   // Emit an extra 60 byte copy if we have too much data to fit in one copy
   if (len > 64) {
     op = EmitCopyLessThan64(op, offset, 60);
     len -= 60;
@@ -248,18 +249,16 @@ uint16* WorkingMemory::GetHashTable(size
   // fill the table, incurring O(hash table size) overhead for
   // compression, and if the input is short, we won't need that
   // many hash table entries anyway.
   assert(kMaxHashTableSize >= 256);
   size_t htsize = 256;
   while (htsize < kMaxHashTableSize && htsize < input_size) {
     htsize <<= 1;
   }
-  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
-  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
 
   uint16* table;
   if (htsize <= ARRAYSIZE(small_table_)) {
     table = small_table_;
   } else {
     if (large_table_ == NULL) {
       large_table_ = new uint16[kMaxHashTableSize];
     }
@@ -267,26 +266,59 @@ uint16* WorkingMemory::GetHashTable(size
   }
 
   *table_size = htsize;
   memset(table, 0, htsize * sizeof(*table));
   return table;
 }
 }  // end namespace internal
 
-// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
+// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
 // equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
 // empirically found that overlapping loads such as
 //  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
 // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+//
+// We have different versions for 64- and 32-bit; ideally we would avoid the
+// two functions and just inline the UNALIGNED_LOAD64 call into
+// GetUint32AtOffset, but GCC (at least as of 4.6) is seemingly not clever
+// enough to avoid loading the value multiple times in that case. For 64-bit,
+// the load is done when GetEightBytesAt() is called, whereas for 32-bit, the
+// load is done at GetUint32AtOffset() time.
+
+#ifdef ARCH_K8
+
+typedef uint64 EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+  return UNALIGNED_LOAD64(ptr);
+}
+
 static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
-  DCHECK(0 <= offset && offset <= 4) << offset;
+  assert(offset >= 0);
+  assert(offset <= 4);
   return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
 }
 
+#else
+
+typedef const char* EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+  return ptr;
+}
+
+static inline uint32 GetUint32AtOffset(const char* v, int offset) {
+  assert(offset >= 0);
+  assert(offset <= 4);
+  return UNALIGNED_LOAD32(v + offset);
+}
+
+#endif
+
 // Flat array compression that does not emit the "uncompressed length"
 // prefix. Compresses "input" string to the "*op" buffer.
 //
 // REQUIRES: "input" is at most "kBlockSize" bytes long.
 // REQUIRES: "op" points to an array of memory that is at least
 // "MaxCompressedLength(input.size())" in size.
 // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
 // REQUIRES: "table_size" is a power of two
@@ -296,113 +328,114 @@ static inline uint32 GetUint32AtOffset(u
 namespace internal {
 char* CompressFragment(const char* input,
                        size_t input_size,
                        char* op,
                        uint16* table,
                        const int table_size) {
   // "ip" is the input pointer, and "op" is the output pointer.
   const char* ip = input;
-  CHECK_LE(input_size, kBlockSize);
-  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+  assert(input_size <= kBlockSize);
+  assert((table_size & (table_size - 1)) == 0); // table must be power of two
   const int shift = 32 - Bits::Log2Floor(table_size);
-  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
+  assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
   const char* ip_end = input + input_size;
   const char* base_ip = ip;
   // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
   // [next_emit, ip_end) after the main loop.
   const char* next_emit = ip;
 
   const size_t kInputMarginBytes = 15;
   if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
     const char* ip_limit = input + input_size - kInputMarginBytes;
 
     for (uint32 next_hash = Hash(++ip, shift); ; ) {
-      DCHECK_LT(next_emit, ip);
+      assert(next_emit < ip);
       // The body of this loop calls EmitLiteral once and then EmitCopy one or
       // more times.  (The exception is that when we're close to exhausting
       // the input we goto emit_remainder.)
       //
       // In the first iteration of this loop we're just starting, so
       // there's nothing to copy, so calling EmitLiteral once is
       // necessary.  And we only start a new iteration when the
       // current iteration has determined that a call to EmitLiteral will
       // precede the next call to EmitCopy (if any).
       //
       // Step 1: Scan forward in the input looking for a 4-byte-long match.
       // If we get close to exhausting the input then goto emit_remainder.
       //
       // Heuristic match skipping: If 32 bytes are scanned with no matches
       // found, start looking only at every other byte. If 32 more bytes are
-      // scanned, look at every third byte, etc.. When a match is found,
-      // immediately go back to looking at every byte. This is a small loss
-      // (~5% performance, ~0.1% density) for compressible data due to more
+      // scanned (or skipped), look at every third byte, etc. When a match is
+      // found, immediately go back to looking at every byte. This is a small
+      // loss (~5% performance, ~0.1% density) for compressible data due to more
       // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
       // win since the compressor quickly "realizes" the data is incompressible
       // and doesn't bother looking for matches everywhere.
       //
       // The "skip" variable keeps track of how many bytes there are since the
       // last match; dividing it by 32 (ie. right-shifting by five) gives the
       // number of bytes to move ahead for each iteration.
       uint32 skip = 32;
 
       const char* next_ip = ip;
       const char* candidate;
       do {
         ip = next_ip;
         uint32 hash = next_hash;
-        DCHECK_EQ(hash, Hash(ip, shift));
-        uint32 bytes_between_hash_lookups = skip++ >> 5;
+        assert(hash == Hash(ip, shift));
+        uint32 bytes_between_hash_lookups = skip >> 5;
+        skip += bytes_between_hash_lookups;
         next_ip = ip + bytes_between_hash_lookups;
         if (PREDICT_FALSE(next_ip > ip_limit)) {
           goto emit_remainder;
         }
         next_hash = Hash(next_ip, shift);
         candidate = base_ip + table[hash];
-        DCHECK_GE(candidate, base_ip);
-        DCHECK_LT(candidate, ip);
+        assert(candidate >= base_ip);
+        assert(candidate < ip);
 
         table[hash] = ip - base_ip;
       } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
                             UNALIGNED_LOAD32(candidate)));
 
       // Step 2: A 4-byte match has been found.  We'll later see if more
       // than 4 bytes match.  But, prior to the match, input
       // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
-      DCHECK_LE(next_emit + 16, ip_end);
+      assert(next_emit + 16 <= ip_end);
       op = EmitLiteral(op, next_emit, ip - next_emit, true);
 
       // Step 3: Call EmitCopy, and then see if another EmitCopy could
       // be our next move.  Repeat until we find no match for the
       // input immediately after what was consumed by the last EmitCopy call.
       //
       // If we exit this loop normally then we need to call EmitLiteral next,
       // though we don't yet know how big the literal will be.  We handle that
       // by proceeding to the next iteration of the main loop.  We also can exit
       // this loop via goto if we get close to exhausting the input.
-      uint64 input_bytes = 0;
+      EightBytesReference input_bytes;
       uint32 candidate_bytes = 0;
 
       do {
         // We have a 4-byte match at ip, and no need to emit any
         // "literal bytes" prior to ip.
         const char* base = ip;
         int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
         ip += matched;
         size_t offset = base - candidate;
-        DCHECK_EQ(0, memcmp(base, candidate, matched));
+        assert(0 == memcmp(base, candidate, matched));
         op = EmitCopy(op, offset, matched);
         // We could immediately start working at ip now, but to improve
         // compression we first update table[Hash(ip - 1, ...)].
         const char* insert_tail = ip - 1;
         next_emit = ip;
         if (PREDICT_FALSE(ip >= ip_limit)) {
           goto emit_remainder;
         }
-        input_bytes = UNALIGNED_LOAD64(insert_tail);
+        input_bytes = GetEightBytesAt(insert_tail);
         uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
         table[prev_hash] = ip - base_ip - 1;
         uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
         candidate = base_ip + table[cur_hash];
         candidate_bytes = UNALIGNED_LOAD32(candidate);
         table[cur_hash] = ip - base_ip;
       } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
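
The reworked skip arithmetic above (take a stride of skip >> 5, then grow skip by that stride) yields the schedule the comment describes: the stride increases by one roughly every further 32 bytes examined. A standalone sketch of just that schedule, with illustrative print intervals:

    #include <cstdio>

    int main() {
      unsigned skip = 32;
      unsigned long long bytes_examined = 0;
      for (int miss = 0; miss < 200; ++miss) {
        unsigned stride = skip >> 5;   // bytes to advance on this miss
        skip += stride;
        bytes_examined += stride;
        if (miss % 40 == 0) {
          std::printf("miss %3d (~%llu bytes in): stride %u\n",
                      miss, bytes_examined, stride);
        }
      }
      return 0;
    }
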
 
@@ -433,190 +466,49 @@ char* CompressFragment(const char* input
 //
 //   // Called after decompression
 //   bool CheckLength() const;
 //
 //   // Called repeatedly during decompression
 //   bool Append(const char* ip, size_t length);
 //   bool AppendFromSelf(uint32 offset, size_t length);
 //
-//   // The difference between TryFastAppend and Append is that TryFastAppend
-//   // is allowed to read up to <available> bytes from the input buffer,
-//   // whereas Append is allowed to read <length>.
+//   // The rules for how TryFastAppend differs from Append are somewhat
+//   // convoluted:
 //   //
-//   // Also, TryFastAppend is allowed to return false, declining the append,
-//   // without it being a fatal error -- just "return false" would be
-//   // a perfectly legal implementation of TryFastAppend. The intention
-//   // is for TryFastAppend to allow a fast path in the common case of
-//   // a small append.
+//   //  - TryFastAppend is allowed to decline (return false) at any
+//   //    time, for any reason -- just "return false" would be
+//   //    a perfectly legal implementation of TryFastAppend.
+//   //    The intention is for TryFastAppend to allow a fast path
+//   //    in the common case of a small append.
+//   //  - TryFastAppend is allowed to read up to <available> bytes
+//   //    from the input buffer, whereas Append is allowed to read
+//   //    <length>. However, if it returns true, it must leave
+//   //    at least five (kMaximumTagLength) bytes in the input buffer
+//   //    afterwards, so that there is always enough space to read the
+//   //    next tag without checking for a refill.
+//   //  - TryFastAppend must always return decline (return false)
+//   //    if <length> is 61 or more, as in this case the literal length is not
+//   //    decoded fully. In practice, this should not be a big problem,
+//   //    as it is unlikely that one would implement a fast path accepting
+//   //    this much data.
 //   //
-//   // NOTE(user): TryFastAppend must always return decline (return false)
-//   // if <length> is 61 or more, as in this case the literal length is not
-//   // decoded fully. In practice, this should not be a big problem,
-//   // as it is unlikely that one would implement a fast path accepting
-//   // this much data.
 //   bool TryFastAppend(const char* ip, size_t available, size_t length);
 // };
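
As the contract notes, declining every fast append is a legal implementation. A minimal writer sketch along those lines (hypothetical names; the SnappyDecompressionValidator later in this file follows the same pattern):

    #include <cstddef>

    // Counts output instead of storing it; TryFastAppend always declines,
    // which the contract above explicitly permits.
    class CountingWriter {
     public:
      CountingWriter() : expected_(0), produced_(0) {}
      void SetExpectedLength(size_t len) { expected_ = len; }
      bool CheckLength() const { return produced_ == expected_; }
      bool Append(const char* /*ip*/, size_t len) {
        produced_ += len;
        return produced_ <= expected_;
      }
      bool TryFastAppend(const char*, size_t, size_t) { return false; }
      bool AppendFromSelf(size_t offset, size_t len) {
        if (offset == 0 || offset > produced_) return false;
        produced_ += len;
        return produced_ <= expected_;
      }
      void Flush() {}

     private:
      size_t expected_;
      size_t produced_;
    };
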
 
-// -----------------------------------------------------------------------
-// Lookup table for decompression code.  Generated by ComputeTable() below.
-// -----------------------------------------------------------------------
-
-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
-static const uint32 wordmask[] = {
-  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
-// Data stored per entry in lookup table:
-//      Range   Bits-used       Description
-//      ------------------------------------
-//      1..64   0..7            Literal/copy length encoded in opcode byte
-//      0..7    8..10           Copy offset encoded in opcode byte / 256
-//      0..4    11..13          Extra bytes after opcode
-//
-// We use eight bits for the length even though 7 would have sufficed
-// because of efficiency reasons:
-//      (1) Extracting a byte is faster than a bit-field
-//      (2) It properly aligns copy offset so we do not need a <<8
-static const uint16 char_table[256] = {
-  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
-  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
-  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
-  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
-  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
-  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
-  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
-  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
-  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
-  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
-  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
-  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
-  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
-  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
-  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
-  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
-  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
-  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
-  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
-  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
-  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
-  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
-  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
-  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
-  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
-  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
-  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
-  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
-  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
-  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
-  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
-  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
-};
-
-// In debug mode, allow optional computation of the table at startup.
-// Also, check that the decompression table is correct.
-#ifndef NDEBUG
-DEFINE_bool(snappy_dump_decompression_table, false,
-            "If true, we print the decompression table at startup.");
-
-static uint16 MakeEntry(unsigned int extra,
-                        unsigned int len,
-                        unsigned int copy_offset) {
-  // Check that all of the fields fit within the allocated space
-  DCHECK_EQ(extra,       extra & 0x7);          // At most 3 bits
-  DCHECK_EQ(copy_offset, copy_offset & 0x7);    // At most 3 bits
-  DCHECK_EQ(len,         len & 0x7f);           // At most 7 bits
-  return len | (copy_offset << 8) | (extra << 11);
-}
-
-static void ComputeTable() {
-  uint16 dst[256];
-
-  // Place invalid entries in all places to detect missing initialization
-  int assigned = 0;
-  for (int i = 0; i < 256; i++) {
-    dst[i] = 0xffff;
-  }
-
-  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
-  for (unsigned int len = 1; len <= 60; len++) {
-    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
-    assigned++;
-  }
-
-  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
-  // encode the number of bytes of length info that follow the opcode.
-  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
-    // We set the length field in the lookup table to 1 because extra
-    // bytes encode len-1.
-    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
-    assigned++;
-  }
-
-  // COPY_1_BYTE_OFFSET.
-  //
-  // The tag byte in the compressed data stores len-4 in 3 bits, and
-  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
-  //
-  // This format is used for length in range [4..11] and offset in
-  // range [0..2047]
-  for (unsigned int len = 4; len < 12; len++) {
-    for (unsigned int offset = 0; offset < 2048; offset += 256) {
-      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
-        MakeEntry(1, len, offset>>8);
-      assigned++;
-    }
-  }
-
-  // COPY_2_BYTE_OFFSET.
-  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
-    assigned++;
-  }
-
-  // COPY_4_BYTE_OFFSET.
-  // Tag contents len-1 in top 6 bits, and offset in next four bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
-    assigned++;
-  }
-
-  // Check that each entry was initialized exactly once.
-  CHECK_EQ(assigned, 256);
-  for (int i = 0; i < 256; i++) {
-    CHECK_NE(dst[i], 0xffff);
-  }
-
-  if (FLAGS_snappy_dump_decompression_table) {
-    printf("static const uint16 char_table[256] = {\n  ");
-    for (int i = 0; i < 256; i++) {
-      printf("0x%04x%s",
-             dst[i],
-             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
-    }
-    printf("};\n");
-  }
-
-  // Check that computed table matched recorded table
-  for (int i = 0; i < 256; i++) {
-    CHECK_EQ(dst[i], char_table[i]);
-  }
-}
-#endif /* !NDEBUG */
 
 // Helper class for decompression
 class SnappyDecompressor {
  private:
   Source*       reader_;         // Underlying source of bytes to decompress
   const char*   ip_;             // Points to next buffered byte
   const char*   ip_limit_;       // Points just past buffered bytes
   uint32        peeked_;         // Bytes peeked from reader (need to skip)
   bool          eof_;            // Hit end of input without an error?
-  char          scratch_[5];     // Temporary buffer for PeekFast() boundaries
+  char          scratch_[kMaximumTagLength];  // See RefillTag().
 
   // Ensure that all of the tag metadata for the next tag is available
   // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
   // if (ip_limit_ - ip_ < 5).
   //
   // Returns true on success, false on error or end of input.
   bool RefillTag();
 
@@ -638,28 +530,30 @@ class SnappyDecompressor {
   bool eof() const {
     return eof_;
   }
 
   // Read the uncompressed length stored at the start of the compressed data.
   // On success, stores the length in *result and returns true.
   // On failure, returns false.
   bool ReadUncompressedLength(uint32* result) {
-    DCHECK(ip_ == NULL);       // Must not have read anything yet
+    assert(ip_ == NULL);       // Must not have read anything yet
     // Length is encoded in 1..5 bytes
     *result = 0;
     uint32 shift = 0;
     while (true) {
       if (shift >= 32) return false;
       size_t n;
       const char* ip = reader_->Peek(&n);
       if (n == 0) return false;
       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
       reader_->Skip(1);
-      *result |= static_cast<uint32>(c & 0x7f) << shift;
+      uint32 val = c & 0x7f;
+      if (((val << shift) >> shift) != val) return false;
+      *result |= val << shift;
       if (c < 128) {
         break;
       }
       shift += 7;
     }
     return true;
   }
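
The new ((val << shift) >> shift) != val guard is what the added OverflowingVarint test exercises: a length byte whose bits would be shifted out of the 32-bit result now fails instead of silently wrapping. A standalone sketch of the same decoding loop (illustrative, not the member function above):

    #include <stddef.h>
    #include <stdint.h>

    // A 32-bit varint needs at most five bytes; reject any byte whose bits
    // would be lost by the shift instead of letting the value wrap.
    static bool DecodeVarint32(const unsigned char* p, size_t n, uint32_t* out) {
      *out = 0;
      for (uint32_t shift = 0, i = 0; shift < 32 && i < n; shift += 7, ++i) {
        uint32_t val = p[i] & 0x7f;
        if (((val << shift) >> shift) != val) return false;  // overflow
        *out |= val << shift;
        if (p[i] < 128) return true;   // high bit clear: last varint byte
      }
      return false;  // truncated input or more than five bytes
    }
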
 
@@ -669,32 +563,34 @@ class SnappyDecompressor {
   void DecompressAllTags(Writer* writer) {
     const char* ip = ip_;
 
     // We could have put this refill fragment only at the beginning of the loop.
     // However, duplicating it at the end of each branch gives the compiler more
     // scope to optimize the <ip_limit_ - ip> expression based on the local
     // context, which overall increases speed.
     #define MAYBE_REFILL() \
-        if (ip_limit_ - ip < 5) { \
+        if (ip_limit_ - ip < kMaximumTagLength) { \
           ip_ = ip; \
           if (!RefillTag()) return; \
           ip = ip_; \
         }
 
     MAYBE_REFILL();
     for ( ;; ) {
       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
 
       if ((c & 0x3) == LITERAL) {
         size_t literal_length = (c >> 2) + 1u;
         if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
-          DCHECK_LT(literal_length, 61);
+          assert(literal_length < 61);
           ip += literal_length;
-          MAYBE_REFILL();
+          // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
+          // will not return true unless there's already at least five spare
+          // bytes in addition to the literal.
           continue;
         }
         if (PREDICT_FALSE(literal_length >= 61)) {
           // Long literal.
           const size_t literal_length_length = literal_length - 60;
           literal_length =
               (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
           ip += literal_length_length;
@@ -749,21 +645,21 @@ bool SnappyDecompressor::RefillTag() {
     if (n == 0) {
       eof_ = true;
       return false;
     }
     ip_limit_ = ip + n;
   }
 
   // Read the tag character
-  DCHECK_LT(ip, ip_limit_);
+  assert(ip < ip_limit_);
   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
   const uint32 entry = char_table[c];
   const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
-  DCHECK_LE(needed, sizeof(scratch_));
+  assert(needed <= sizeof(scratch_));
 
   // Read more bytes from reader if needed
   uint32 nbuf = ip_limit_ - ip;
   if (nbuf < needed) {
     // Stitch together bytes from ip and reader to form the word
     // contents.  We store the needed bytes in "scratch_".  They
     // will be consumed immediately by the caller since we do not
     // read more than we need.
@@ -774,52 +670,53 @@ bool SnappyDecompressor::RefillTag() {
       size_t length;
       const char* src = reader_->Peek(&length);
       if (length == 0) return false;
       uint32 to_add = min<uint32>(needed - nbuf, length);
       memcpy(scratch_ + nbuf, src, to_add);
       nbuf += to_add;
       reader_->Skip(to_add);
     }
-    DCHECK_EQ(nbuf, needed);
+    assert(nbuf == needed);
     ip_ = scratch_;
     ip_limit_ = scratch_ + needed;
-  } else if (nbuf < 5) {
+  } else if (nbuf < kMaximumTagLength) {
     // Have enough bytes, but move into scratch_ so that we do not
     // read past end of input
     memmove(scratch_, ip, nbuf);
     reader_->Skip(peeked_);  // All peeked bytes are used up
     peeked_ = 0;
     ip_ = scratch_;
     ip_limit_ = scratch_ + nbuf;
   } else {
     // Pass pointer to buffer returned by reader_.
     ip_ = ip;
   }
   return true;
 }
 
 template <typename Writer>
-static bool InternalUncompress(Source* r,
-                               Writer* writer,
-                               uint32 max_len) {
+static bool InternalUncompress(Source* r, Writer* writer) {
   // Read the uncompressed length from the front of the compressed input
   SnappyDecompressor decompressor(r);
   uint32 uncompressed_len = 0;
   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
-  // Protect against possible DoS attack
-  if (static_cast<uint64>(uncompressed_len) > max_len) {
-    return false;
-  }
+  return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
+}
 
+template <typename Writer>
+static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+                                      Writer* writer,
+                                      uint32 uncompressed_len) {
   writer->SetExpectedLength(uncompressed_len);
 
   // Process the entire input
-  decompressor.DecompressAllTags(writer);
-  return (decompressor.eof() && writer->CheckLength());
+  decompressor->DecompressAllTags(writer);
+  writer->Flush();
+  return (decompressor->eof() && writer->CheckLength());
 }
 
 bool GetUncompressedLength(Source* source, uint32* result) {
   SnappyDecompressor decompressor(source);
   return decompressor.ReadUncompressedLength(result);
 }
 
 size_t Compress(Source* reader, Sink* writer) {
@@ -833,17 +730,17 @@ size_t Compress(Source* reader, Sink* wr
   internal::WorkingMemory wmem;
   char* scratch = NULL;
   char* scratch_output = NULL;
 
   while (N > 0) {
     // Get next block to compress (without copying if possible)
     size_t fragment_size;
     const char* fragment = reader->Peek(&fragment_size);
-    DCHECK_NE(fragment_size, 0) << ": premature end of input";
+    assert(fragment_size != 0);  // premature end of input
     const size_t num_to_read = min(N, kBlockSize);
     size_t bytes_read = fragment_size;
 
     size_t pending_advance = 0;
     if (bytes_read >= num_to_read) {
       // Buffer returned by reader is large enough
       pending_advance = num_to_read;
       fragment_size = num_to_read;
@@ -860,21 +757,21 @@ size_t Compress(Source* reader, Sink* wr
 
       while (bytes_read < num_to_read) {
         fragment = reader->Peek(&fragment_size);
         size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
         memcpy(scratch + bytes_read, fragment, n);
         bytes_read += n;
         reader->Skip(n);
       }
-      DCHECK_EQ(bytes_read, num_to_read);
+      assert(bytes_read == num_to_read);
       fragment = scratch;
       fragment_size = num_to_read;
     }
-    DCHECK_EQ(fragment_size, num_to_read);
+    assert(fragment_size == num_to_read);
 
     // Get encoding table for compression
     int table_size;
     uint16* table = wmem.GetHashTable(num_to_read, &table_size);
 
     // Compress input_fragment and append to dest
     const int max_output = MaxCompressedLength(num_to_read);
 
@@ -899,32 +796,211 @@ size_t Compress(Source* reader, Sink* wr
 
   delete[] scratch;
   delete[] scratch_output;
 
   return written;
 }
 
 // -----------------------------------------------------------------------
+// IOVec interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to an iovec.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyIOVecWriter {
+ private:
+  const struct iovec* output_iov_;
+  const size_t output_iov_count_;
+
+  // We are currently writing into output_iov_[curr_iov_index_].
+  size_t curr_iov_index_;
+
+  // Bytes written to output_iov_[curr_iov_index_] so far.
+  size_t curr_iov_written_;
+
+  // Total bytes decompressed into output_iov_ so far.
+  size_t total_written_;
+
+  // Maximum number of bytes that will be decompressed into output_iov_.
+  size_t output_limit_;
+
+  inline char* GetIOVecPointer(size_t index, size_t offset) {
+    return reinterpret_cast<char*>(output_iov_[index].iov_base) +
+        offset;
+  }
+
+ public:
+  // Does not take ownership of iov. iov must be valid during the
+  // entire lifetime of the SnappyIOVecWriter.
+  inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
+      : output_iov_(iov),
+        output_iov_count_(iov_count),
+        curr_iov_index_(0),
+        curr_iov_written_(0),
+        total_written_(0),
+        output_limit_(-1) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    output_limit_ = len;
+  }
+
+  inline bool CheckLength() const {
+    return total_written_ == output_limit_;
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    if (total_written_ + len > output_limit_) {
+      return false;
+    }
+
+    while (len > 0) {
+      assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+      if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
+        // This iovec is full. Go to the next one.
+        if (curr_iov_index_ + 1 >= output_iov_count_) {
+          return false;
+        }
+        curr_iov_written_ = 0;
+        ++curr_iov_index_;
+      }
+
+      const size_t to_write = std::min(
+          len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
+      memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+             ip,
+             to_write);
+      curr_iov_written_ += to_write;
+      total_written_ += to_write;
+      ip += to_write;
+      len -= to_write;
+    }
+
+    return true;
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+    const size_t space_left = output_limit_ - total_written_;
+    if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+        output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
+      UnalignedCopy64(ip, ptr);
+      UnalignedCopy64(ip + 8, ptr + 8);
+      curr_iov_written_ += len;
+      total_written_ += len;
+      return true;
+    }
+
+    return false;
+  }
+
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    if (offset > total_written_ || offset == 0) {
+      return false;
+    }
+    const size_t space_left = output_limit_ - total_written_;
+    if (len > space_left) {
+      return false;
+    }
+
+    // Locate the iovec from which we need to start the copy.
+    size_t from_iov_index = curr_iov_index_;
+    size_t from_iov_offset = curr_iov_written_;
+    while (offset > 0) {
+      if (from_iov_offset >= offset) {
+        from_iov_offset -= offset;
+        break;
+      }
+
+      offset -= from_iov_offset;
+      assert(from_iov_index > 0);
+      --from_iov_index;
+      from_iov_offset = output_iov_[from_iov_index].iov_len;
+    }
+
+    // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
+    // the current iovec.
+    while (len > 0) {
+      assert(from_iov_index <= curr_iov_index_);
+      if (from_iov_index != curr_iov_index_) {
+        const size_t to_copy = std::min(
+            output_iov_[from_iov_index].iov_len - from_iov_offset,
+            len);
+        Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
+        len -= to_copy;
+        if (len > 0) {
+          ++from_iov_index;
+          from_iov_offset = 0;
+        }
+      } else {
+        assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+        size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
+                                      curr_iov_written_,
+                                  len);
+        if (to_copy == 0) {
+          // This iovec is full. Go to the next one.
+          if (curr_iov_index_ + 1 >= output_iov_count_) {
+            return false;
+          }
+          ++curr_iov_index_;
+          curr_iov_written_ = 0;
+          continue;
+        }
+        if (to_copy > len) {
+          to_copy = len;
+        }
+        IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
+                        GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+                        to_copy);
+        curr_iov_written_ += to_copy;
+        from_iov_offset += to_copy;
+        total_written_ += to_copy;
+        len -= to_copy;
+      }
+    }
+
+    return true;
+  }
+
+  inline void Flush() {}
+};
+
+bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+                          const struct iovec* iov, size_t iov_cnt) {
+  ByteArraySource reader(compressed, compressed_length);
+  return RawUncompressToIOVec(&reader, iov, iov_cnt);
+}
+
+bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+                          size_t iov_cnt) {
+  SnappyIOVecWriter output(iov, iov_cnt);
+  return InternalUncompress(compressed, &output);
+}
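
A hypothetical caller-side sketch of the new iovec entry points (the helper name, buffers, and sizes are illustrative; struct iovec is expected to come from <sys/uio.h> or, where that is missing, from the definition in snappy-stubs-public.h):

    #include <cstddef>
    #include <string>

    #include "snappy.h"

    // Scatters the uncompressed output across two caller-provided buffers
    // whose combined size must cover GetUncompressedLength() of the input.
    bool UncompressIntoTwoBuffers(const std::string& compressed,
                                  char* first, size_t first_len,
                                  char* second, size_t second_len) {
      struct iovec iov[2];
      iov[0].iov_base = first;
      iov[0].iov_len = first_len;
      iov[1].iov_base = second;
      iov[1].iov_len = second_len;
      return snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
                                          iov, 2);
    }
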
+
+// -----------------------------------------------------------------------
 // Flat array interfaces
 // -----------------------------------------------------------------------
 
 // A type that writes to a flat array.
 // Note that this is not a "ByteSink", but a type that matches the
 // Writer template argument to SnappyDecompressor::DecompressAllTags().
 class SnappyArrayWriter {
  private:
   char* base_;
   char* op_;
   char* op_limit_;
 
  public:
   inline explicit SnappyArrayWriter(char* dst)
       : base_(dst),
-        op_(dst) {
+        op_(dst),
+        op_limit_(dst) {
   }
 
   inline void SetExpectedLength(size_t len) {
     op_limit_ = op_ + len;
   }
 
   inline bool CheckLength() const {
     return op_ == op_limit_;
@@ -939,111 +1015,131 @@ class SnappyArrayWriter {
     memcpy(op, ip, len);
     op_ = op + len;
     return true;
   }
 
   inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
     char* op = op_;
     const size_t space_left = op_limit_ - op;
-    if (len <= 16 && available >= 16 && space_left >= 16) {
+    if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
       // Fast path, used for the majority (about 95%) of invocations.
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+      UnalignedCopy64(ip, op);
+      UnalignedCopy64(ip + 8, op + 8);
       op_ = op + len;
       return true;
     } else {
       return false;
     }
   }
 
   inline bool AppendFromSelf(size_t offset, size_t len) {
     char* op = op_;
     const size_t space_left = op_limit_ - op;
 
-    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
+    // Check if we try to append from before the start of the buffer.
+    // Normally this would just be a check for "produced < offset",
+    // but "produced <= offset - 1u" is equivalent for every case
+    // except the one where offset==0, where the right side will wrap around
+    // to a very big number. This is convenient, as offset==0 is another
+    // invalid case that we also want to catch, so that we do not go
+    // into an infinite loop.
+    assert(op >= base_);
+    size_t produced = op - base_;
+    if (produced <= offset - 1u) {
       return false;
     }
     if (len <= 16 && offset >= 8 && space_left >= 16) {
       // Fast path, used for the majority (70-80%) of dynamic invocations.
-      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
-      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
+      UnalignedCopy64(op - offset, op);
+      UnalignedCopy64(op - offset + 8, op + 8);
     } else {
       if (space_left >= len + kMaxIncrementCopyOverflow) {
         IncrementalCopyFastPath(op - offset, op, len);
       } else {
         if (space_left < len) {
           return false;
         }
         IncrementalCopy(op - offset, op, len);
       }
     }
 
     op_ = op + len;
     return true;
   }
+  inline size_t Produced() const {
+    return op_ - base_;
+  }
+  inline void Flush() {}
 };
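
The offset - 1u comparison documented above leans on unsigned wraparound; a tiny self-contained illustration with made-up values:

    #include <cassert>
    #include <cstddef>

    int main() {
      size_t produced = 5;   // bytes already written to the output
      size_t offset = 0;     // a copy may never reference offset 0
      // offset - 1u wraps to SIZE_MAX, so one comparison rejects offset == 0
      // as well as offsets that reach before the start of the output.
      assert(produced <= offset - 1u);       // rejected
      offset = 3;                            // valid back-reference
      assert(!(produced <= offset - 1u));    // accepted
      offset = 7;                            // points before the output start
      assert(produced <= offset - 1u);       // rejected
      return 0;
    }
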
 
 bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
   ByteArraySource reader(compressed, n);
   return RawUncompress(&reader, uncompressed);
 }
 
 bool RawUncompress(Source* compressed, char* uncompressed) {
   SnappyArrayWriter output(uncompressed);
-  return InternalUncompress(compressed, &output, kuint32max);
+  return InternalUncompress(compressed, &output);
 }
 
 bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
   size_t ulength;
   if (!GetUncompressedLength(compressed, n, &ulength)) {
     return false;
   }
-  // Protect against possible DoS attack
-  if ((static_cast<uint64>(ulength) + uncompressed->size()) >
-      uncompressed->max_size()) {
+  // On 32-bit builds: max_size() < kuint32max.  Check for that instead
+  // of crashing (e.g., consider externally specified compressed data).
+  if (ulength > uncompressed->max_size()) {
     return false;
   }
   STLStringResizeUninitialized(uncompressed, ulength);
   return RawUncompress(compressed, n, string_as_array(uncompressed));
 }
 
-
 // A Writer that drops everything on the floor and just does validation
 class SnappyDecompressionValidator {
  private:
   size_t expected_;
   size_t produced_;
 
  public:
-  inline SnappyDecompressionValidator() : produced_(0) { }
+  inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
   inline void SetExpectedLength(size_t len) {
     expected_ = len;
   }
   inline bool CheckLength() const {
     return expected_ == produced_;
   }
   inline bool Append(const char* ip, size_t len) {
     produced_ += len;
     return produced_ <= expected_;
   }
   inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
     return false;
   }
   inline bool AppendFromSelf(size_t offset, size_t len) {
-    if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
+    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+    // the "offset - 1u" trick.
+    if (produced_ <= offset - 1u) return false;
     produced_ += len;
     return produced_ <= expected_;
   }
+  inline void Flush() {}
 };
 
 bool IsValidCompressedBuffer(const char* compressed, size_t n) {
   ByteArraySource reader(compressed, n);
   SnappyDecompressionValidator writer;
-  return InternalUncompress(&reader, &writer, kuint32max);
+  return InternalUncompress(&reader, &writer);
+}
+
+bool IsValidCompressed(Source* compressed) {
+  SnappyDecompressionValidator writer;
+  return InternalUncompress(compressed, &writer);
 }
 
 void RawCompress(const char* input,
                  size_t input_length,
                  char* compressed,
                  size_t* compressed_length) {
   ByteArraySource reader(input, input_length);
   UncheckedByteArraySink writer(compressed);
@@ -1059,11 +1155,246 @@ size_t Compress(const char* input, size_
 
   size_t compressed_length;
   RawCompress(input, input_length, string_as_array(compressed),
               &compressed_length);
   compressed->resize(compressed_length);
   return compressed_length;
 }
 
+// -----------------------------------------------------------------------
+// Sink interface
+// -----------------------------------------------------------------------
+
+// A type that decompresses into a Sink. The template parameter
+// Allocator must export one method "char* Allocate(int size);", which
+// allocates a buffer of "size" and appends that to the destination.
+template <typename Allocator>
+class SnappyScatteredWriter {
+  Allocator allocator_;
+
+  // We need random access into the data generated so far.  Therefore
+  // we keep track of all of the generated data as an array of blocks.
+  // All of the blocks except the last have length kBlockSize.
+  vector<char*> blocks_;
+  size_t expected_;
+
+  // Total size of all fully generated blocks so far
+  size_t full_size_;
+
+  // Pointer into current output block
+  char* op_base_;       // Base of output block
+  char* op_ptr_;        // Pointer to next unfilled byte in block
+  char* op_limit_;      // Pointer just past block
+
+  inline size_t Size() const {
+    return full_size_ + (op_ptr_ - op_base_);
+  }
+
+  bool SlowAppend(const char* ip, size_t len);
+  bool SlowAppendFromSelf(size_t offset, size_t len);
+
+ public:
+  inline explicit SnappyScatteredWriter(const Allocator& allocator)
+      : allocator_(allocator),
+        full_size_(0),
+        op_base_(NULL),
+        op_ptr_(NULL),
+        op_limit_(NULL) {
+  }
+
+  inline void SetExpectedLength(size_t len) {
+    assert(blocks_.empty());
+    expected_ = len;
+  }
+
+  inline bool CheckLength() const {
+    return Size() == expected_;
+  }
+
+  // Return the number of bytes actually uncompressed so far
+  inline size_t Produced() const {
+    return Size();
+  }
+
+  inline bool Append(const char* ip, size_t len) {
+    size_t avail = op_limit_ - op_ptr_;
+    if (len <= avail) {
+      // Fast path
+      memcpy(op_ptr_, ip, len);
+      op_ptr_ += len;
+      return true;
+    } else {
+      return SlowAppend(ip, len);
+    }
+  }
+
+  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+    char* op = op_ptr_;
+    const int space_left = op_limit_ - op;
+    if (length <= 16 && available >= 16 + kMaximumTagLength &&
+        space_left >= 16) {
+      // Fast path, used for the majority (about 95%) of invocations.
+      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+      op_ptr_ = op + length;
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  inline bool AppendFromSelf(size_t offset, size_t len) {
+    // See SnappyArrayWriter::AppendFromSelf for an explanation of
+    // the "offset - 1u" trick.
+    if (offset - 1u < op_ptr_ - op_base_) {
+      const size_t space_left = op_limit_ - op_ptr_;
+      if (space_left >= len + kMaxIncrementCopyOverflow) {
+        // Fast path: src and dst in current block.
+        IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len);
+        op_ptr_ += len;
+        return true;
+      }
+    }
+    return SlowAppendFromSelf(offset, len);
+  }
+
+  // Called at the end of decompression. We ask the allocator to
+  // write all blocks to the sink.
+  inline void Flush() { allocator_.Flush(Produced()); }
+};
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
+  size_t avail = op_limit_ - op_ptr_;
+  while (len > avail) {
+    // Completely fill this block
+    memcpy(op_ptr_, ip, avail);
+    op_ptr_ += avail;
+    assert(op_limit_ - op_ptr_ == 0);
+    full_size_ += (op_ptr_ - op_base_);
+    len -= avail;
+    ip += avail;
+
+    // Bounds check
+    if (full_size_ + len > expected_) {
+      return false;
+    }
+
+    // Make new block
+    size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
+    op_base_ = allocator_.Allocate(bsize);
+    op_ptr_ = op_base_;
+    op_limit_ = op_base_ + bsize;
+    blocks_.push_back(op_base_);
+    avail = bsize;
+  }
+
+  memcpy(op_ptr_, ip, len);
+  op_ptr_ += len;
+  return true;
+}
+
+template<typename Allocator>
+bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
+                                                         size_t len) {
+  // Overflow check
+  // See SnappyArrayWriter::AppendFromSelf for an explanation of
+  // the "offset - 1u" trick.
+  const size_t cur = Size();
+  if (offset - 1u >= cur) return false;
+  if (expected_ - cur < len) return false;
+
+  // Currently we shouldn't ever hit this path because Compress() chops the
+  // input into blocks and does not create cross-block copies. However, it is
+  // nice if we do not rely on that, since we can get better compression if we
+  // allow cross-block copies and thus might want to change the compressor in
+  // the future.
+  size_t src = cur - offset;
+  while (len-- > 0) {
+    char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
+    Append(&c, 1);
+    src++;
+  }
+  return true;
+}
+
+class SnappySinkAllocator {
+ public:
+  explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
+  ~SnappySinkAllocator() {}
+
+  char* Allocate(int size) {
+    Datablock block(new char[size], size);
+    blocks_.push_back(block);
+    return block.data;
+  }
+
+  // We flush only at the end, because the writer wants
+  // random access to the blocks and once we hand the
+  // block over to the sink, we can't access it anymore.
+  // Also we don't write more than has actually been written
+  // to the blocks.
+  void Flush(size_t size) {
+    size_t size_written = 0;
+    size_t block_size;
+    for (int i = 0; i < blocks_.size(); ++i) {
+      block_size = min<size_t>(blocks_[i].size, size - size_written);
+      dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
+                                    &SnappySinkAllocator::Deleter, NULL);
+      size_written += block_size;
+    }
+    blocks_.clear();
+  }
+
+ private:
+  struct Datablock {
+    char* data;
+    size_t size;
+    Datablock(char* p, size_t s) : data(p), size(s) {}
+  };
+
+  static void Deleter(void* arg, const char* bytes, size_t size) {
+    delete[] bytes;
+  }
+
+  Sink* dest_;
+  vector<Datablock> blocks_;
+
+  // Note: copying this object is allowed
+};
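
For reference, the Allocator contract SnappyScatteredWriter actually exercises is just Allocate() plus the Flush() call above. A throwaway sketch of an alternative allocator (hypothetical, not part of this patch) that frees its blocks instead of handing them to a Sink:

    #include <cstddef>
    #include <vector>

    // Satisfies the same Allocate/Flush interface the writer uses.  Note that
    // SnappyScatteredWriter copies its allocator, so only the writer's copy
    // should end up calling Flush().
    class DiscardingAllocator {
     public:
      char* Allocate(int size) {
        char* block = new char[size];
        blocks_.push_back(block);
        return block;
      }
      void Flush(size_t /*size*/) {
        for (size_t i = 0; i < blocks_.size(); ++i) delete[] blocks_[i];
        blocks_.clear();
      }

     private:
      std::vector<char*> blocks_;
    };
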
+
+size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
+  SnappySinkAllocator allocator(uncompressed);
+  SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+  InternalUncompress(compressed, &writer);
+  return writer.Produced();
+}
+
+bool Uncompress(Source* compressed, Sink* uncompressed) {
+  // Read the uncompressed length from the front of the compressed input
+  SnappyDecompressor decompressor(compressed);
+  uint32 uncompressed_len = 0;
+  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
+    return false;
+  }
+
+  char c;
+  size_t allocated_size;
+  char* buf = uncompressed->GetAppendBufferVariable(
+      1, uncompressed_len, &c, 1, &allocated_size);
+
+  // If we can get a flat buffer, then use it, otherwise do block by block
+  // uncompression
+  if (allocated_size >= uncompressed_len) {
+    SnappyArrayWriter writer(buf);
+    bool result = InternalUncompressAllTags(
+        &decompressor, &writer, uncompressed_len);
+    uncompressed->Append(buf, writer.Produced());
+    return result;
+  } else {
+    SnappySinkAllocator allocator(uncompressed);
+    SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
+    return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
+  }
+}
 
 } // end namespace snappy
-
--- a/other-licenses/snappy/src/snappy.h
+++ b/other-licenses/snappy/src/snappy.h
@@ -31,18 +31,18 @@
 // savings.
 //
 // For getting better compression ratios when you are compressing data
 // with long repeated sequences or compressing data that is similar to
 // other data, while still compressing fast, you might look at first
 // using BMDiff and then compressing the output of BMDiff with
 // Snappy.
 
-#ifndef UTIL_SNAPPY_SNAPPY_H__
-#define UTIL_SNAPPY_SNAPPY_H__
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
+#define THIRD_PARTY_SNAPPY_SNAPPY_H__
 
 #include <stddef.h>
 #include <string>
 
 #include "snappy-stubs-public.h"
 
 namespace snappy {
   class Source;
@@ -51,16 +51,23 @@ namespace snappy {
   // ------------------------------------------------------------------------
   // Generic compression/decompression routines.
   // ------------------------------------------------------------------------
 
   // Compress the bytes read from "*source" and append to "*sink". Return the
   // number of bytes written.
   size_t Compress(Source* source, Sink* sink);
 
+  // Find the uncompressed length of the given stream, as given by the header.
+  // Note that the true length could deviate from this; the stream could e.g.
+  // be truncated.
+  //
+  // Also note that this leaves "*source" in a state that is unsuitable for
+  // further operations, such as RawUncompress(). You will need to rewind
+  // or recreate the source yourself before attempting any further calls.
   bool GetUncompressedLength(Source* source, uint32* result);
 
   // ------------------------------------------------------------------------
   // Higher-level string based routines (should be sufficient for most users)
   // ------------------------------------------------------------------------
 
   // Sets "*output" to the compressed version of "input[0,input_length-1]".
   // Original contents of *output are lost.
@@ -72,16 +79,28 @@ namespace snappy {
   // Original contents of "*uncompressed" are lost.
   //
   // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
   //
   // returns false if the message is corrupted and could not be decompressed
   bool Uncompress(const char* compressed, size_t compressed_length,
                   string* uncompressed);
 
+  // Decompresses "compressed" to "*uncompressed".
+  //
+  // returns false if the message is corrupted and could not be decompressed
+  bool Uncompress(Source* compressed, Sink* uncompressed);
+
+  // This routine uncompresses as much of the "compressed" as possible
+  // into sink.  It returns the number of valid bytes added to sink
+  // (extra invalid bytes may have been added due to errors; the caller
+  // should ignore those). The emitted data typically has length
+  // GetUncompressedLength(), but may be shorter if an error is
+  // encountered.
+  size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed);
 
   // ------------------------------------------------------------------------
   // Lower-level character array based routines.  May be useful for
   // efficiency reasons in certain circumstances.
   // ------------------------------------------------------------------------
 
   // REQUIRES: "compressed" must point to an area of memory that is at
   // least "MaxCompressedLength(input_length)" bytes in length.
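
A companion sketch (not in the patch) for the best-effort entry point
UncompressAsMuchAsPossible() declared in the hunk above, again assuming the
ByteArraySource/UncheckedByteArraySink adapters; the helper and the sizing
contract for "out" are illustrative.

// "out" must be large enough for the full uncompressed output; the return
// value is how many leading bytes of it are valid, which is shorter than
// GetUncompressedLength() when the stream is corrupt or truncated.
static size_t BestEffortUncompress(const char* compressed, size_t n,
                                   char* out) {
  snappy::ByteArraySource source(compressed, n);
  snappy::UncheckedByteArraySink sink(out);
  return snappy::UncompressAsMuchAsPossible(&source, &sink);
}
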
@@ -112,16 +131,38 @@ namespace snappy {
 
   // Given data from the byte source 'compressed' generated by calling
   // the Snappy::Compress routine, this routine stores the uncompressed
   // data to
   //    uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
   // returns false if the message is corrupted and could not be decrypted
   bool RawUncompress(Source* compressed, char* uncompressed);
 
+  // Given data in "compressed[0..compressed_length-1]" generated by
+  // calling the Snappy::Compress routine, this routine
+  // stores the uncompressed data to the iovec "iov". The number of physical
+  // buffers in "iov" is given by iov_cnt and their cumulative size
+  // must be at least GetUncompressedLength(compressed). The individual buffers
+  // in "iov" must not overlap with each other.
+  //
+  // returns false if the message is corrupted and could not be decompressed
+  bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+                            const struct iovec* iov, size_t iov_cnt);
+
+  // Given data from the byte source 'compressed' generated by calling
+  // the Snappy::Compress routine, this routine stores the uncompressed
+  // data to the iovec "iov". The number of physical
+  // buffers in "iov" is given by iov_cnt and their cumulative size
+  // must be at least GetUncompressedLength(compressed). The individual buffers
+  // in "iov" must not overlap with each other.
+  //
+  // returns false if the message is corrupted and could not be decompressed
+  bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+                            size_t iov_cnt);
+
   // Returns the maximal size of the compressed representation of
   // input data that is "source_bytes" bytes in length;
   size_t MaxCompressedLength(size_t source_bytes);
 
   // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
   // Returns true and stores the length of the uncompressed data in
   // *result normally.  Returns false on parsing error.
   // This operation takes O(1) time.
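
An illustrative sketch (not part of the patch) of the character-array
RawUncompressToIOVec() overload declared above, splitting the output across two
caller-owned buffers whose combined length is assumed to be at least
GetUncompressedLength(compressed, compressed_length). On POSIX, struct iovec
comes from <sys/uio.h>; the helper name is made up for the example.

#include <sys/uio.h>  // struct iovec
#include "snappy.h"

static bool UncompressIntoTwoBuffers(const char* compressed,
                                     size_t compressed_length,
                                     char* head, size_t head_len,
                                     char* tail, size_t tail_len) {
  struct iovec iov[2];
  iov[0].iov_base = head;
  iov[0].iov_len = head_len;
  iov[1].iov_base = tail;
  iov[1].iov_len = tail_len;
  // Returns false if the stream is corrupt or the buffers are too small;
  // the individual buffers must not overlap.
  return snappy::RawUncompressToIOVec(compressed, compressed_length, iov, 2);
}
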
@@ -130,26 +171,33 @@ namespace snappy {
 
   // Returns true iff the contents of "compressed[]" can be uncompressed
   // successfully.  Does not return the uncompressed data.  Takes
   // time proportional to compressed_length, but is usually at least
   // a factor of four faster than actual decompression.
   bool IsValidCompressedBuffer(const char* compressed,
                                size_t compressed_length);
 
-  // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
+  // Returns true iff the contents of "compressed" can be uncompressed
+  // successfully.  Does not return the uncompressed data.  Takes
+  // time proportional to the length of *compressed, but is usually at least
+  // a factor of four faster than actual decompression.
+  // On success, consumes all of *compressed.  On failure, consumes an
+  // unspecified prefix of *compressed.
+  bool IsValidCompressed(Source* compressed);
+
+  // The size of a compression block. Note that many parts of the compression
+  // code assume that kBlockSize <= 65536; in particular, the hash table
+  // can only store 16-bit offsets, and EmitCopy() also assumes the offset
+  // is 65535 bytes or less. Note also that if you change this, it will
+  // affect the framing format (see framing_format.txt).
   //
-  // New Compression code chops up the input into blocks of at most
-  // the following size.  This ensures that back-references in the
-  // output never cross kBlockSize block boundaries.  This can be
-  // helpful in implementing blocked decompression.  However the
-  // decompression code should not rely on this guarantee since older
-  // compression code may not obey it.
-  static const int kBlockLog = 15;
+  // Note that there might be older data around that is compressed with larger
+  // block sizes, so the decompression code should not rely on the
+  // non-existence of long backreferences.
+  static const int kBlockLog = 16;
   static const size_t kBlockSize = 1 << kBlockLog;
 
   static const int kMaxHashTableBits = 14;
   static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
-
 }  // end namespace snappy
 
-
-#endif  // UTIL_SNAPPY_SNAPPY_H__
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_H__
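
Side note on the constant change above: with kBlockLog going from 15 to 16,
kBlockSize becomes 1 << 16 = 65536 bytes, exactly the ceiling the new comment
describes (16-bit offsets in the hash table, EmitCopy() offsets of at most
65535). A client that wants to pin that invariant could assert it at compile
time; the pre-C++11 sketch below is illustrative only.

#include "snappy.h"

// Fails to compile (negative array size) if kBlockSize ever grows past what
// 16-bit offsets can represent.
typedef char assert_block_size_fits_16_bit_offsets[
    snappy::kBlockSize <= 65536 ? 1 : -1];
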
--- a/other-licenses/snappy/src/snappy_unittest.cc
+++ b/other-licenses/snappy/src/snappy_unittest.cc
@@ -54,22 +54,24 @@ DEFINE_bool(quicklz, false,
             "Run quickLZ compression (http://www.quicklz.com/)");
 DEFINE_bool(liblzf, false,
             "Run libLZF compression "
             "(http://www.goof.com/pcg/marc/liblzf.html)");
 DEFINE_bool(fastlz, false,
             "Run FastLZ compression (http://www.fastlz.org/");
 DEFINE_bool(snappy, true, "Run snappy compression");
 
-
 DEFINE_bool(write_compressed, false,
             "Write compressed versions of each file to <file>.comp");
 DEFINE_bool(write_uncompressed, false,
             "Write uncompressed versions of each file to <file>.uncomp");
 
+DEFINE_bool(snappy_dump_decompression_table, false,
+            "If true, we print the decompression table during tests.");
+
 namespace snappy {
 
 
 #ifdef HAVE_FUNC_MMAP
 
 // To test against code that reads beyond its input, this class copies a
 // string to a newly allocated group of pages, the last of which
 // is made unreadable via mprotect. Note that we need to allocate the
@@ -156,16 +158,17 @@ static size_t MinimumRequiredOutputSpace
       return max(static_cast<int>(ceil(input_size * 1.05)), 66);
 #endif  // FASTLZ_VERSION
 
     case SNAPPY:
       return snappy::MaxCompressedLength(input_size);
 
     default:
       LOG(FATAL) << "Unknown compression type number " << comp;
+      return 0;
   }
 }
 
 // Returns true if we successfully compressed, false otherwise.
 //
 // If compressed_is_preallocated is set, do not resize the compressed buffer.
 // This is typically what you want for a benchmark, in order to not spend
 // time in the memory allocator. If you do set this flag, however,
@@ -273,17 +276,16 @@ static bool Compress(const char* input, 
                           &destlen);
       CHECK_LE(destlen, snappy::MaxCompressedLength(input_size));
       if (!compressed_is_preallocated) {
         compressed->resize(destlen);
       }
       break;
     }
 
-
     default: {
       return false;     // the asked-for library wasn't compiled in
     }
   }
   return true;
 }
 
 static bool Uncompress(const string& compressed, CompressorType comp,
@@ -365,17 +367,16 @@ static bool Uncompress(const string& com
 #endif  // FASTLZ_VERSION
 
     case SNAPPY: {
       snappy::RawUncompress(compressed.data(), compressed.size(),
                             string_as_array(output));
       break;
     }
 
-
     default: {
       return false;     // the asked-for library wasn't compiled in
     }
   }
   return true;
 }
 
 static void Measure(const char* data,
@@ -443,17 +444,17 @@ static void Measure(const char* data,
           Uncompress(compressed[b], comp, input_length[b], &output[b]);
       utimer.Stop();
 
       ctime[run] = ctimer.Get();
       utime[run] = utimer.Get();
     }
 
     compressed_size = 0;
-    for (int i = 0; i < compressed.size(); i++) {
+    for (size_t i = 0; i < compressed.size(); i++) {
       compressed_size += compressed[i].size();
     }
   }
 
   sort(ctime, ctime + kRuns);
   sort(utime, utime + kRuns);
   const int med = kRuns/2;
 
@@ -469,33 +470,89 @@ static void Measure(const char* data,
          x.c_str(),
          block_size/(1<<20),
          static_cast<int>(length), static_cast<uint32>(compressed_size),
          (compressed_size * 100.0) / max<int>(1, length),
          comp_rate,
          urate.c_str());
 }
 
-
 static int VerifyString(const string& input) {
   string compressed;
   DataEndingAtUnreadablePage i(input);
   const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
   CHECK_EQ(written, compressed.size());
   CHECK_LE(compressed.size(),
            snappy::MaxCompressedLength(input.size()));
   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
 
   string uncompressed;
   DataEndingAtUnreadablePage c(compressed);
   CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
   CHECK_EQ(uncompressed, input);
   return uncompressed.size();
 }
 
+static void VerifyStringSink(const string& input) {
+  string compressed;
+  DataEndingAtUnreadablePage i(input);
+  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(),
+           snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+  string uncompressed;
+  uncompressed.resize(input.size());
+  snappy::UncheckedByteArraySink sink(string_as_array(&uncompressed));
+  DataEndingAtUnreadablePage c(compressed);
+  snappy::ByteArraySource source(c.data(), c.size());
+  CHECK(snappy::Uncompress(&source, &sink));
+  CHECK_EQ(uncompressed, input);
+}
+
+static void VerifyIOVec(const string& input) {
+  string compressed;
+  DataEndingAtUnreadablePage i(input);
+  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(),
+           snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+  // Try uncompressing into an iovec containing a random number of entries
+  // ranging from 1 to 10.
+  char* buf = new char[input.size()];
+  ACMRandom rnd(input.size());
+  size_t num = rnd.Next() % 10 + 1;
+  if (input.size() < num) {
+    num = input.size();
+  }
+  struct iovec* iov = new iovec[num];
+  int used_so_far = 0;
+  for (size_t i = 0; i < num; ++i) {
+    iov[i].iov_base = buf + used_so_far;
+    if (i == num - 1) {
+      iov[i].iov_len = input.size() - used_so_far;
+    } else {
+      // Randomly choose to insert a 0 byte entry.
+      if (rnd.OneIn(5)) {
+        iov[i].iov_len = 0;
+      } else {
+        iov[i].iov_len = rnd.Uniform(input.size());
+      }
+    }
+    used_so_far += iov[i].iov_len;
+  }
+  CHECK(snappy::RawUncompressToIOVec(
+      compressed.data(), compressed.size(), iov, num));
+  CHECK(!memcmp(buf, input.data(), input.size()));
+  delete[] iov;
+  delete[] buf;
+}
 
 // Test that data compressed by a compressor that does not
 // obey block sizes is uncompressed properly.
 static void VerifyNonBlockedCompression(const string& input) {
   if (input.length() > snappy::kBlockSize) {
     // We cannot test larger blocks than the maximum block size, obviously.
     return;
   }
@@ -517,16 +574,38 @@ static void VerifyNonBlockedCompression(
                                                 dest, table, table_size);
   compressed.resize(end - compressed.data());
 
   // Uncompress into string
   string uncomp_str;
   CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncomp_str));
   CHECK_EQ(uncomp_str, input);
 
+  // Uncompress using source/sink
+  string uncomp_str2;
+  uncomp_str2.resize(input.size());
+  snappy::UncheckedByteArraySink sink(string_as_array(&uncomp_str2));
+  snappy::ByteArraySource source(compressed.data(), compressed.size());
+  CHECK(snappy::Uncompress(&source, &sink));
+  CHECK_EQ(uncomp_str2, input);
+
+  // Uncompress into iovec
+  {
+    static const int kNumBlocks = 10;
+    struct iovec vec[kNumBlocks];
+    const int block_size = 1 + input.size() / kNumBlocks;
+    string iovec_data(block_size * kNumBlocks, 'x');
+    for (int i = 0; i < kNumBlocks; i++) {
+      vec[i].iov_base = string_as_array(&iovec_data) + i * block_size;
+      vec[i].iov_len = block_size;
+    }
+    CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
+                                       vec, kNumBlocks));
+    CHECK_EQ(string(iovec_data.data(), input.size()), input);
+  }
 }
 
 // Expand the input so that it is at least K times as big as block size
 static string Expand(const string& input) {
   static const int K = 3;
   string data = input;
   while (data.size() < K * snappy::kBlockSize) {
     data += input;
@@ -535,97 +614,104 @@ static string Expand(const string& input
 }
 
 static int Verify(const string& input) {
   VLOG(1) << "Verifying input of size " << input.size();
 
   // Compress using string based routines
   const int result = VerifyString(input);
 
+  // Verify using sink based routines
+  VerifyStringSink(input);
 
   VerifyNonBlockedCompression(input);
+  VerifyIOVec(input);
   if (!input.empty()) {
-    VerifyNonBlockedCompression(Expand(input));
+    const string expanded = Expand(input);
+    VerifyNonBlockedCompression(expanded);
+    VerifyIOVec(expanded);
   }
 
-
   return result;
 }
 
-// This test checks to ensure that snappy doesn't coredump if it gets
-// corrupted data.
 
 static bool IsValidCompressedBuffer(const string& c) {
   return snappy::IsValidCompressedBuffer(c.data(), c.size());
 }
 static bool Uncompress(const string& c, string* u) {
   return snappy::Uncompress(c.data(), c.size(), u);
 }
 
-TYPED_TEST(CorruptedTest, VerifyCorrupted) {
+// This test checks to ensure that snappy doesn't coredump if it gets
+// corrupted data.
+TEST(CorruptedTest, VerifyCorrupted) {
   string source = "making sure we don't crash with corrupted input";
   VLOG(1) << source;
   string dest;
-  TypeParam uncmp;
+  string uncmp;
   snappy::Compress(source.data(), source.size(), &dest);
 
   // Mess around with the data. It's hard to simulate all possible
   // corruptions; this is just one example ...
   CHECK_GT(dest.size(), 3);
   dest[1]--;
   dest[3]++;
   // this really ought to fail.
-  CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-  CHECK(!Uncompress(TypeParam(dest), &uncmp));
+  CHECK(!IsValidCompressedBuffer(dest));
+  CHECK(!Uncompress(dest, &uncmp));
 
   // This is testing for a security bug - a buffer that decompresses to 100k
   // but we lie in the snappy header and only reserve 0 bytes of memory :)
   source.resize(100000);
-  for (int i = 0; i < source.length(); ++i) {
+  for (size_t i = 0; i < source.length(); ++i) {
     source[i] = 'A';
   }
   snappy::Compress(source.data(), source.size(), &dest);
   dest[0] = dest[1] = dest[2] = dest[3] = 0;
-  CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-  CHECK(!Uncompress(TypeParam(dest), &uncmp));
+  CHECK(!IsValidCompressedBuffer(dest));
+  CHECK(!Uncompress(dest, &uncmp));
 
   if (sizeof(void *) == 4) {
     // Another security check; check a crazy big length can't DoS us with an
     // over-allocation.
     // Currently this is done only for 32-bit builds.  On 64-bit builds,
-    // where 3GBytes might be an acceptable allocation size, Uncompress()
+    // where 3 GB might be an acceptable allocation size, Uncompress()
     // attempts to decompress, and sometimes causes the test to run out of
     // memory.
-    dest[0] = dest[1] = dest[2] = dest[3] = 0xff;
-    // This decodes to a really large size, i.e., 3221225471 bytes
+    dest[0] = dest[1] = dest[2] = dest[3] = '\xff';
+    // This decodes to a really large size, i.e., about 3 GB.
     dest[4] = 'k';
-    CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-    CHECK(!Uncompress(TypeParam(dest), &uncmp));
-    dest[0] = dest[1] = dest[2] = 0xff;
-    dest[3] = 0x7f;
-    CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
-    CHECK(!Uncompress(TypeParam(dest), &uncmp));
+    CHECK(!IsValidCompressedBuffer(dest));
+    CHECK(!Uncompress(dest, &uncmp));
   } else {
     LOG(WARNING) << "Crazy decompression lengths not checked on 64-bit build";
   }
 
+  // This decodes to about 2 MB; much smaller, but should still fail.
+  dest[0] = dest[1] = dest[2] = '\xff';
+  dest[3] = 0x00;
+  CHECK(!IsValidCompressedBuffer(dest));
+  CHECK(!Uncompress(dest, &uncmp));
+
   // try reading stuff in from a bad file.
   for (int i = 1; i <= 3; ++i) {
-    string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str());
+    string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str(),
+                                   0);
     string uncmp;
     // check that we don't return a crazy length
     size_t ulen;
     CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
           || (ulen < (1<<20)));
     uint32 ulen2;
     snappy::ByteArraySource source(data.data(), data.size());
     CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
           (ulen2 < (1<<20)));
-    CHECK(!IsValidCompressedBuffer(TypeParam(data)));
-    CHECK(!Uncompress(TypeParam(data), &uncmp));
+    CHECK(!IsValidCompressedBuffer(data));
+    CHECK(!Uncompress(data, &uncmp));
   }
 }
 
 // Helper routines to construct arbitrary compressed strings.
 // These mirror the compression code in snappy.cc, but are copied
 // here so that we can bypass some limitations in the how snappy.cc
 // invokes these routines.
 static void AppendLiteral(string* dst, const string& literal) {
@@ -656,17 +742,17 @@ static void AppendCopy(string* dst, int 
       to_copy = 64;
     } else if (length > 64) {
       to_copy = 60;
     } else {
       to_copy = length;
     }
     length -= to_copy;
 
-    if ((to_copy < 12) && (offset < 2048)) {
+    if ((to_copy >= 4) && (to_copy < 12) && (offset < 2048)) {
       assert(to_copy-4 < 8);            // Must fit in 3 bits
       dst->push_back(1 | ((to_copy-4) << 2) | ((offset >> 8) << 5));
       dst->push_back(offset & 0xff);
     } else if (offset < 65536) {
       dst->push_back(2 | ((to_copy-1) << 2));
       dst->push_back(offset & 0xff);
       dst->push_back(offset >> 8);
     } else {
@@ -713,17 +799,17 @@ TEST(Snappy, RandomData) {
 
   const int num_ops = 20000;
   for (int i = 0; i < num_ops; i++) {
     if ((i % 1000) == 0) {
       VLOG(0) << "Random op " << i << " of " << num_ops;
     }
 
     string x;
-    int len = rnd.Uniform(4096);
+    size_t len = rnd.Uniform(4096);
     if (i < 100) {
       len = 65536 + rnd.Uniform(65536);
     }
     while (x.size() < len) {
       int run_len = 1;
       if (rnd.OneIn(10)) {
         run_len = rnd.Skewed(8);
       }
@@ -761,20 +847,132 @@ TEST(Snappy, FourByteOffset) {
     src += fragment2;
   }
   AppendCopy(&compressed, src.size(), fragment1.size());
   src += fragment1;
   CHECK_EQ(length, src.size());
 
   string uncompressed;
   CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-  CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed));
+  CHECK(snappy::Uncompress(compressed.data(), compressed.size(),
+                           &uncompressed));
   CHECK_EQ(uncompressed, src);
 }
 
+TEST(Snappy, IOVecEdgeCases) {
+  // Test some tricky edge cases in the iovec output that are not necessarily
+  // exercised by random tests.
+
+  // Our output blocks look like this initially (the last iovec is bigger
+  // than depicted):
+  // [  ] [ ] [    ] [        ] [        ]
+  static const int kLengths[] = { 2, 1, 4, 8, 128 };
+
+  struct iovec iov[ARRAYSIZE(kLengths)];
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    iov[i].iov_base = new char[kLengths[i]];
+    iov[i].iov_len = kLengths[i];
+  }
+
+  string compressed;
+  Varint::Append32(&compressed, 22);
+
+  // A literal whose output crosses three blocks.
+  // [ab] [c] [123 ] [        ] [        ]
+  AppendLiteral(&compressed, "abc123");
+
+  // A copy whose output crosses two blocks (source and destination
+  // segments marked).
+  // [ab] [c] [1231] [23      ] [        ]
+  //           ^--^   --
+  AppendCopy(&compressed, 3, 3);
+
+  // A copy where the input is, at first, in the block before the output:
+  //
+  // [ab] [c] [1231] [231231  ] [        ]
+  //           ^---     ^---
+  // Then during the copy, the pointers move such that the input and
+  // output pointers are in the same block:
+  //
+  // [ab] [c] [1231] [23123123] [        ]
+  //                  ^-    ^-
+  // And then they move again, so that the output pointer is no longer
+  // in the same block as the input pointer:
+  // [ab] [c] [1231] [23123123] [123     ]
+  //                    ^--      ^--
+  AppendCopy(&compressed, 6, 9);
+
+  // Finally, a copy where the input is from several blocks back,
+  // and it also crosses three blocks:
+  //
+  // [ab] [c] [1231] [23123123] [123b    ]
+  //   ^                            ^
+  // [ab] [c] [1231] [23123123] [123bc   ]
+  //       ^                         ^
+  // [ab] [c] [1231] [23123123] [123bc12 ]
+  //           ^-                     ^-
+  AppendCopy(&compressed, 17, 4);
+
+  CHECK(snappy::RawUncompressToIOVec(
+      compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+  CHECK_EQ(0, memcmp(iov[0].iov_base, "ab", 2));
+  CHECK_EQ(0, memcmp(iov[1].iov_base, "c", 1));
+  CHECK_EQ(0, memcmp(iov[2].iov_base, "1231", 4));
+  CHECK_EQ(0, memcmp(iov[3].iov_base, "23123123", 8));
+  CHECK_EQ(0, memcmp(iov[4].iov_base, "123bc12", 7));
+
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    delete[] reinterpret_cast<char *>(iov[i].iov_base);
+  }
+}
+
+TEST(Snappy, IOVecLiteralOverflow) {
+  static const int kLengths[] = { 3, 4 };
+
+  struct iovec iov[ARRAYSIZE(kLengths)];
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    iov[i].iov_base = new char[kLengths[i]];
+    iov[i].iov_len = kLengths[i];
+  }
+
+  string compressed;
+  Varint::Append32(&compressed, 8);
+
+  AppendLiteral(&compressed, "12345678");
+
+  CHECK(!snappy::RawUncompressToIOVec(
+      compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    delete[] reinterpret_cast<char *>(iov[i].iov_base);
+  }
+}
+
+TEST(Snappy, IOVecCopyOverflow) {
+  static const int kLengths[] = { 3, 4 };
+
+  struct iovec iov[ARRAYSIZE(kLengths)];
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    iov[i].iov_base = new char[kLengths[i]];
+    iov[i].iov_len = kLengths[i];
+  }
+
+  string compressed;
+  Varint::Append32(&compressed, 8);
+
+  AppendLiteral(&compressed, "123");
+  AppendCopy(&compressed, 3, 5);
+
+  CHECK(!snappy::RawUncompressToIOVec(
+      compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    delete[] reinterpret_cast<char *>(iov[i].iov_base);
+  }
+}
 
 static bool CheckUncompressedLength(const string& compressed,
                                     size_t* ulength) {
   const bool result1 = snappy::GetUncompressedLength(compressed.data(),
                                                      compressed.size(),
                                                      ulength);
 
   snappy::ByteArraySource source(compressed.data(), compressed.size());
@@ -792,28 +990,42 @@ TEST(SnappyCorruption, TruncatedVarint) 
   CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
   CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
                             &uncompressed));
 }
 
 TEST(SnappyCorruption, UnterminatedVarint) {
   string compressed, uncompressed;
   size_t ulength;
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
-  compressed.push_back(128);
+  compressed.push_back('\x80');
+  compressed.push_back('\x80');
+  compressed.push_back('\x80');
+  compressed.push_back('\x80');
+  compressed.push_back('\x80');
   compressed.push_back(10);
   CHECK(!CheckUncompressedLength(compressed, &ulength));
   CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
   CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
                             &uncompressed));
 }
 
+TEST(SnappyCorruption, OverflowingVarint) {
+  string compressed, uncompressed;
+  size_t ulength;
+  compressed.push_back('\xfb');
+  compressed.push_back('\xff');
+  compressed.push_back('\xff');
+  compressed.push_back('\xff');
+  compressed.push_back('\x7f');
+  CHECK(!CheckUncompressedLength(compressed, &ulength));
+  CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+  CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+                            &uncompressed));
+}
+
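
For reference, a standalone worked decode (not part of the patch) of the five
bytes used in OverflowingVarint above: each byte contributes its low seven
bits, least-significant group first, so the encoded length is just under 2^35
and cannot fit in the uint32 result, which is why GetUncompressedLength() must
reject the stream instead of wrapping around.

#include <stdint.h>
#include <stdio.h>

// Decodes the little-endian base-128 varint fb ff ff ff 7f; prints
// 34359738363 (about 32 GB), far outside uint32 range.
int main() {
  const unsigned char bytes[] = { 0xfb, 0xff, 0xff, 0xff, 0x7f };
  uint64_t value = 0;
  for (int i = 0; i < 5; ++i) {
    value |= static_cast<uint64_t>(bytes[i] & 0x7f) << (7 * i);
  }
  printf("%llu\n", static_cast<unsigned long long>(value));
  return 0;
}
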
 TEST(Snappy, ReadPastEndOfBuffer) {
   // Check that we do not read past end of input
 
   // Make a compressed string that ends with a single-byte literal
   string compressed;
   Varint::Append32(&compressed, 1);
   AppendLiteral(&compressed, "x");
 
@@ -834,17 +1046,16 @@ TEST(Snappy, ZeroOffsetCopy) {
 
 TEST(Snappy, ZeroOffsetCopyValidation) {
   const char* compressed = "\x05\x12\x00\x00";
   //  \x05              Length
   //  \x12\x00\x00      Copy with offset==0, length==5
   EXPECT_FALSE(snappy::IsValidCompressedBuffer(compressed, 4));
 }
 
-
 namespace {
 
 int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
   return snappy::internal::FindMatchLength(s1, s2, s2 + length);
 }
 
 }  // namespace
 
@@ -961,46 +1172,139 @@ TEST(Snappy, FindMatchLengthRandom) {
       EXPECT_NE(s[matched], t[matched]);
       for (int j = 0; j < matched; j++) {
         EXPECT_EQ(s[j], t[j]);
       }
     }
   }
 }
 
+static uint16 MakeEntry(unsigned int extra,
+                        unsigned int len,
+                        unsigned int copy_offset) {
+  // Check that all of the fields fit within the allocated space
+  assert(extra       == (extra & 0x7));          // At most 3 bits
+  assert(copy_offset == (copy_offset & 0x7));    // At most 3 bits
+  assert(len         == (len & 0x7f));           // At most 7 bits
+  return len | (copy_offset << 8) | (extra << 11);
+}
+
+// Check that the decompression table is correct, and optionally print out
+// the computed one.
+TEST(Snappy, VerifyCharTable) {
+  using snappy::internal::LITERAL;
+  using snappy::internal::COPY_1_BYTE_OFFSET;
+  using snappy::internal::COPY_2_BYTE_OFFSET;
+  using snappy::internal::COPY_4_BYTE_OFFSET;
+  using snappy::internal::char_table;
+  using snappy::internal::wordmask;
+
+  uint16 dst[256];
+
+  // Place invalid entries in all places to detect missing initialization
+  int assigned = 0;
+  for (int i = 0; i < 256; i++) {
+    dst[i] = 0xffff;
+  }
+
+  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
+  for (unsigned int len = 1; len <= 60; len++) {
+    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
+    assigned++;
+  }
+
+  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
+  // encode the number of bytes of length info that follow the opcode.
+  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
+    // We set the length field in the lookup table to 1 because extra
+    // bytes encode len-1.
+    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+    assigned++;
+  }
+
+  // COPY_1_BYTE_OFFSET.
+  //
+  // The tag byte in the compressed data stores len-4 in 3 bits, and
+  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
+  //
+  // This format is used for length in range [4..11] and offset in
+  // range [0..2047]
+  for (unsigned int len = 4; len < 12; len++) {
+    for (unsigned int offset = 0; offset < 2048; offset += 256) {
+      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+        MakeEntry(1, len, offset>>8);
+      assigned++;
+    }
+  }
+
+  // COPY_2_BYTE_OFFSET.
+  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+  for (unsigned int len = 1; len <= 64; len++) {
+    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
+    assigned++;
+  }
+
+  // COPY_4_BYTE_OFFSET.
+  // Tag contains len-1 in top 6 bits, and offset in next four bytes.
+  for (unsigned int len = 1; len <= 64; len++) {
+    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
+    assigned++;
+  }
+
+  // Check that each entry was initialized exactly once.
+  EXPECT_EQ(256, assigned) << "Assigned only " << assigned << " of 256";
+  for (int i = 0; i < 256; i++) {
+    EXPECT_NE(0xffff, dst[i]) << "Did not assign byte " << i;
+  }
+
+  if (FLAGS_snappy_dump_decompression_table) {
+    printf("static const uint16 char_table[256] = {\n  ");
+    for (int i = 0; i < 256; i++) {
+      printf("0x%04x%s",
+             dst[i],
+             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
+    }
+    printf("};\n");
+  }
+
+  // Check that computed table matched recorded table.
+  for (int i = 0; i < 256; i++) {
+    EXPECT_EQ(dst[i], char_table[i]) << "Mismatch in byte " << i;
+  }
+}
 
 static void CompressFile(const char* fname) {
   string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
+  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
 
   string compressed;
   Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false);
 
-  File::WriteStringToFileOrDie(compressed,
-                               string(fname).append(".comp").c_str());
+  CHECK_OK(file::SetContents(string(fname).append(".comp"), compressed,
+                             file::Defaults()));
 }
 
 static void UncompressFile(const char* fname) {
   string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
+  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
 
   size_t uncompLength;
   CHECK(CheckUncompressedLength(fullinput, &uncompLength));
 
   string uncompressed;
   uncompressed.resize(uncompLength);
   CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed));
 
-  File::WriteStringToFileOrDie(uncompressed,
-                               string(fname).append(".uncomp").c_str());
+  CHECK_OK(file::SetContents(string(fname).append(".uncomp"), uncompressed,
+                             file::Defaults()));
 }
 
 static void MeasureFile(const char* fname) {
   string fullinput;
-  File::ReadFileToStringOrDie(fname, &fullinput);
+  CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults()));
   printf("%-40s :\n", fname);
 
   int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
   int end_len = fullinput.size();
   if (FLAGS_end_len >= 0) {
     end_len = min<int>(fullinput.size(), FLAGS_end_len);
   }
   for (int len = start_len; len <= end_len; len++) {
@@ -1023,92 +1327,167 @@ static void MeasureFile(const char* fnam
       Measure(input, len, SNAPPY, repeats, 1024<<10);
     }
   }
 }
 
 static struct {
   const char* label;
   const char* filename;
+  size_t size_limit;
 } files[] = {
-  { "html", "html" },
-  { "urls", "urls.10K" },
-  { "jpg", "house.jpg" },
-  { "pdf", "mapreduce-osdi-1.pdf" },
-  { "html4", "html_x_4" },
-  { "cp", "cp.html" },
-  { "c", "fields.c" },
-  { "lsp", "grammar.lsp" },
-  { "xls", "kennedy.xls" },
-  { "txt1", "alice29.txt" },
-  { "txt2", "asyoulik.txt" },
-  { "txt3", "lcet10.txt" },
-  { "txt4", "plrabn12.txt" },
-  { "bin", "ptt5" },
-  { "sum", "sum" },
-  { "man", "xargs.1" },
-  { "pb", "geo.protodata" },
-  { "gaviota", "kppkn.gtb" },
+  { "html", "html", 0 },
+  { "urls", "urls.10K", 0 },
+  { "jpg", "fireworks.jpeg", 0 },
+  { "jpg_200", "fireworks.jpeg", 200 },
+  { "pdf", "paper-100k.pdf", 0 },
+  { "html4", "html_x_4", 0 },
+  { "txt1", "alice29.txt", 0 },
+  { "txt2", "asyoulik.txt", 0 },
+  { "txt3", "lcet10.txt", 0 },
+  { "txt4", "plrabn12.txt", 0 },
+  { "pb", "geo.protodata", 0 },
+  { "gaviota", "kppkn.gtb", 0 },
 };
 
 static void BM_UFlat(int iters, int arg) {
   StopBenchmarkTiming();
 
   // Pick file to process based on "arg"
   CHECK_GE(arg, 0);
   CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
+  string contents = ReadTestDataFile(files[arg].filename,
+                                     files[arg].size_limit);
 
   string zcontents;
   snappy::Compress(contents.data(), contents.size(), &zcontents);
   char* dst = new char[contents.size()];
 
   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
                              static_cast<int64>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
     CHECK(snappy::RawUncompress(zcontents.data(), zcontents.size(), dst));
   }
   StopBenchmarkTiming();
 
   delete[] dst;
 }
-BENCHMARK(BM_UFlat)->DenseRange(0, 17);
+BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
 
 static void BM_UValidate(int iters, int arg) {
   StopBenchmarkTiming();
 
   // Pick file to process based on "arg"
   CHECK_GE(arg, 0);
   CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
+  string contents = ReadTestDataFile(files[arg].filename,
+                                     files[arg].size_limit);
 
   string zcontents;
   snappy::Compress(contents.data(), contents.size(), &zcontents);
 
   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
                              static_cast<int64>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
     CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
   }
   StopBenchmarkTiming();
 }
 BENCHMARK(BM_UValidate)->DenseRange(0, 4);
 
+static void BM_UIOVec(int iters, int arg) {
+  StopBenchmarkTiming();
+
+  // Pick file to process based on "arg"
+  CHECK_GE(arg, 0);
+  CHECK_LT(arg, ARRAYSIZE(files));
+  string contents = ReadTestDataFile(files[arg].filename,
+                                     files[arg].size_limit);
+
+  string zcontents;
+  snappy::Compress(contents.data(), contents.size(), &zcontents);
+
+  // Uncompress into an iovec containing ten entries.
+  const int kNumEntries = 10;
+  struct iovec iov[kNumEntries];
+  char *dst = new char[contents.size()];
+  int used_so_far = 0;
+  for (int i = 0; i < kNumEntries; ++i) {
+    iov[i].iov_base = dst + used_so_far;
+    if (used_so_far == contents.size()) {
+      iov[i].iov_len = 0;
+      continue;
+    }
+
+    if (i == kNumEntries - 1) {
+      iov[i].iov_len = contents.size() - used_so_far;
+    } else {
+      iov[i].iov_len = contents.size() / kNumEntries;
+    }
+    used_so_far += iov[i].iov_len;
+  }
+
+  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                             static_cast<int64>(contents.size()));
+  SetBenchmarkLabel(files[arg].label);
+  StartBenchmarkTiming();
+  while (iters-- > 0) {
+    CHECK(snappy::RawUncompressToIOVec(zcontents.data(), zcontents.size(), iov,
+                                       kNumEntries));
+  }
+  StopBenchmarkTiming();
+
+  delete[] dst;
+}
+BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
+
+static void BM_UFlatSink(int iters, int arg) {
+  StopBenchmarkTiming();
+
+  // Pick file to process based on "arg"
+  CHECK_GE(arg, 0);
+  CHECK_LT(arg, ARRAYSIZE(files));
+  string contents = ReadTestDataFile(files[arg].filename,
+                                     files[arg].size_limit);
+
+  string zcontents;
+  snappy::Compress(contents.data(), contents.size(), &zcontents);
+  char* dst = new char[contents.size()];
+
+  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+                             static_cast<int64>(contents.size()));
+  SetBenchmarkLabel(files[arg].label);
+  StartBenchmarkTiming();
+  while (iters-- > 0) {
+    snappy::ByteArraySource source(zcontents.data(), zcontents.size());
+    snappy::UncheckedByteArraySink sink(dst);
+    CHECK(snappy::Uncompress(&source, &sink));
+  }
+  StopBenchmarkTiming();
+
+  string s(dst, contents.size());
+  CHECK_EQ(contents, s);
+
+  delete[] dst;
+}
+
+BENCHMARK(BM_UFlatSink)->DenseRange(0, ARRAYSIZE(files) - 1);
 
 static void BM_ZFlat(int iters, int arg) {
   StopBenchmarkTiming();
 
   // Pick file to process based on "arg"
   CHECK_GE(arg, 0);
   CHECK_LT(arg, ARRAYSIZE(files));
-  string contents = ReadTestDataFile(files[arg].filename);
+  string contents = ReadTestDataFile(files[arg].filename,
+                                     files[arg].size_limit);
 
   char* dst = new char[snappy::MaxCompressedLength(contents.size())];
 
   SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
                              static_cast<int64>(contents.size()));
   StartBenchmarkTiming();
 
   size_t zsize = 0;
@@ -1119,28 +1498,25 @@ static void BM_ZFlat(int iters, int arg)
   const double compression_ratio =
       static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
   SetBenchmarkLabel(StringPrintf("%s (%.2f %%)",
                                  files[arg].label, 100.0 * compression_ratio));
   VLOG(0) << StringPrintf("compression for %s: %zd -> %zd bytes",
                           files[arg].label, contents.size(), zsize);
   delete[] dst;
 }
-BENCHMARK(BM_ZFlat)->DenseRange(0, 17);
-
+BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
 
 }  // namespace snappy
 
 
 int main(int argc, char** argv) {
   InitGoogle(argv[0], &argc, &argv, true);
-  File::Init();
   RunSpecifiedBenchmarks();
 
-
   if (argc >= 2) {
     for (int arg = 1; arg < argc; arg++) {
       if (FLAGS_write_compressed) {
         CompressFile(argv[arg]);
       } else if (FLAGS_write_uncompressed) {
         UncompressFile(argv[arg]);
       } else {
         MeasureFile(argv[arg]);