Merge m-c to m-i
author: Phil Ringnalda <philringnalda@gmail.com>
Sat, 25 Feb 2017 13:35:20 -0800
changeset 373995 132f7517c1f6c81fc0408c0b24ed2990ecee29e5
parent 373994 7fb6bef8f7d62958b76ab61228ef59de9784fb35 (current diff)
parent 373963 a08ec245fa24d573fc99e81210ecc09de734cdd3 (diff)
child 373996 aa863fbdf5c21235a92d8e54e23a9ddabd6069f8
push id: 10863
push user: jlorenzo@mozilla.com
push date: Mon, 06 Mar 2017 23:02:23 +0000
treeherder: mozilla-aurora@0931190cd725 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
milestone: 54.0a1
Merge m-c to m-i
js/src/vm/EnvironmentObject.cpp
layout/reftests/css-grid/reftest-stylo.list
mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11PreviousPostFailedException.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11RecordPostFailedException.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/Sync11Configuration.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConstrainedServer11Repository.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11Repository.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11RepositorySession.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/SafeConstrainedServer11Repository.java
mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/SerialRecordConsumer.java
mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer11Repository.java
mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer11RepositorySession.java
mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestSyncConfiguration.java
mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/test/TestSafeConstrainedServer11Repository.java
toolkit/modules/SpatialNavigation.jsm
toolkit/modules/tests/mochitest/.eslintrc.js
toolkit/modules/tests/mochitest/mochitest.ini
toolkit/modules/tests/mochitest/test_spatial_navigation.html
--- a/browser/components/search/test/browser.ini
+++ b/browser/components/search/test/browser.ini
@@ -26,16 +26,17 @@ support-files =
 skip-if = os == "mac" # bug 967013
 [browser_ddg.js]
 [browser_ddg_behavior.js]
 [browser_google.js]
 skip-if = artifact # bug 1315953
 [browser_google_codes.js]
 skip-if = artifact # bug 1315953
 [browser_google_nocodes.js]
+skip-if = artifact # bug 1315953
 [browser_google_behavior.js]
 skip-if = artifact # bug 1315953
 [browser_healthreport.js]
 [browser_hiddenOneOffs_cleanup.js]
 [browser_hiddenOneOffs_diacritics.js]
 [browser_oneOffContextMenu.js]
 [browser_oneOffContextMenu_setDefault.js]
 [browser_oneOffHeader.js]
--- a/browser/modules/ExtensionsUI.jsm
+++ b/browser/modules/ExtensionsUI.jsm
@@ -187,21 +187,17 @@ this.ExtensionsUI = {
   },
 
   // Create a set of formatted strings for a permission prompt
   _buildStrings(info) {
     let result = {};
 
     let bundle = Services.strings.createBundle(BROWSER_PROPERTIES);
 
-    let name = info.addon.name;
-    if (name.length > 50) {
-      name = name.slice(0, 49) + "…";
-    }
-    name = this._sanitizeName(name);
+    let name = this._sanitizeName(info.addon.name);
     let addonName = `<span class="addon-webext-name">${name}</span>`;
 
     result.header = bundle.formatStringFromName("webextPerms.header", [addonName], 1);
     result.text = "";
     result.listIntro = bundle.GetStringFromName("webextPerms.listIntro");
 
     result.acceptText = bundle.GetStringFromName("webextPerms.add.label");
     result.acceptKey = bundle.GetStringFromName("webextPerms.add.accessKey");
--- a/build/moz.configure/rust.configure
+++ b/build/moz.configure/rust.configure
@@ -38,20 +38,20 @@ def cargo_info(cargo):
         if not m:
             die('Could not determine cargo version from output: %s', out)
         version = m.group(1)
 
     return namespace(
         version=Version(version),
     )
 
-@depends(rustc, cargo, rustc_info)
+@depends(rustc_info, cargo_info)
 @imports(_from='textwrap', _import='dedent')
-def rust_compiler(rustc, cargo, rustc_info):
-    if not rustc:
+def rust_compiler(rustc_info, cargo_info):
+    if not rustc_info:
         die(dedent('''\
         Rust compiler not found.
         To compile rust language sources, you must have 'rustc' in your path.
         See https://www.rust-lang.org/ for more information.
 
         You can install rust by running './mach bootstrap'
         or by directly running the installer from https://rustup.rs/
         '''))
@@ -69,35 +69,37 @@ def rust_compiler(rustc, cargo, rustc_in
 
         You can verify this by typing 'rustc --version'.
 
         If you have the 'rustup' tool installed you can upgrade
         to the latest release by typing 'rustup update'. The
         installer is available from https://rustup.rs/
         '''.format(version, rustc_min_version)))
 
-        if not cargo:
-            die(dedent('''\
-            Cargo package manager not found.
-            To compile Rust language sources, you must have 'cargo' in your path.
-            See https://www.rust-lang.org/ for more information.
+    if not cargo_info:
+        die(dedent('''\
+        Cargo package manager not found.
+        To compile Rust language sources, you must have 'cargo' in your path.
+        See https://www.rust-lang.org/ for more information.
+
+        You can install cargo by running './mach bootstrap'
+        or by directly running the installer from https://rustup.rs/
+        '''))
 
-            You can install cargo by running './mach bootstrap'
-            or by directly running the installer from https://rustup.rs/
-            '''))
-        version = cargo_info.version
-        if version < cargo_min_version:
-            die(dedent('''\
-            Cargo package manager {} is too old.
+    version = cargo_info.version
+    if version < cargo_min_version:
+        die(dedent('''\
+        Cargo package manager {} is too old.
 
-            To compile Rust language sources please install at least
-            version {} of 'cargo' and make sure it is first in your path.
+        To compile Rust language sources please install at least
+        version {} of 'cargo' and make sure it is first in your path.
 
-            You can verify this by typing 'cargo --version'.
-            ''').format(version, cargo_min_version))
+        You can verify this by typing 'cargo --version'.
+        ''').format(version, cargo_min_version))
+
     return True
 
 set_config('MOZ_RUST', rust_compiler)
 
 @template
 def rust_triple_alias(host_or_target):
     """Template defining the alias used for rustc's --target flag.
     `host_or_target` is either `host` or `target` (the @depends functions
--- a/dom/animation/test/crashtests/crashtests.list
+++ b/dom/animation/test/crashtests/crashtests.list
@@ -1,27 +1,27 @@
 pref(dom.animations-api.core.enabled,true) load 1239889-1.html
 pref(dom.animations-api.core.enabled,true) load 1244595-1.html
 pref(dom.animations-api.core.enabled,true) load 1216842-1.html # bug 1334036
 pref(dom.animations-api.core.enabled,true) load 1216842-2.html # bug 1334036
 pref(dom.animations-api.core.enabled,true) load 1216842-3.html
 pref(dom.animations-api.core.enabled,true) load 1216842-4.html
 pref(dom.animations-api.core.enabled,true) load 1216842-5.html # bug 1334036
 pref(dom.animations-api.core.enabled,true) load 1216842-6.html # bug 1334036
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1272475-1.html # bug 1324693 and bug 1332657
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1272475-2.html # bug 1324693 and bug 1332657
+skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1272475-1.html # bug 1324693 and bug 1332657 and bug 1336769
+skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1272475-2.html # bug 1324693 and bug 1332657 and bug 1336769
 pref(dom.animations-api.core.enabled,true) load 1278485-1.html
 pref(dom.animations-api.core.enabled,true) load 1277272-1.html
 pref(dom.animations-api.core.enabled,true) load 1290535-1.html
 pref(dom.animations-api.core.enabled,true) load 1304886-1.html
 pref(dom.animations-api.core.enabled,true) load 1322382-1.html
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1322291-1.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1322291-2.html # bug 1311257 and bug 1311257
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1323114-1.html # bug 1324690 and bug 1311257
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1323114-2.html # bug 1324690
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1324554-1.html # bug 1311257
+pref(dom.animations-api.core.enabled,true) load 1323114-1.html # bug 1324690 and bug 1311257
+skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1323114-2.html # bug 1339318
+pref(dom.animations-api.core.enabled,true) load 1324554-1.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1325193-1.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1330190-1.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1330190-2.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1330513-1.html # bug 1311257
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1333539-1.html
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1333539-2.html
 skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1333418-1.html # bug 1311257
new file mode 100644
--- /dev/null
+++ b/dom/base/FuzzingFunctions.cpp
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FuzzingFunctions.h"
+
+#include "nsJSEnvironment.h"
+#include "js/GCAPI.h"
+
+namespace mozilla {
+namespace dom {
+
+/* static */ void
+FuzzingFunctions::GarbageCollect(const GlobalObject&)
+{
+  nsJSContext::GarbageCollectNow(JS::gcreason::COMPONENT_UTILS,
+                                 nsJSContext::NonIncrementalGC,
+                                 nsJSContext::NonShrinkingGC);
+}
+
+/* static */ void
+FuzzingFunctions::CycleCollect(const GlobalObject&)
+{
+  nsJSContext::CycleCollectNow();
+}
+
+} // namespace dom
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/dom/base/FuzzingFunctions.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_FuzzingFunctions
+#define mozilla_dom_FuzzingFunctions
+
+namespace mozilla {
+namespace dom {
+
+class GlobalObject;
+
+class FuzzingFunctions final
+{
+public:
+  static void
+  GarbageCollect(const GlobalObject&);
+
+  static void
+  CycleCollect(const GlobalObject&);
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // mozilla_dom_FuzzingFunctions
--- a/dom/base/moz.build
+++ b/dom/base/moz.build
@@ -208,16 +208,21 @@ EXPORTS.mozilla.dom += [
     'TimeoutHandler.h',
     'TimeoutManager.h',
     'TreeWalker.h',
     'WebKitCSSMatrix.h',
     'WebSocket.h',
     'WindowOrientationObserver.h',
 ]
 
+if CONFIG['FUZZING']:
+    EXPORTS.mozilla.dom += [
+        'FuzzingFunctions.h',
+    ]
+
 UNIFIED_SOURCES += [
     'AnonymousContent.cpp',
     'Attr.cpp',
     'BarProps.cpp',
     'BodyUtil.cpp',
     'BorrowedAttrInfo.cpp',
     'ChildIterator.cpp',
     'ChromeNodeList.cpp',
@@ -355,16 +360,21 @@ UNIFIED_SOURCES += [
     'WindowOrientationObserver.cpp',
 ]
 
 if CONFIG['MOZ_WEBRTC']:
     UNIFIED_SOURCES += [
         'nsDOMDataChannel.cpp',
     ]
 
+if CONFIG['FUZZING']:
+    UNIFIED_SOURCES += [
+        'FuzzingFunctions.cpp',
+    ]
+
 # these files couldn't be in UNIFIED_SOURCES for now for reasons given below:
 SOURCES += [
     # Several conflicts with other bindings.
     'DOMIntersectionObserver.cpp',
     # Because of OS X headers.
     'nsContentUtils.cpp',
     # this file doesn't like windows.h
     'nsDOMWindowUtils.cpp',
--- a/dom/bindings/Bindings.conf
+++ b/dom/bindings/Bindings.conf
@@ -425,16 +425,23 @@ DOMInterfaces = {
 'FontFaceSet': {
     'implicitJSContext': [ 'load' ],
 },
 
 'FontFaceSetIterator': {
     'wrapperCache': False,
 },
 
+'FuzzingFunctions': {
+    # The codegen is dumb, and doesn't understand that this interface is only a
+    # collection of static methods, so we have this `concrete: False` hack.
+    'concrete': False,
+    'headerFile': 'mozilla/dom/FuzzingFunctions.h',
+},
+
 'Geolocation': {
     'headerFile': 'nsGeolocation.h'
 },
 
 'HeapSnapshot': {
     'nativeType': 'mozilla::devtools::HeapSnapshot'
 },
 
--- a/dom/html/crashtests/crashtests.list
+++ b/dom/html/crashtests/crashtests.list
@@ -6,17 +6,17 @@ load 307616-1.html
 load 324918-1.xhtml
 load 338649-1.xhtml
 load 339501-1.xhtml
 load 339501-2.xhtml
 load 378993-1.xhtml
 load 382568-1.html
 asserts-if(stylo,1) load 383137.xhtml # bug 1324704
 load 388183-1.html
-skip-if(stylo) load 395340-1.html # bug 1323652
+load 395340-1.html # bug 1323652
 load 399694-1.html
 load 407053.html
 load 423371-1.html
 load 448564.html
 load 451123-1.html
 load 453406-1.html
 load 464197-1.html
 load 465466-1.xhtml
new file mode 100644
--- /dev/null
+++ b/dom/webidl/FuzzingFunctions.webidl
@@ -0,0 +1,24 @@
+/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+/*
+ * Various functions useful for automated fuzzing that are enabled
+ * only in --enable-fuzzing builds, because they may be dangerous to
+ * enable on untrusted pages.
+*/
+
+[Pref="fuzzing.enabled"]
+interface FuzzingFunctions {
+  /**
+   * Synchronously perform a garbage collection.
+   */
+  static void garbageCollect();
+
+  /**
+   * Synchronously perform a cycle collection.
+   */
+  static void cycleCollect();
+};
--- a/dom/webidl/moz.build
+++ b/dom/webidl/moz.build
@@ -1048,16 +1048,21 @@ if CONFIG['MOZ_SECUREELEMENT']:
          'SecureElementManager.webidl',
     ]
 
 if CONFIG['MOZ_WIDGET_TOOLKIT'] != 'gonk':
     WEBIDL_FILES += [
         'InstallTrigger.webidl',
     ]
 
+if CONFIG['FUZZING']:
+    WEBIDL_FILES += [
+        'FuzzingFunctions.webidl',
+    ]
+
 GENERATED_EVENTS_WEBIDL_FILES = [
     'AddonEvent.webidl',
     'AnimationPlaybackEvent.webidl',
     'AutocompleteErrorEvent.webidl',
     'BlobEvent.webidl',
     'CaretStateChangedEvent.webidl',
     'CloseEvent.webidl',
     'DeviceLightEvent.webidl',
--- a/gfx/2d/PathHelpers.h
+++ b/gfx/2d/PathHelpers.h
@@ -242,16 +242,21 @@ struct RectCornerRadii {
   const Size TopRight() const { return radii[eCornerTopRight]; }
   Size& TopRight() { return radii[eCornerTopRight]; }
 
   const Size BottomRight() const { return radii[eCornerBottomRight]; }
   Size& BottomRight() { return radii[eCornerBottomRight]; }
 
   const Size BottomLeft() const { return radii[eCornerBottomLeft]; }
   Size& BottomLeft() { return radii[eCornerBottomLeft]; }
+
+  bool IsEmpty() const {
+    return TopLeft().IsEmpty() && TopRight().IsEmpty() &&
+           BottomRight().IsEmpty() && BottomLeft().IsEmpty();
+  }
 };
 
 /**
  * Appends a path represending a rounded rectangle to the path being built by
  * aPathBuilder.
  *
  * aRect           The rectangle to append.
  * aCornerRadii    Contains the radii of the top-left, top-right, bottom-right
--- a/gfx/tests/crashtests/crashtests.list
+++ b/gfx/tests/crashtests/crashtests.list
@@ -126,11 +126,11 @@ load 944579.html
 asserts-if(stylo,1) pref(security.fileuri.strict_origin_policy,false) load 950000.html # bug 1324700
 load 1034403-1.html
 load 1205900.html
 load 1134549-1.svg
 load balinese-letter-spacing.html
 load 1216832-1.html
 load 1225125-1.html
 load 1308394.html
-skip-if(stylo) load 1317403-1.html # bug 1331533
+load 1317403-1.html # bug 1331533
 load 1325159-1.html
 load 1331683.html
--- a/image/test/crashtests/crashtests.list
+++ b/image/test/crashtests/crashtests.list
@@ -42,10 +42,10 @@ load invalid-icc-profile.jpg # This woul
 load invalid_ico_height.ico
 load invalid_ico_width.ico
 
 # Bug 525326 - Test image sizes of 65535x65535 which is larger than we allow)
 load invalid-size.gif
 load invalid-size-second-frame.gif
 
 load multiple-png-hassize.ico # Bug 863958 - This icon's size is such that it leads to multiple writes to the PNG decoder after we've gotten our size.
-skip-if(stylo) asserts(0-2) load ownerdiscard.html # Bug 1323672, bug 807211
+asserts(0-2) load ownerdiscard.html # Bug 1323672, bug 807211
 load truncated-second-frame.png # Bug 863975
--- a/js/src/devtools/automation/autospider.py
+++ b/js/src/devtools/automation/autospider.py
@@ -132,17 +132,17 @@ if args.variant == 'nonunified':
             subprocess.check_call(['sed', '-i', 's/UNIFIED_SOURCES/SOURCES/',
                                    os.path.join(dirpath, 'moz.build')])
 
 OBJDIR = os.path.join(DIR.source, args.objdir)
 OUTDIR = os.path.join(OBJDIR, "out")
 POBJDIR = posixpath.join(PDIR.source, args.objdir)
 AUTOMATION = env.get('AUTOMATION', False)
 MAKE = env.get('MAKE', 'make')
-MAKEFLAGS = env.get('MAKEFLAGS', '-j6')
+MAKEFLAGS = env.get('MAKEFLAGS', '-j6' + ('' if AUTOMATION else ' -s'))
 UNAME_M = subprocess.check_output(['uname', '-m']).strip()
 
 CONFIGURE_ARGS = variant['configure-args']
 opt = args.optimize
 if opt is None:
     opt = variant.get('optimize')
 if opt is not None:
     CONFIGURE_ARGS += (" --enable-optimize" if opt else " --disable-optimize")
@@ -290,17 +290,17 @@ if not args.nobuild:
         shutil.copyfile(configure + ".in", configure)
         os.chmod(configure, 0755)
 
     # Run configure
     if not args.noconf:
         run_command(['sh', '-c', posixpath.join(PDIR.js_src, 'configure') + ' ' + CONFIGURE_ARGS], check=True)
 
     # Run make
-    run_command('%s -s -w %s' % (MAKE, MAKEFLAGS), shell=True, check=True)
+    run_command('%s -w %s' % (MAKE, MAKEFLAGS), shell=True, check=True)
 
 COMMAND_PREFIX = []
 # On Linux, disable ASLR to make shell builds a bit more reproducible.
 if subprocess.call("type setarch >/dev/null 2>&1", shell=True) == 0:
     COMMAND_PREFIX.extend(['setarch', UNAME_M, '-R'])
 
 
 def run_test_command(command, **kwargs):
--- a/js/src/devtools/automation/variants/nonunified
+++ b/js/src/devtools/automation/variants/nonunified
@@ -1,9 +1,9 @@
 {
-    "configure-args": "--enable-debug",
+    "configure-args": "--enable-debug --enable-warnings-as-errors",
     "skip-tests": {
         "all": ["jstests", "jittest", "checks"]
     },
     "extra-tests": {
         "all": ["check-style"]
     }
 }
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -7,16 +7,17 @@
 #include "jit/BaselineCacheIRCompiler.h"
 
 #include "jit/CacheIR.h"
 #include "jit/Linker.h"
 #include "jit/SharedICHelpers.h"
 #include "proxy/Proxy.h"
 
 #include "jscntxtinlines.h"
+#include "jscompartmentinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Maybe;
 
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -3,16 +3,18 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/CacheIRCompiler.h"
 
 #include "jit/IonIC.h"
 
+#include "jscompartmentinlines.h"
+
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Maybe;
 
 ValueOperand
--- a/js/src/jit/CacheIRSpewer.cpp
+++ b/js/src/jit/CacheIRSpewer.cpp
@@ -17,16 +17,17 @@
 #else
 #include <unistd.h>
 #endif
 
 #include <stdarg.h>
 
 #include "jsfun.h"
 #include "jsscript.h"
+#include "jscompartmentinlines.h"
 
 using namespace js;
 using namespace js::jit;
 
 CacheIRSpewer cacheIRspewer;
 
 CacheIRSpewer&
 jit::GetCacheIRSpewerSingleton()
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -9,16 +9,18 @@
 #include "jit/CacheIRCompiler.h"
 #include "jit/IonCaches.h"
 #include "jit/IonIC.h"
 
 #include "jit/Linker.h"
 #include "jit/SharedICHelpers.h"
 #include "proxy/Proxy.h"
 
+#include "jscompartmentinlines.h"
+
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::DebugOnly;
 
 namespace js {
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -25,16 +25,18 @@
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
 
 #include "threading/LockGuard.h"
 
 #include "vm/HelperThreads.h"
 #include "vm/MutexIDs.h"
 
+#include "jscompartmentinlines.h"
+
 #ifndef JIT_SPEW_DIR
 # if defined(_WIN32)
 #  define JIT_SPEW_DIR "."
 # elif defined(__ANDROID__)
 #  define JIT_SPEW_DIR "/data/local/tmp"
 # else
 #  define JIT_SPEW_DIR "/tmp"
 # endif
--- a/js/src/vm/ArgumentsObject.cpp
+++ b/js/src/vm/ArgumentsObject.cpp
@@ -11,16 +11,17 @@
 #include "jit/JitFrames.h"
 #include "vm/AsyncFunction.h"
 #include "vm/GlobalObject.h"
 #include "vm/Stack.h"
 
 #include "jsobjinlines.h"
 
 #include "gc/Nursery-inl.h"
+#include "vm/NativeObject-inl.h"
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 /* static */ size_t
 RareArgumentsData::bytesRequired(size_t numActuals)
 {
--- a/js/src/vm/EnvironmentObject.cpp
+++ b/js/src/vm/EnvironmentObject.cpp
@@ -23,16 +23,17 @@
 #include "vm/Shape.h"
 #include "vm/Xdr.h"
 #include "wasm/WasmInstance.h"
 
 #include "jsatominlines.h"
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
 
+#include "vm/NativeObject-inl.h"
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::PodZero;
 using mozilla::Maybe;
 using mozilla::Some;
--- a/layout/base/crashtests/crashtests.list
+++ b/layout/base/crashtests/crashtests.list
@@ -156,17 +156,17 @@ load 378682.html
 skip-if(stylo) load 379105-1.xhtml # bug 1323679
 load 379419-1.xhtml
 load 379768-1.html
 asserts-if(stylo,8) load 379799-1.html # bug 1324977
 load 379920-1.svg
 load 379920-2.svg
 load 379975.html
 load 380096-1.html
-skip-if(stylo) load 382204-1.html # bug 1323680
+load 382204-1.html # bug 1323680
 load 383102-1.xhtml
 load 383129-1.html
 asserts-if(stylo,2) load 383806-1.xhtml # bug 1329454
 load 384344-1.html
 load 384392-1.xhtml
 load 384392-2.svg
 load 384649-1.xhtml
 load 385354.html
@@ -201,17 +201,17 @@ load 399994-1.html
 load 400185-1.xul
 load 400445-1.xhtml
 load 400904-1.xhtml
 load 401589-1.xul
 load 401734-1.html
 load 401734-2.html
 needs-focus pref(accessibility.browsewithcaret,true) load 403048.html
 skip load 403175-1.html # times out occasionally, bug 473680
-skip-if(stylo) load 403245-1.html # bug 1323652
+load 403245-1.html # bug 1323652
 load 403454.html
 load 403569-1.xhtml
 load 403569-2.xhtml
 load 403569-3.xhtml
 skip-if(stylo) load 404218-1.xhtml # bug 1338761
 load 404491-1.html
 load 404721-1.xhtml
 load 404721-2.xhtml
@@ -226,21 +226,21 @@ load 409461-1.xhtml
 load 409513.html
 load 410967.html
 load 411870-1.html
 load 412651-1.html
 load 413587-1.svg
 load 414058-1.html
 load 414175-1.xul
 load 415503.xhtml
-skip-if(stylo) load 416107.xhtml # bug 1323652
+load 416107.xhtml # bug 1323652
 HTTP load 419985.html
 load 420031-1.html
 load 420213-1.html
-skip-if(stylo) load 420219-1.html # bug 1323652
+load 420219-1.html # bug 1323652
 load 420651-1.xhtml
 load 421203-1.xul
 load 421432.html
 load 422276.html
 asserts(0-1) load 423107-1.xhtml # bug 866955
 load 425981-1.html
 asserts-if(stylo,1) load 428113.xhtml # bug 1329454
 load 428138-1.html
@@ -268,17 +268,17 @@ load 448543-2.html
 load 448543-3.html
 load 450319-1.xhtml
 asserts(1) load 453894-1.xhtml # Bug 398043
 load 454751-1.xul
 load 455063-1.html
 load 455063-2.html
 load 455063-3.html
 load 455171-4.html
-skip-if(stylo) load 455623-1.html # bug 1323652
+load 455623-1.html # bug 1323652
 load 457362-1.xhtml
 load 457514.html
 asserts(0-1) load 460389-1.html # bug 780985
 load 462392.html
 load 466763-1.html
 load 467881-1.html
 load 468491-1.html
 load 468546-1.xhtml
@@ -395,38 +395,38 @@ load 663662-1.html
 load 663662-2.html
 load 665837.html
 load 668579.html
 load 668941.xhtml
 load 670226.html
 asserts(2) load 675246-1.xhtml # Bug 675713
 load 690247-1.html
 load 690619-1.html
-skip-if(stylo) load 691118-1.html # bug 1323652
+load 691118-1.html # bug 1323652
 load 695861.html
 load 695964-1.svg
 load 698335.html
 needs-focus pref(accessibility.browsewithcaret,true) load 699353-1.html
 load 701504.html
 load 707098.html
 load 709536-1.xhtml
 load 722137.html
-skip-if(stylo) load 725535.html # bug 1323652
-skip-if(stylo) load 727601.html # bug 1323652
+load 725535.html # bug 1323652
+load 727601.html # bug 1323652
 skip-if(Android) asserts(0-2) pref(dom.disable_open_during_load,false) load 735943.html # the assertion is bug 735966, for android bug 760271
 asserts(0-2) load 736389-1.xhtml # sometimes the above assertions are delayed and is reported on this test instead
 skip-if(stylo) load 736924-1.html # bug 1337695
 load 749816-1.html
 load 763223-1.html
 test-pref(font.size.inflation.emPerLine,15) test-pref(font.size.inflation.lineThreshold,100) load 763702.xhtml
 load 767593-1.html
 load 767593-2.html
 load 770381-1.html
 load 772306.html
-skip-if(stylo) load 788360.html # bug 1323652
+load 788360.html # bug 1323652
 load 793848.html
 load 795646.html
 skip load 802902.html # bug 901752
 load 806056-1.html
 load 806056-2.html
 load 812665.html
 load 813372-1.html
 load 817219.html
--- a/layout/forms/crashtests/crashtests.list
+++ b/layout/forms/crashtests/crashtests.list
@@ -52,18 +52,18 @@ asserts(1) load 578604-1.html # bug 5845
 asserts(4-7) load 590302-1.xhtml # bug 584564
 load 626014.xhtml
 load 639733.xhtml
 load 669767.html
 load 682684.xhtml
 load 865602.html
 load 893332-1.html
 load 944198.html
-skip-if(stylo) load 949891.xhtml # bug 1323693
+skip-if(stylo) load 949891.xhtml # bug 1323693, bug 1341973
 load 959311.html
 load 960277-2.html
 load 997709-1.html
 load 1102791.html
-skip-if(stylo) load 1140216.html # bug 1323693
+skip-if(stylo) load 1140216.html # bug 1323693, bug 1341973
 load 1182414.html
 load 1212688.html
 load 1228670.xhtml
 load 1279354.html
--- a/layout/generic/crashtests/crashtests.list
+++ b/layout/generic/crashtests/crashtests.list
@@ -10,19 +10,19 @@ load 255982-4.html
 load 264937-1.html
 asserts-if(stylo,4) load 265867-1.html # bug 1324703
 load 265867-2.html
 load 286491.html
 load 289864-1.html
 asserts(0-1) load 295292-1.html # Bug 1315855
 load 295292-2.html
 load 302260-1.html
-skip-if(stylo) load 307979-1.html # bug 1338007
-skip-if(stylo) load 309322-1.html # bug 1338007
-skip-if(stylo) load 309322-2.html # bug 1338007
+load 307979-1.html # bug 1338007
+load 309322-1.html # bug 1338007
+load 309322-2.html # bug 1338007
 load 309322-3.html
 load 309322-4.html
 load 310556-1.xhtml
 load 321224.xul
 load 322780-1.xul
 load 323381-1.html
 load 323381-2.html
 asserts-if(gtkWidget,1) asserts-if(Android&&asyncPan,1) asserts-if(stylo,0) load 323386-1.html # Bug 718883
@@ -163,43 +163,43 @@ load 393956-4.html
 load 394237-1.html
 load 394818-1.html
 load 394818-2.html
 load 394820-1.html
 load 395316-1.html
 load 395450-1.xhtml
 load 397007-1.html
 load 397187-1.html
-skip-if(stylo) load 397844-1.xhtml # bug 1323652
-skip-if(stylo) load 397844-2.xhtml # bug 1323652
+load 397844-1.xhtml # bug 1323652
+load 397844-2.xhtml # bug 1323652
 load 397852-1.xhtml
 load 398181-1.html
 load 398181-2.html
 load 398322-1.html
 load 398322-2.html
 load 398332-1.html
 load 398332-2.html
 asserts(0-2) load 398332-3.html # bug 436123 and bug 457397
 load 399407-1.xhtml
 load 399412-1.html
 load 399843-1.html
 load 400078-1.html
 load 400190.html
-skip-if(stylo) load 400223-1.html # bug 1323652
+load 400223-1.html # bug 1323652
 load 400232-1.html
 load 400244-1.html
 load 400768-1.xhtml
 load 400768-2.xhtml
 load 401042-1.xhtml
 load 402380-1.html
 load 402380-2.html
 load 402872-1.html
 load 402872-2.html
 load 403004.html
-skip-if(stylo) load 403143-1.html # bug 1323652
+load 403143-1.html # bug 1323652
 load 403576-1.html
 load 404140-1.html
 load 404146-1.html
 load 404204-1.html
 load 404215-1.html
 load 404215-2.html
 load 404215-3.html
 load 404219-1.html
@@ -215,30 +215,30 @@ load 408737-2.html
 load 408749-1.xhtml
 load 408883-1.html
 load 410198.html
 load 410228-1.html
 load 410232-1.html
 load 410595-1.html
 load 411213-1.html
 load 411213-2.xml
-skip-if(stylo) load 411835.html # bug 1323652
+load 411835.html # bug 1323652
 load 411851-1.html
 load 412014-1.html
 load 412201-1.xhtml
 load 412543-1.html
 load 413048-1.html
 load 413079-1.xhtml
 load 413079-2.xhtml
 load 413079-3.xhtml
 load 413085-1.html
 load 413085-2.html
 load 413582-1.xhtml
 load 413582-2.html
-skip-if(stylo) load 413712-1.xhtml # bug 1323680
+load 413712-1.xhtml # bug 1323680
 load 414061-1.html
 load 414180-1.xul
 load 414719-1.html
 load 415685-1.html
 load 416165.html
 load 416264-1.html
 load 416476-1.html
 load 417109-1.xhtml
@@ -276,32 +276,32 @@ load 430991.html
 load 431260-1.html
 load 431260-2.html
 load 435529.html
 load 436194-1.html
 load 436602-1.html
 load 436822-1.html
 load 436823.html
 load 436969-1.html
-skip-if(stylo) load 437156-1.html # bug 1323652
+load 437156-1.html # bug 1323652
 load 437565-1.xhtml
 load 437565-2.xhtml
 load 437565-3.xhtml
 load 438259-1.html
 load 438266-1.html
 skip load 438509-1.html # bug 511234
 load 442860-1.xul
 load 443528-1.html
 load 444230-1.html
 load 444484-1.html
 load 444726-1.xhtml
-skip-if(stylo) load 444861-1.html # bug 1323652
+load 444861-1.html # bug 1323652
 load 445288.html
 load 448903-1.html
-skip-if(stylo) load 448996-1.html # bug 1323652
+load 448996-1.html # bug 1323652
 asserts-if(stylo,4) load 451315-1.html # bug 1324672
 asserts-if(stylo,1) load 451317-1.html # bug 1324665
 load 451334-1.html
 load 452157-1.html
 load 452157-2.html
 load 452157-3.html
 load 453762-1.html
 load 455171-1.html
@@ -325,17 +325,17 @@ load 467213-1.html
 load 467487-1.html
 load 467493-1.html
 load 467493-2.html
 load 467875-1.xhtml
 load 467914-1.html
 load 468207-1.html
 load 468771-1.xhtml
 load 468771-2.xhtml
-skip-if(stylo) load 469859-1.xhtml # bug 1323665
+load 469859-1.xhtml # bug 1323665
 load 472587-1.xhtml
 asserts-if(stylo,2) load 472617-1.xhtml # bug 1324665
 load 472774-1.html
 load 472776-1.html
 load 472950-1.html
 load 472957.xhtml
 load 473278-1.xhtml
 load 473894-1.html
@@ -377,17 +377,17 @@ load 509749-1.html
 load 511482.html
 asserts-if(stylo,2) load 512724-1.html # bug 1330259
 asserts-if(stylo,2) load 512725-1.html # bug 1324640
 load 512749-1.html
 load 513110-1.html
 load 513110-2.xhtml
 load 513394-1.html
 load 514098-1.xhtml
-skip-if(stylo) load 514800-1.html # bug 1323652
+load 514800-1.html # bug 1323652
 load 515811-1.html
 load 517968.html
 load 519031.xhtml
 load 520340.html
 load 522170-1.html
 load 526217.html
 load 533379-1.html
 load 533379-2.html
@@ -409,17 +409,17 @@ asserts-if(stylo,4) load 553504-1.xhtml 
 load 564368-1.xhtml
 load 564968.xhtml
 load 569193-1.html
 load 570160.html
 load 570289-1.html
 load 571618-1.svg
 asserts(1) asserts-if(stylo,0-1) load 571975-1.html # bug 574889
 load 571995.xhtml
-skip-if(stylo) load 574958.xhtml # bug 1323696
+load 574958.xhtml # bug 1323696
 asserts(0-4) load 578977.html # bug 757305
 load 580504-1.xhtml
 load 585598-1.xhtml
 load 586806-1.html
 load 586806-2.html
 load 586806-3.html
 load 586973-1.html
 load 589002-1.html
@@ -455,17 +455,17 @@ load 650499-1.html
 load 654002-1.html
 load 654002-2.html
 load 655462-1.html
 load 656130-1.html
 load 656130-2.html
 load 660416.html
 load 665853.html
 load 667025.html
-skip-if(stylo) load 673770.html # bug 1323652
+load 673770.html # bug 1323652
 load 679933-1.html
 load 681489-1.html
 load 682649-1.html
 load 683702-1.xhtml
 load 683712.html
 load 688996-1.html
 load 688996-2.html
 load 691210.html
@@ -554,17 +554,17 @@ asserts(0-3) load 914501.html # bug 1144
 load 914891.html
 load 915475.xhtml
 load 927558.html
 load 943509-1.html
 asserts(2-8) asserts-if(stylo,0) load 944909-1.html
 load 946167-1.html
 load 947158.html
 load 949932.html
-skip-if(stylo) asserts-if(Android,0-1) load 964078.html # bug 989718
+asserts-if(Android,0-1) asserts-if(stylo,1) load 964078.html # bug 989718, bug 1296477
 load 970710.html
 load 973701-1.xhtml
 load 973701-2.xhtml
 load 986899.html
 load 1001233.html
 load 1001258-1.html
 load 1003441.xul
 pref(layout.css.grid.enabled,true) load 1015562.html
--- a/layout/painting/nsCSSRendering.cpp
+++ b/layout/painting/nsCSSRendering.cpp
@@ -1984,25 +1984,42 @@ ComputeBoxValue(nsIFrame* aForFrame, Sty
     if (IsHTMLStyleGeometryBox(aBox)) {
       return StyleGeometryBox::Fill;
     }
   }
 
   return aBox;
 }
 
+bool
+nsCSSRendering::ImageLayerClipState::IsValid() const
+{
+  // mDirtyRectInDevPx comes from mDirtyRectInAppUnits. mDirtyRectInAppUnits
+  // can not be empty if mDirtyRectInDevPx is not.
+  if (!mDirtyRectInDevPx.IsEmpty() && mDirtyRectInAppUnits.IsEmpty()) {
+    return false;
+  }
+
+  if (mHasRoundedCorners == mClippedRadii.IsEmpty()) {
+    return false;
+  }
+
+  return true;
+}
+
 /* static */ void
 nsCSSRendering::GetImageLayerClip(const nsStyleImageLayers::Layer& aLayer,
                                   nsIFrame* aForFrame, const nsStyleBorder& aBorder,
                                   const nsRect& aBorderArea, const nsRect& aCallerDirtyRect,
                                   bool aWillPaintBorder, nscoord aAppUnitsPerPixel,
                                   /* out */ ImageLayerClipState* aClipState)
 {
   aClipState->mHasRoundedCorners = false;
   aClipState->mHasAdditionalBGClipArea = false;
+  aClipState->mAdditionalBGClipArea.SetEmpty();
   aClipState->mCustomClip = false;
 
   StyleGeometryBox layerClip = ComputeBoxValue(aForFrame, aLayer.mClip);
   if (IsSVGStyleGeometryBox(layerClip)) {
     MOZ_ASSERT(aForFrame->IsFrameOfType(nsIFrame::eSVG) &&
                (aForFrame->GetType() != nsGkAtoms::svgOuterSVGFrame));
 
     // The coordinate space of clipArea is svg user space.
@@ -2021,27 +2038,29 @@ nsCSSRendering::GetImageLayerClip(const 
     // Which coordinate space chosen for aBorderArea is not matter. What
     // matter is to ensure returning aClipState->mBGClipArea in the consistent
     // coordiante space with aBorderArea. So we evaluate the position of clip
     // area base on the position of aBorderArea here.
     aClipState->mBGClipArea =
       clipAreaRelativeToStrokeBox + aBorderArea.TopLeft();
 
     SetupDirtyRects(aClipState->mBGClipArea, aCallerDirtyRect,
-                    aAppUnitsPerPixel, &aClipState->mDirtyRect,
-                    &aClipState->mDirtyRectGfx);
+                    aAppUnitsPerPixel, &aClipState->mDirtyRectInAppUnits,
+                    &aClipState->mDirtyRectInDevPx);
+    MOZ_ASSERT(aClipState->IsValid());
     return;
   }
 
   if (layerClip == StyleGeometryBox::NoClip) {
     aClipState->mBGClipArea = aCallerDirtyRect;
 
     SetupDirtyRects(aClipState->mBGClipArea, aCallerDirtyRect,
-                    aAppUnitsPerPixel, &aClipState->mDirtyRect,
-                    &aClipState->mDirtyRectGfx);
+                    aAppUnitsPerPixel, &aClipState->mDirtyRectInAppUnits,
+                    &aClipState->mDirtyRectInDevPx);
+    MOZ_ASSERT(aClipState->IsValid());
     return;
   }
 
   MOZ_ASSERT(!aForFrame->IsFrameOfType(nsIFrame::eSVG) ||
              aForFrame->GetType() == nsGkAtoms::svgOuterSVGFrame);
 
   // Compute the outermost boundary of the area that might be painted.
   // Same coordinate space as aBorderArea.
@@ -2127,50 +2146,51 @@ nsCSSRendering::GetImageLayerClip(const 
     if (haveRoundedCorners) {
       nsIFrame::InsetBorderRadii(aClipState->mRadii, border);
     }
   }
 
   if (haveRoundedCorners) {
     auto d2a = aForFrame->PresContext()->AppUnitsPerDevPixel();
     nsCSSRendering::ComputePixelRadii(aClipState->mRadii, d2a, &aClipState->mClippedRadii);
-    aClipState->mHasRoundedCorners = true;
+    aClipState->mHasRoundedCorners = !aClipState->mClippedRadii.IsEmpty();
   }
 
 
   if (!haveRoundedCorners && aClipState->mHasAdditionalBGClipArea) {
     // Do the intersection here to account for the fast path(?) below.
     aClipState->mBGClipArea =
       aClipState->mBGClipArea.Intersect(aClipState->mAdditionalBGClipArea);
     aClipState->mHasAdditionalBGClipArea = false;
   }
 
   SetupDirtyRects(aClipState->mBGClipArea, aCallerDirtyRect, aAppUnitsPerPixel,
-                  &aClipState->mDirtyRect, &aClipState->mDirtyRectGfx);
+                  &aClipState->mDirtyRectInAppUnits,
+                  &aClipState->mDirtyRectInDevPx);
+
+  MOZ_ASSERT(aClipState->IsValid());
 }
 
 static void
 SetupImageLayerClip(nsCSSRendering::ImageLayerClipState& aClipState,
                     gfxContext *aCtx, nscoord aAppUnitsPerPixel,
                     gfxContextAutoSaveRestore* aAutoSR)
 {
-  if (aClipState.mDirtyRectGfx.IsEmpty()) {
+  if (aClipState.mDirtyRectInDevPx.IsEmpty()) {
     // Our caller won't draw anything under this condition, so no need
     // to set more up.
     return;
   }
 
   if (aClipState.mCustomClip) {
     // We don't support custom clips and rounded corners, arguably a bug, but
     // table painting seems to depend on it.
     return;
   }
 
-  DrawTarget* drawTarget = aCtx->GetDrawTarget();
-
   // If we have rounded corners, clip all subsequent drawing to the
   // rounded rectangle defined by bgArea and bgRadii (we don't know
   // whether the rounded corners intrude on the dirtyRect or not).
   // Do not do this if we have a caller-provided clip rect --
   // as above with bgArea, arguably a bug, but table painting seems
   // to depend on it.
 
   if (aClipState.mHasAdditionalBGClipArea) {
@@ -2189,63 +2209,64 @@ SetupImageLayerClip(nsCSSRendering::Imag
     Rect bgAreaGfx = NSRectToRect(aClipState.mBGClipArea, aAppUnitsPerPixel);
     bgAreaGfx.Round();
 
     if (bgAreaGfx.IsEmpty()) {
       // I think it's become possible to hit this since
       // https://hg.mozilla.org/mozilla-central/rev/50e934e4979b landed.
       NS_WARNING("converted background area should not be empty");
       // Make our caller not do anything.
-      aClipState.mDirtyRectGfx.SizeTo(gfxSize(0.0, 0.0));
+      aClipState.mDirtyRectInDevPx.SizeTo(gfxSize(0.0, 0.0));
       return;
     }
 
     aAutoSR->EnsureSaved(aCtx);
 
     RefPtr<Path> roundedRect =
-      MakePathForRoundedRect(*drawTarget, bgAreaGfx, aClipState.mClippedRadii);
+      MakePathForRoundedRect(*aCtx->GetDrawTarget(), bgAreaGfx,
+                             aClipState.mClippedRadii);
     aCtx->Clip(roundedRect);
   }
 }
 
 static void
 DrawBackgroundColor(nsCSSRendering::ImageLayerClipState& aClipState,
                     gfxContext *aCtx, nscoord aAppUnitsPerPixel)
 {
-  if (aClipState.mDirtyRectGfx.IsEmpty()) {
+  if (aClipState.mDirtyRectInDevPx.IsEmpty()) {
     // Our caller won't draw anything under this condition, so no need
     // to set more up.
     return;
   }
 
   DrawTarget* drawTarget = aCtx->GetDrawTarget();
 
   // We don't support custom clips and rounded corners, arguably a bug, but
   // table painting seems to depend on it.
   if (!aClipState.mHasRoundedCorners || aClipState.mCustomClip) {
     aCtx->NewPath();
-    aCtx->Rectangle(aClipState.mDirtyRectGfx, true);
+    aCtx->Rectangle(aClipState.mDirtyRectInDevPx, true);
     aCtx->Fill();
     return;
   }
 
   Rect bgAreaGfx = NSRectToRect(aClipState.mBGClipArea, aAppUnitsPerPixel);
   bgAreaGfx.Round();
 
   if (bgAreaGfx.IsEmpty()) {
     // I think it's become possible to hit this since
     // https://hg.mozilla.org/mozilla-central/rev/50e934e4979b landed.
     NS_WARNING("converted background area should not be empty");
     // Make our caller not do anything.
-    aClipState.mDirtyRectGfx.SizeTo(gfxSize(0.0, 0.0));
+    aClipState.mDirtyRectInDevPx.SizeTo(gfxSize(0.0, 0.0));
     return;
   }
 
   aCtx->Save();
-  gfxRect dirty = ThebesRect(bgAreaGfx).Intersect(aClipState.mDirtyRectGfx);
+  gfxRect dirty = ThebesRect(bgAreaGfx).Intersect(aClipState.mDirtyRectInDevPx);
 
   aCtx->NewPath();
   aCtx->Rectangle(dirty, true);
   aCtx->Clip();
 
   if (aClipState.mHasAdditionalBGClipArea) {
     gfxRect bgAdditionalAreaGfx = nsLayoutUtils::RectToGfxRect(
       aClipState.mAdditionalBGClipArea, aAppUnitsPerPixel);
@@ -3297,18 +3318,16 @@ nsCSSRendering::PaintStyleImageLayerWith
   NS_PRECONDITION(aParams.frame,
                   "Frame is expected to be provided to PaintBackground");
 
   // If we're drawing all layers, aCompositonOp is ignored, so make sure that
   // it was left at its default value.
   MOZ_ASSERT_IF(aParams.layer == -1,
                 aParams.compositionOp == CompositionOp::OP_OVER);
 
-  DrawResult result = DrawResult::SUCCESS;
-
   // Check to see if we have an appearance defined.  If so, we let the theme
   // renderer draw the background and bail out.
   // XXXzw this ignores aParams.bgClipRect.
   const nsStyleDisplay* displayData = aParams.frame->StyleDisplay();
   if (displayData->mAppearance) {
     nsITheme *theme = aParams.presCtx.GetTheme();
     if (theme && theme->ThemeSupportsWidget(&aParams.presCtx,
                                             aParams.frame,
@@ -3356,57 +3375,45 @@ nsCSSRendering::PaintStyleImageLayerWith
   }
 
   // At this point, drawBackgroundImage and drawBackgroundColor are
   // true if and only if we are actually supposed to paint an image or
   // color into aDirtyRect, respectively.
   if (!drawBackgroundImage && !drawBackgroundColor)
     return DrawResult::SUCCESS;
 
-  // Compute the outermost boundary of the area that might be painted.
-  // Same coordinate space as aParams.borderArea & aParams.bgClipRect.
-  Sides skipSides = aParams.frame->GetSkipSides();
-  nsRect paintBorderArea =
-    ::BoxDecorationRectForBackground(aParams.frame, aParams.borderArea,
-                                     skipSides, &aBorder);
-  nsRect clipBorderArea =
-    ::BoxDecorationRectForBorder(aParams.frame, aParams.borderArea,
-                                 skipSides, &aBorder);
-
   // The 'bgClipArea' (used only by the image tiling logic, far below)
   // is the caller-provided aParams.bgClipRect if any, or else the area
   // determined by the value of 'background-clip' in
   // SetupCurrentBackgroundClip.  (Arguably it should be the
   // intersection, but that breaks the table painter -- in particular,
   // taking the intersection breaks reftests/bugs/403249-1[ab].)
   gfxContext* ctx = aParams.renderingCtx.ThebesContext();
   nscoord appUnitsPerPixel = aParams.presCtx.AppUnitsPerDevPixel();
   ImageLayerClipState clipState;
   if (aParams.bgClipRect) {
     clipState.mBGClipArea = *aParams.bgClipRect;
     clipState.mCustomClip = true;
     clipState.mHasRoundedCorners = false;
     SetupDirtyRects(clipState.mBGClipArea, aParams.dirtyRect, appUnitsPerPixel,
-                    &clipState.mDirtyRect, &clipState.mDirtyRectGfx);
+                    &clipState.mDirtyRectInAppUnits,
+                    &clipState.mDirtyRectInDevPx);
   } else {
     GetImageLayerClip(layers.BottomLayer(),
                       aParams.frame, aBorder, aParams.borderArea,
                       aParams.dirtyRect,
                       (aParams.paintFlags & PAINTBG_WILL_PAINT_BORDER),
                       appUnitsPerPixel,
                       &clipState);
   }
 
   // If we might be using a background color, go ahead and set it now.
-  if (drawBackgroundColor && !isCanvasFrame)
+  if (drawBackgroundColor && !isCanvasFrame) {
     ctx->SetColor(Color::FromABGR(bgColor));
-
-  // NOTE: no Save() yet, we do that later by calling autoSR.EnsureSaved(ctx)
-  // in the cases we need it.
-  gfxContextAutoSaveRestore autoSR;
+  }
 
   // If there is no background image, draw a color.  (If there is
   // neither a background image nor a color, we wouldn't have gotten
   // this far.)
   if (!drawBackgroundImage) {
     if (!isCanvasFrame) {
       DrawBackgroundColor(clipState, ctx, appUnitsPerPixel);
     }
@@ -3414,103 +3421,127 @@ nsCSSRendering::PaintStyleImageLayerWith
   }
 
   if (layers.mImageCount < 1) {
     // Return if there are no background layers, all work from this point
     // onwards happens iteratively on these.
     return DrawResult::SUCCESS;
   }
 
-  // Validate the layer range before we start iterating.
-  int32_t startLayer = aParams.layer;
-  int32_t nLayers = 1;
-  if (startLayer < 0) {
-    startLayer = (int32_t)layers.mImageCount - 1;
-    nLayers = layers.mImageCount;
-  }
+  MOZ_ASSERT((aParams.layer < 0) ||
+             (layers.mImageCount > uint32_t(aParams.layer)));
+  bool drawAllLayers = (aParams.layer < 0);
 
   // Ensure we get invalidated for loads of the image.  We need to do
   // this here because this might be the only code that knows about the
   // association of the style data with the frame.
   if (aBackgroundSC != aParams.frame->StyleContext()) {
-    NS_FOR_VISIBLE_IMAGE_LAYERS_BACK_TO_FRONT_WITH_RANGE(i, layers, startLayer, nLayers) {
+    uint32_t startLayer = drawAllLayers ? layers.mImageCount - 1
+                                        : aParams.layer;
+    uint32_t count = drawAllLayers ? layers.mImageCount : 1;
+    NS_FOR_VISIBLE_IMAGE_LAYERS_BACK_TO_FRONT_WITH_RANGE(i, layers, startLayer,
+                                                         count) {
       aParams.frame->AssociateImage(layers.mLayers[i].mImage,
                                     &aParams.presCtx);
     }
   }
 
   // The background color is rendered over the entire dirty area,
   // even if the image isn't.
   if (drawBackgroundColor && !isCanvasFrame) {
     DrawBackgroundColor(clipState, ctx, appUnitsPerPixel);
   }
 
-  if (drawBackgroundImage) {
-    bool clipSet = false;
-    StyleGeometryBox currentBackgroundClip = StyleGeometryBox::Border;
-    NS_FOR_VISIBLE_IMAGE_LAYERS_BACK_TO_FRONT_WITH_RANGE(i, layers, layers.mImageCount - 1,
-                                                         nLayers + (layers.mImageCount -
-                                                         startLayer - 1)) {
-      const nsStyleImageLayers::Layer& layer = layers.mLayers[i];
-      if (!aParams.bgClipRect) {
-        if (currentBackgroundClip != layer.mClip || !clipSet) {
-          currentBackgroundClip = layer.mClip;
-          // If clipSet is false that means this is the bottom layer and we
-          // already called GetImageLayerClip above and it stored its results
-          // in clipState.
-          if (clipSet) {
-            autoSR.Restore(); // reset the previous one
-            GetImageLayerClip(layer, aParams.frame,
-                              aBorder, aParams.borderArea, aParams.dirtyRect,
-                              (aParams.paintFlags & PAINTBG_WILL_PAINT_BORDER),
-                              appUnitsPerPixel, &clipState);
-          }
-          SetupImageLayerClip(clipState, ctx, appUnitsPerPixel, &autoSR);
-          clipSet = true;
-          if (!clipBorderArea.IsEqualEdges(aParams.borderArea)) {
-            // We're drawing the background for the joined continuation boxes
-            // so we need to clip that to the slice that we want for this
-            // frame.
-            gfxRect clip =
-              nsLayoutUtils::RectToGfxRect(aParams.borderArea, appUnitsPerPixel);
-            autoSR.EnsureSaved(ctx);
-            ctx->NewPath();
-            ctx->SnappedRectangle(clip);
-            ctx->Clip();
-          }
+  if (!drawBackgroundImage) {
+    return DrawResult::SUCCESS; // No need to draw layer image, we can early
+                                // return now.
+  }
+
+  // Compute the outermost boundary of the area that might be painted.
+  // Same coordinate space as aParams.borderArea & aParams.bgClipRect.
+  Sides skipSides = aParams.frame->GetSkipSides();
+  nsRect paintBorderArea =
+    ::BoxDecorationRectForBackground(aParams.frame, aParams.borderArea,
+                                     skipSides, &aBorder);
+  nsRect clipBorderArea =
+    ::BoxDecorationRectForBorder(aParams.frame, aParams.borderArea,
+                                 skipSides, &aBorder);
+
+  DrawResult result = DrawResult::SUCCESS;
+  StyleGeometryBox currentBackgroundClip = StyleGeometryBox::Border;
+  uint32_t count = drawAllLayers
+    ? layers.mImageCount                  // iterate all image layers.
+    : layers.mImageCount - aParams.layer; // iterate from the bottom layer to
+                                          // the 'aParams.layer-th' layer.
+  NS_FOR_VISIBLE_IMAGE_LAYERS_BACK_TO_FRONT_WITH_RANGE(i, layers,
+                                                       layers.mImageCount - 1,
+                                                       count) {
+    // NOTE: no Save() yet, we do that later by calling autoSR.EnsureSaved(ctx)
+    // in the cases we need it.
+    gfxContextAutoSaveRestore autoSR;
+    const nsStyleImageLayers::Layer& layer = layers.mLayers[i];
+
+    if (!aParams.bgClipRect) {
+      bool isBottomLayer = (i == layers.mImageCount - 1);
+      if (currentBackgroundClip != layer.mClip || isBottomLayer) {
+        currentBackgroundClip = layer.mClip;
+        // For the bottom layer, we already called GetImageLayerClip above
+        // and it stored its results in clipState.
+        if (!isBottomLayer) {
+          GetImageLayerClip(layer, aParams.frame,
+                            aBorder, aParams.borderArea, aParams.dirtyRect,
+                            (aParams.paintFlags & PAINTBG_WILL_PAINT_BORDER),
+                            appUnitsPerPixel, &clipState);
+        }
+        SetupImageLayerClip(clipState, ctx, appUnitsPerPixel, &autoSR);
+        if (!clipBorderArea.IsEqualEdges(aParams.borderArea)) {
+          // We're drawing the background for the joined continuation boxes
+          // so we need to clip that to the slice that we want for this
+          // frame.
+          gfxRect clip =
+            nsLayoutUtils::RectToGfxRect(aParams.borderArea, appUnitsPerPixel);
+          autoSR.EnsureSaved(ctx);
+          ctx->NewPath();
+          ctx->SnappedRectangle(clip);
+          ctx->Clip();
         }
       }
-      if ((aParams.layer < 0 || i == (uint32_t)startLayer) &&
-          !clipState.mDirtyRectGfx.IsEmpty()) {
-        CompositionOp co = DetermineCompositionOp(aParams, layers, i);
-        nsBackgroundLayerState state =
-          PrepareImageLayer(&aParams.presCtx, aParams.frame,
-                            aParams.paintFlags, paintBorderArea, clipState.mBGClipArea,
-                            layer, nullptr);
-        result &= state.mImageRenderer.PrepareResult();
-        if (!state.mFillArea.IsEmpty()) {
-          if (co != CompositionOp::OP_OVER) {
-            NS_ASSERTION(ctx->CurrentOp() == CompositionOp::OP_OVER,
-                         "It is assumed the initial op is OP_OVER, when it is "
-                         "restored later");
-            ctx->SetOp(co);
-          }
-
-          result &=
-            state.mImageRenderer.DrawLayer(&aParams.presCtx,
-                                           aParams.renderingCtx,
-                                           state.mDestArea, state.mFillArea,
-                                           state.mAnchor + paintBorderArea.TopLeft(),
-                                           clipState.mDirtyRect,
-                                           state.mRepeatSize, aParams.opacity);
-
-          if (co != CompositionOp::OP_OVER) {
-            ctx->SetOp(CompositionOp::OP_OVER);
-          }
-        }
+    }
+
+    // Skip the following layer painting code if we found the dirty region is
+    // empty or the current layer is not selected for drawing.
+    if (clipState.mDirtyRectInDevPx.IsEmpty() ||
+        (aParams.layer >= 0 && i != (uint32_t)aParams.layer)) {
+      continue;
+    }
+
+    nsBackgroundLayerState state =
+      PrepareImageLayer(&aParams.presCtx, aParams.frame,
+                        aParams.paintFlags, paintBorderArea,
+                        clipState.mBGClipArea, layer, nullptr);
+    result &= state.mImageRenderer.PrepareResult();
+    if (!state.mFillArea.IsEmpty()) {
+      CompositionOp co = DetermineCompositionOp(aParams, layers, i);
+      if (co != CompositionOp::OP_OVER) {
+        NS_ASSERTION(ctx->CurrentOp() == CompositionOp::OP_OVER,
+                     "It is assumed the initial op is OP_OVER, when it is "
+                     "restored later");
+        ctx->SetOp(co);
+      }
+
+      result &=
+        state.mImageRenderer.DrawLayer(&aParams.presCtx,
+                                       aParams.renderingCtx,
+                                       state.mDestArea, state.mFillArea,
+                                       state.mAnchor + paintBorderArea.TopLeft(),
+                                       clipState.mDirtyRectInAppUnits,
+                                       state.mRepeatSize, aParams.opacity);
+
+      if (co != CompositionOp::OP_OVER) {
+        ctx->SetOp(CompositionOp::OP_OVER);
       }
     }
   }
 
   return result;
 }
 
 nsRect
--- a/layout/painting/nsCSSRendering.h
+++ b/layout/painting/nsCSSRendering.h
@@ -593,18 +593,18 @@ struct nsCSSRendering {
                     const nsRect& aBorderArea,
                     const nsRect& aBGClipRect,
                     const nsStyleImageLayers::Layer& aLayer,
                     bool* aOutIsTransformedFixed = nullptr);
 
   struct ImageLayerClipState {
     nsRect mBGClipArea;            // Affected by mClippedRadii
     nsRect mAdditionalBGClipArea;  // Not affected by mClippedRadii
-    nsRect mDirtyRect;
-    gfxRect mDirtyRectGfx;
+    nsRect mDirtyRectInAppUnits;
+    gfxRect mDirtyRectInDevPx;
 
     nscoord mRadii[8];
     RectCornerRadii mClippedRadii;
     bool mHasRoundedCorners;
     bool mHasAdditionalBGClipArea;
 
     // Whether we are being asked to draw with a caller provided background
     // clipping area. If this is true we also disable rounded corners.
@@ -612,16 +612,18 @@ struct nsCSSRendering {
 
     ImageLayerClipState()
      : mHasRoundedCorners(false),
        mHasAdditionalBGClipArea(false),
        mCustomClip(false)
     {
       memset(mRadii, 0, sizeof(nscoord) * 8);
     }
+
+    bool IsValid() const;
   };
 
   static void
   GetImageLayerClip(const nsStyleImageLayers::Layer& aLayer,
                     nsIFrame* aForFrame, const nsStyleBorder& aBorder,
                     const nsRect& aBorderArea, const nsRect& aCallerDirtyRect,
                     bool aWillPaintBorder, nscoord aAppUnitsPerPixel,
                     /* out */ ImageLayerClipState* aClipState);
--- a/layout/painting/nsDisplayList.cpp
+++ b/layout/painting/nsDisplayList.cpp
@@ -7692,17 +7692,17 @@ ComputeMaskGeometry(PaintFramesParams& a
       nsCSSRendering::GetImageLayerClip(svgReset->mMask.mLayers[i],
                                        frame,
                                        *frame->StyleBorder(),
                                        userSpaceBorderArea,
                                        userSpaceDirtyRect,
                                        false, /* aWillPaintBorder */
                                        appUnitsPerDevPixel,
                                        &clipState);
-      currentMaskSurfaceRect = clipState.mDirtyRectGfx;
+      currentMaskSurfaceRect = clipState.mDirtyRectInDevPx;
     }
 
     maskInUserSpace = maskInUserSpace.Union(currentMaskSurfaceRect);
   }
 
   ctx.Save();
 
   if (!maskInUserSpace.IsEmpty()) {
--- a/layout/reftests/css-grid/reftest-stylo.list
+++ b/layout/reftests/css-grid/reftest-stylo.list
@@ -58,17 +58,17 @@ fails == grid-auto-min-sizing-intrinsic-
 fails == grid-auto-min-sizing-intrinsic-003.html grid-auto-min-sizing-intrinsic-003.html
 fails == grid-auto-min-sizing-intrinsic-004.html grid-auto-min-sizing-intrinsic-004.html
 fails == grid-auto-min-sizing-transferred-size-001.html grid-auto-min-sizing-transferred-size-001.html
 fails == grid-auto-min-sizing-transferred-size-002.html grid-auto-min-sizing-transferred-size-002.html
 fails == grid-auto-min-sizing-transferred-size-003.html grid-auto-min-sizing-transferred-size-003.html
 fails == grid-auto-min-sizing-transferred-size-004.html grid-auto-min-sizing-transferred-size-004.html
 fails == grid-auto-min-sizing-min-content-min-size-001.html grid-auto-min-sizing-min-content-min-size-001.html
 fails == grid-auto-min-sizing-min-content-min-size-002.html grid-auto-min-sizing-min-content-min-size-002.html
-fails == grid-auto-min-sizing-min-content-min-size-003.html grid-auto-min-sizing-min-content-min-size-003.html
+# == grid-auto-min-sizing-min-content-min-size-003.html grid-auto-min-sizing-min-content-min-size-003.html
 fails == grid-auto-min-sizing-min-content-min-size-004.html grid-auto-min-sizing-min-content-min-size-004.html
 fails == grid-min-content-min-sizing-transferred-size-001.html grid-min-content-min-sizing-transferred-size-001.html
 fails == grid-min-content-min-sizing-transferred-size-002.html grid-min-content-min-sizing-transferred-size-002.html
 fails == grid-min-content-min-sizing-transferred-size-003.html grid-min-content-min-sizing-transferred-size-003.html
 fails == grid-min-content-min-sizing-transferred-size-004.html grid-min-content-min-sizing-transferred-size-004.html
 fails == grid-auto-min-sizing-percent-001.html grid-auto-min-sizing-percent-001.html
 fails == grid-track-intrinsic-sizing-001.html grid-track-intrinsic-sizing-001.html
 fails == grid-track-intrinsic-sizing-002.html grid-track-intrinsic-sizing-002.html
@@ -168,17 +168,17 @@ fails == grid-repeat-auto-fill-fit-001.h
 fails == grid-repeat-auto-fill-fit-002.html grid-repeat-auto-fill-fit-002.html
 fails == grid-repeat-auto-fill-fit-003.html grid-repeat-auto-fill-fit-003.html
 fails == grid-repeat-auto-fill-fit-004.html grid-repeat-auto-fill-fit-004.html
 fails == grid-repeat-auto-fill-fit-005.html grid-repeat-auto-fill-fit-005.html
 fails == grid-repeat-auto-fill-fit-006.html grid-repeat-auto-fill-fit-006.html
 fails == grid-repeat-auto-fill-fit-007.html grid-repeat-auto-fill-fit-007.html
 fails == grid-repeat-auto-fill-fit-008.html grid-repeat-auto-fill-fit-008.html
 fails == grid-repeat-auto-fill-fit-009.html grid-repeat-auto-fill-fit-009.html
-fails == grid-repeat-auto-fill-fit-010.html grid-repeat-auto-fill-fit-010.html
+# == grid-repeat-auto-fill-fit-010.html grid-repeat-auto-fill-fit-010.html
 fails == grid-repeat-auto-fill-fit-011.html grid-repeat-auto-fill-fit-011.html
 fails == grid-item-blockifying-001.html grid-item-blockifying-001.html
 fails == grid-fragmentation-001.html grid-fragmentation-001.html
 fails == grid-fragmentation-002.html grid-fragmentation-002.html
 fails == grid-fragmentation-003.html grid-fragmentation-003.html
 fails == grid-fragmentation-004.html grid-fragmentation-004.html
 fails == grid-fragmentation-005.html grid-fragmentation-005.html
 fails == grid-fragmentation-006.html grid-fragmentation-006.html
--- a/layout/style/crashtests/crashtests.list
+++ b/layout/style/crashtests/crashtests.list
@@ -18,28 +18,28 @@ asserts-if(stylo,1) load 397022-1.html #
 load 399289-1.svg
 load 404470-1.html
 load 411603-1.html
 load 412588-1.html
 load 413274-1.xhtml
 load 416461-1.xul
 load 418007-1.xhtml
 load 431705-1.xul
-skip-if(stylo) load 432561-1.html # bug 1323705
+load 432561-1.html # bug 1323705
 load 437170-1.html
 load 437532-1.html
-skip-if(stylo) load 439184-1.html # bug 1323939
+load 439184-1.html # bug 1323939
 load 444237-1.html
 load 444848-1.html 
 load 447776-1.html
 load 447783-1.html
 load 448161-1.html
 load 448161-2.html
 load 452150-1.xhtml
-skip-if(stylo) load 456196.html # bug 132652
+load 456196.html # bug 1323652
 load 460209-1.html
 load 460217-1.html
 load 460323-1.html
 load 466845-1.html
 load 469432-1.xhtml
 load 472195-1.html
 load 472237-1.html # will fail, test for leak (474704)
 HTTP(..) load 472237-1.html
@@ -76,17 +76,17 @@ load 621596-1.html
 skip-if(stylo) load 622314-1.xhtml # bug 1337695
 load 637242.xhtml
 load 645142.html
 fails-if(stylo) == 645951-1.html 645951-1-ref.html
 load 652976-1.svg
 load 665209-1.html
 load 671799-1.html
 load 671799-2.html
-skip-if(stylo) load 690990-1.html # bug 1323708
+load 690990-1.html # bug 1323708
 load 696188-1.html
 load 696869-1.html
 asserts-if(stylo,2) load 700116.html # bug 1324635
 load 729126-1.html
 load 729126-2.html
 load 786108-1.html
 load 786108-2.html
 load 788836.html
@@ -110,17 +110,17 @@ load 930270-1.html
 load 930270-2.html
 load 945048-1.html
 load 972199-1.html
 load 989965-1.html
 load 992333-1.html
 skip-if(stylo) pref(dom.webcomponents.enabled,true) load 1017798-1.html # bug 1323689
 load 1028514-1.html
 load 1066089-1.html
-skip-if(stylo) load 1074651-1.html # bug 1323652
+load 1074651-1.html # bug 1323652
 load 1135534.html
 pref(dom.webcomponents.enabled,true) load 1089463-1.html
 pref(layout.css.expensive-style-struct-assertions.enabled,true) load 1136010-1.html
 asserts-if(stylo,0-2) pref(layout.css.expensive-style-struct-assertions.enabled,true) load 1146101-1.html # bug 1324677
 load 1153693-1.html
 load 1161320-1.html
 pref(dom.animations-api.core.enabled,true) load 1161320-2.html
 load 1161366-1.html
@@ -147,18 +147,18 @@ pref(layout.css.prefixes.webkit,false) l
 load border-image-visited-link.html
 load font-face-truncated-src.html 
 load large_border_image_width.html
 load long-url-list-stack-overflow.html
 pref(layout.css.background-clip-text.enabled,true) load 1264949.html
 pref(layout.css.background-clip-text.enabled,true) load 1270795.html
 pref(layout.css.background-clip-text.enabled,true) load 1275026.html
 load 1278463-1.html
-skip-if(stylo) pref(dom.animations-api.core.enabled,true) load 1277908-1.html # bug 1323652
-skip-if(stylo) load 1277908-2.html # bug 1323652
+pref(dom.animations-api.core.enabled,true) load 1277908-1.html # bug 1323652
+load 1277908-2.html # bug 1323652
 load 1282076-1.html
 pref(dom.animations-api.core.enabled,true) load 1282076-2.html
 pref(dom.animations-api.core.enabled,true) load 1290994-1.html
 pref(dom.animations-api.core.enabled,true) load 1290994-2.html
 pref(dom.animations-api.core.enabled,true) load 1290994-3.html
 load 1290994-4.html
 load 1314531.html
 load 1315889-1.html
--- a/layout/style/test/stylo-failures.md
+++ b/layout/style/test/stylo-failures.md
@@ -113,21 +113,18 @@ Any line which doesn't follow the format
 * url value in style attribute bug 1310886
   * test_computed_style.html `url` [18]
   * test_parse_url.html [66]
   * test_value_storage.html `url` [850]
   * test_shorthand_property_getters.html `background shorthand` [1]
   * ... `url` [3]
 * test_computed_style.html `mask`: setting mask shorthand resets subproperties to non-initial value bug 1331516 [11]
 * auto value for min-{width,height} servo/servo#15045
-  * test_computed_style_min_size_auto.html [14]
-  * test_compute_data_with_start_struct.html `height` [4]
-  * ... `min-width` [2]
+  * test_compute_data_with_start_struct.html `height` [1]
   * ... ` width` [1]
-  * test_value_storage.html `'auto' on 'min-` [4]
 * test_compute_data_with_start_struct.html `timing-function`: incorrectly computing keywords to bezier function servo/servo#15086 [2]
 * test_condition_text.html: @-moz-document, CSSOM support of @media, @support [2]
 * @counter-style support:
   * test_counter_descriptor_storage.html [1]
   * test_counter_style.html [1]
   * test_rule_insertion.html `@counter-style` [4]
   * test_value_storage.html `symbols(` [30]
   * ... `list-style-type` [60]
--- a/layout/svg/crashtests/crashtests.list
+++ b/layout/svg/crashtests/crashtests.list
@@ -71,17 +71,17 @@ load 409565-1.xhtml
 load 409573-1.svg
 load 420697-1.svg
 load 420697-2.svg
 load 429774-1.svg
 load 441368-1.svg
 load 453754-1.svg
 load 455314-1.xhtml
 load 458453.html
-skip-if(stylo) load 459666-1.html # bug 1323652
+load 459666-1.html # bug 1323652
 load 459883.xhtml
 load 461289-1.svg
 load 464374-1.svg
 load 466585-1.svg
 load 467323-1.svg
 load 467498-1.svg
 load 470124-1.svg
 load 472782-1.svg
@@ -144,17 +144,17 @@ load 768087-1.html
 load 768351.svg
 load 778492-1.svg
 load 779971-1.svg
 load 780764-1.svg
 load 780963-1.html
 load 782141-1.svg
 load 784061-1.svg
 load 788831-1.svg
-skip-if(stylo) load 789390-1.html # bug 1323652
+load 789390-1.html # bug 1323652
 load 790072.svg
 load 791826-1.svg
 load 808318-1.svg
 load 803562-1.svg
 load 813420-1.svg
 load 841163-1.svg
 load 841812-1.svg
 load 842009-1.svg
--- a/layout/tables/crashtests/crashtests.list
+++ b/layout/tables/crashtests/crashtests.list
@@ -89,17 +89,17 @@ load 388700-1.html
 load 391898-1.html
 load 391901-1.html
 load 392132-1.xhtml
 load 397448-1.html
 load 398157-1.xhtml
 load 399209-1.xhtml
 load 403249-1.html
 load 403579-1.html
-skip-if(stylo) load 404301-1.xhtml # bug 1323652
+load 404301-1.xhtml # bug 1323652
 load 408753-1.xhtml
 load 410426-1.html
 load 410428-1.xhtml
 load 411582.xhtml
 load 413091.xhtml
 load 413180-1.html
 load 416845-1.xhtml
 load 416845-2.xhtml
--- a/layout/xul/crashtests/crashtests.list
+++ b/layout/xul/crashtests/crashtests.list
@@ -48,17 +48,17 @@ load 384105-1.html
 load 384373.html
 load 384491-1.xhtml
 load 384871-1.html
 load 386642.xul
 load 387033-1.xhtml
 load 387080-1.xul
 load 391974-1.html
 load 394120-1.xhtml
-skip-if(stylo) load 397293.xhtml # bug 1323652
+load 397293.xhtml # bug 1323652
 load 397304-1.html
 asserts-if(stylo,0-2) load 398326-1.xhtml # bug 1324660
 load 399013.xul
 load 400779-1.xhtml
 load 402912-1.xhtml
 load 404192.xhtml
 load 407152.xul
 load 408904-1.xul
--- a/mobile/android/base/android-services.mozbuild
+++ b/mobile/android/base/android-services.mozbuild
@@ -861,16 +861,17 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'fxa/SyncStatusListener.java',
     'push/autopush/AutopushClient.java',
     'push/autopush/AutopushClientException.java',
     'push/RegisterUserAgentResponse.java',
     'push/SubscribeChannelResponse.java',
     'sync/AlreadySyncingException.java',
     'sync/BackoffHandler.java',
     'sync/BadRequiredFieldJSONException.java',
+    'sync/CollectionConcurrentModificationException.java',
     'sync/CollectionKeys.java',
     'sync/CommandProcessor.java',
     'sync/CommandRunner.java',
     'sync/CredentialException.java',
     'sync/crypto/CryptoException.java',
     'sync/crypto/CryptoInfo.java',
     'sync/crypto/HKDF.java',
     'sync/crypto/HMACVerificationException.java',
@@ -896,20 +897,24 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/InfoConfiguration.java',
     'sync/InfoCounts.java',
     'sync/JSONRecordFetcher.java',
     'sync/KeyBundleProvider.java',
     'sync/MetaGlobal.java',
     'sync/MetaGlobalException.java',
     'sync/MetaGlobalMissingEnginesException.java',
     'sync/MetaGlobalNotSetException.java',
+    'sync/middleware/BufferingMiddlewareRepository.java',
+    'sync/middleware/BufferingMiddlewareRepositorySession.java',
     'sync/middleware/Crypto5MiddlewareRepository.java',
     'sync/middleware/Crypto5MiddlewareRepositorySession.java',
     'sync/middleware/MiddlewareRepository.java',
     'sync/middleware/MiddlewareRepositorySession.java',
+    'sync/middleware/storage/BufferStorage.java',
+    'sync/middleware/storage/MemoryBufferStorage.java',
     'sync/net/AbstractBearerTokenAuthHeaderProvider.java',
     'sync/net/AuthHeaderProvider.java',
     'sync/net/BaseResource.java',
     'sync/net/BaseResourceDelegate.java',
     'sync/net/BasicAuthHeaderProvider.java',
     'sync/net/BearerAuthHeaderProvider.java',
     'sync/net/BrowserIDAuthHeaderProvider.java',
     'sync/net/ConnectionMonitorThread.java',
@@ -935,16 +940,17 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/net/WBORequestDelegate.java',
     'sync/NoCollectionKeysSetException.java',
     'sync/NodeAuthenticationException.java',
     'sync/NonArrayJSONException.java',
     'sync/NonObjectJSONException.java',
     'sync/NullClusterURLException.java',
     'sync/PersistedMetaGlobal.java',
     'sync/PrefsBackoffHandler.java',
+    'sync/ReflowIsNecessaryException.java',
     'sync/repositories/android/AndroidBrowserBookmarksDataAccessor.java',
     'sync/repositories/android/AndroidBrowserBookmarksRepository.java',
     'sync/repositories/android/AndroidBrowserBookmarksRepositorySession.java',
     'sync/repositories/android/AndroidBrowserHistoryDataAccessor.java',
     'sync/repositories/android/AndroidBrowserHistoryRepository.java',
     'sync/repositories/android/AndroidBrowserHistoryRepositorySession.java',
     'sync/repositories/android/AndroidBrowserRepository.java',
     'sync/repositories/android/AndroidBrowserRepositoryDataAccessor.java',
@@ -957,17 +963,17 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/repositories/android/ClientsDatabaseAccessor.java',
     'sync/repositories/android/FennecTabsRepository.java',
     'sync/repositories/android/FormHistoryRepositorySession.java',
     'sync/repositories/android/PasswordsRepositorySession.java',
     'sync/repositories/android/RepoUtils.java',
     'sync/repositories/android/VisitsHelper.java',
     'sync/repositories/BookmarkNeedsReparentingException.java',
     'sync/repositories/BookmarksRepository.java',
-    'sync/repositories/ConstrainedServer11Repository.java',
+    'sync/repositories/ConfigurableServer15Repository.java',
     'sync/repositories/delegates/DeferrableRepositorySessionCreationDelegate.java',
     'sync/repositories/delegates/DeferredRepositorySessionBeginDelegate.java',
     'sync/repositories/delegates/DeferredRepositorySessionFetchRecordsDelegate.java',
     'sync/repositories/delegates/DeferredRepositorySessionFinishDelegate.java',
     'sync/repositories/delegates/DeferredRepositorySessionStoreDelegate.java',
     'sync/repositories/delegates/RepositorySessionBeginDelegate.java',
     'sync/repositories/delegates/RepositorySessionCleanDelegate.java',
     'sync/repositories/delegates/RepositorySessionCreationDelegate.java',
@@ -986,88 +992,91 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/repositories/domain/PasswordRecord.java',
     'sync/repositories/domain/PasswordRecordFactory.java',
     'sync/repositories/domain/Record.java',
     'sync/repositories/domain/RecordParseException.java',
     'sync/repositories/domain/TabsRecord.java',
     'sync/repositories/domain/TabsRecordFactory.java',
     'sync/repositories/domain/VersionConstants.java',
     'sync/repositories/downloaders/BatchingDownloader.java',
+    'sync/repositories/downloaders/BatchingDownloaderController.java',
     'sync/repositories/downloaders/BatchingDownloaderDelegate.java',
     'sync/repositories/FetchFailedException.java',
     'sync/repositories/HashSetStoreTracker.java',
     'sync/repositories/HistoryRepository.java',
     'sync/repositories/IdentityRecordFactory.java',
     'sync/repositories/InactiveSessionException.java',
     'sync/repositories/InvalidBookmarkTypeException.java',
     'sync/repositories/InvalidRequestException.java',
     'sync/repositories/InvalidSessionTransitionException.java',
     'sync/repositories/MultipleRecordsForGuidException.java',
     'sync/repositories/NoContentProviderException.java',
     'sync/repositories/NoGuidForIdException.java',
+    'sync/repositories/NonPersistentRepositoryStateProvider.java',
     'sync/repositories/NoStoreDelegateException.java',
     'sync/repositories/NullCursorException.java',
     'sync/repositories/ParentNotFoundException.java',
+    'sync/repositories/PersistentRepositoryStateProvider.java',
     'sync/repositories/ProfileDatabaseException.java',
     'sync/repositories/RecordFactory.java',
     'sync/repositories/RecordFilter.java',
     'sync/repositories/Repository.java',
     'sync/repositories/RepositorySession.java',
     'sync/repositories/RepositorySessionBundle.java',
-    'sync/repositories/Server11Repository.java',
-    'sync/repositories/Server11RepositorySession.java',
+    'sync/repositories/RepositoryStateProvider.java',
+    'sync/repositories/Server15Repository.java',
+    'sync/repositories/Server15RepositorySession.java',
     'sync/repositories/StoreFailedException.java',
     'sync/repositories/StoreTracker.java',
     'sync/repositories/StoreTrackingRepositorySession.java',
     'sync/repositories/uploaders/BatchingUploader.java',
     'sync/repositories/uploaders/BatchMeta.java',
     'sync/repositories/uploaders/BufferSizeTracker.java',
     'sync/repositories/uploaders/MayUploadProvider.java',
     'sync/repositories/uploaders/Payload.java',
     'sync/repositories/uploaders/PayloadDispatcher.java',
     'sync/repositories/uploaders/PayloadUploadDelegate.java',
     'sync/repositories/uploaders/RecordUploadRunnable.java',
     'sync/repositories/uploaders/UploaderMeta.java',
-    'sync/Server11PreviousPostFailedException.java',
-    'sync/Server11RecordPostFailedException.java',
+    'sync/Server15PreviousPostFailedException.java',
+    'sync/Server15RecordPostFailedException.java',
     'sync/setup/activities/ActivityUtils.java',
     'sync/setup/activities/WebURLFinder.java',
     'sync/setup/Constants.java',
     'sync/setup/InvalidSyncKeyException.java',
     'sync/SharedPreferencesClientsDataDelegate.java',
     'sync/stage/AbstractNonRepositorySyncStage.java',
     'sync/stage/AbstractSessionManagingSyncStage.java',
     'sync/stage/AndroidBrowserBookmarksServerSyncStage.java',
     'sync/stage/AndroidBrowserHistoryServerSyncStage.java',
+    'sync/stage/AndroidBrowserRecentHistoryServerSyncStage.java',
     'sync/stage/CheckPreconditionsStage.java',
     'sync/stage/CompletedStage.java',
     'sync/stage/EnsureCrypto5KeysStage.java',
     'sync/stage/FennecTabsServerSyncStage.java',
     'sync/stage/FetchInfoCollectionsStage.java',
     'sync/stage/FetchInfoConfigurationStage.java',
     'sync/stage/FetchMetaGlobalStage.java',
     'sync/stage/FormHistoryServerSyncStage.java',
     'sync/stage/GlobalSyncStage.java',
     'sync/stage/NoSuchStageException.java',
     'sync/stage/PasswordsServerSyncStage.java',
-    'sync/stage/SafeConstrainedServer11Repository.java',
     'sync/stage/ServerSyncStage.java',
     'sync/stage/SyncClientsEngineStage.java',
     'sync/stage/UploadMetaGlobalStage.java',
-    'sync/Sync11Configuration.java',
     'sync/SyncConfiguration.java',
     'sync/SyncConfigurationException.java',
     'sync/SyncConstants.java',
+    'sync/SyncDeadlineReachedException.java',
     'sync/SyncException.java',
     'sync/synchronizer/ConcurrentRecordConsumer.java',
     'sync/synchronizer/RecordConsumer.java',
     'sync/synchronizer/RecordsChannel.java',
     'sync/synchronizer/RecordsChannelDelegate.java',
     'sync/synchronizer/RecordsConsumerDelegate.java',
-    'sync/synchronizer/SerialRecordConsumer.java',
     'sync/synchronizer/ServerLocalSynchronizer.java',
     'sync/synchronizer/ServerLocalSynchronizerSession.java',
     'sync/synchronizer/SessionNotBegunException.java',
     'sync/synchronizer/Synchronizer.java',
     'sync/synchronizer/SynchronizerDelegate.java',
     'sync/synchronizer/SynchronizerSession.java',
     'sync/synchronizer/SynchronizerSessionDelegate.java',
     'sync/synchronizer/UnbundleError.java',
--- a/mobile/android/base/java/org/mozilla/gecko/db/BrowserContract.java
+++ b/mobile/android/base/java/org/mozilla/gecko/db/BrowserContract.java
@@ -53,16 +53,21 @@ public class BrowserContract {
     public static final String PARAM_INSERT_IF_NEEDED = "insert_if_needed";
     public static final String PARAM_INCREMENT_VISITS = "increment_visits";
     public static final String PARAM_INCREMENT_REMOTE_AGGREGATES = "increment_remote_aggregates";
     public static final String PARAM_NON_POSITIONED_PINS = "non_positioned_pins";
     public static final String PARAM_EXPIRE_PRIORITY = "priority";
     public static final String PARAM_DATASET_ID = "dataset_id";
     public static final String PARAM_GROUP_BY = "group_by";
 
+    public static final String METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC = "insertHistoryWithVisitsSync";
+    public static final String METHOD_RESULT = "methodResult";
+    public static final String METHOD_PARAM_OBJECT = "object";
+    public static final String METHOD_PARAM_DATA = "data";
+
     static public enum ExpirePriority {
         NORMAL,
         AGGRESSIVE
     }
 
     /**
      * Produces a SQL expression used for sorting results of the "combined" view by frecency.
      * Combines remote and local frecency calculations, weighting local visits much heavier.
--- a/mobile/android/base/java/org/mozilla/gecko/db/BrowserProvider.java
+++ b/mobile/android/base/java/org/mozilla/gecko/db/BrowserProvider.java
@@ -27,16 +27,17 @@ import org.mozilla.gecko.db.BrowserContr
 import org.mozilla.gecko.db.BrowserContract.Tabs;
 import org.mozilla.gecko.db.BrowserContract.Thumbnails;
 import org.mozilla.gecko.db.BrowserContract.TopSites;
 import org.mozilla.gecko.db.BrowserContract.UrlAnnotations;
 import org.mozilla.gecko.db.BrowserContract.PageMetadata;
 import org.mozilla.gecko.db.DBUtils.UpdateOperation;
 import org.mozilla.gecko.icons.IconsHelper;
 import org.mozilla.gecko.sync.Utils;
+import org.mozilla.gecko.sync.repositories.android.BrowserContractHelpers;
 import org.mozilla.gecko.util.ThreadUtils;
 
 import android.content.BroadcastReceiver;
 import android.content.ContentProviderOperation;
 import android.content.ContentProviderResult;
 import android.content.ContentUris;
 import android.content.ContentValues;
 import android.content.Context;
@@ -44,20 +45,25 @@ import android.content.Intent;
 import android.content.IntentFilter;
 import android.content.OperationApplicationException;
 import android.content.UriMatcher;
 import android.database.Cursor;
 import android.database.DatabaseUtils;
 import android.database.MatrixCursor;
 import android.database.MergeCursor;
 import android.database.SQLException;
+import android.database.sqlite.SQLiteConstraintException;
 import android.database.sqlite.SQLiteCursor;
 import android.database.sqlite.SQLiteDatabase;
 import android.database.sqlite.SQLiteQueryBuilder;
+import android.database.sqlite.SQLiteStatement;
 import android.net.Uri;
+import android.os.Bundle;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
 import android.support.v4.content.LocalBroadcastManager;
 import android.text.TextUtils;
 import android.util.Log;
 
 public class BrowserProvider extends SharedBrowserDatabaseProvider {
     public static final String ACTION_SHRINK_MEMORY = "org.mozilla.gecko.db.intent.action.SHRINK_MEMORY";
 
     private static final String LOGTAG = "GeckoBrowserProvider";
@@ -2202,16 +2208,240 @@ public class BrowserProvider extends Sha
                 + " WHERE " + Bookmarks.IS_DELETED + " = 0"
                 + " AND " + Bookmarks.URL + " IS NOT NULL)";
 
         return deleteFavicons(uri, faviconSelection, null) +
                deleteThumbnails(uri, thumbnailSelection, null) +
                getURLImageDataTable().deleteUnused(getWritableDatabase(uri));
     }
 
+    @Nullable
+    @Override
+    public Bundle call(@NonNull String method, String uriArg, Bundle extras) {
+        if (uriArg == null) {
+            throw new IllegalArgumentException("Missing required Uri argument.");
+        }
+        final Bundle result = new Bundle();
+        switch (method) {
+            case BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC:
+                try {
+                    final Uri uri = Uri.parse(uriArg);
+                    final SQLiteDatabase db = getWritableDatabase(uri);
+                    bulkInsertHistoryWithVisits(db, extras);
+                    result.putSerializable(BrowserContract.METHOD_RESULT, null);
+
+                // If anything went wrong during insertion, we know that changes were rolled back.
+                // Inform our caller that we have failed.
+                } catch (Exception e) {
+                    Log.e(LOGTAG, "Unexpected error while bulk inserting history", e);
+                    result.putSerializable(BrowserContract.METHOD_RESULT, e);
+                }
+                break;
+            default:
+                throw new IllegalArgumentException("Unknown method call: " + method);
+        }
+
+        return result;
+    }
+
+    private void bulkInsertHistoryWithVisits(final SQLiteDatabase db, @NonNull Bundle dataBundle) {
+        // NB: dataBundle structure:
+        // Key METHOD_PARAM_DATA=[Bundle,...]
+        // Each Bundle has keys METHOD_PARAM_OBJECT=ContentValues{HistoryRecord}, VISITS=ContentValues[]{visits}
+        final Bundle[] recordBundles = (Bundle[]) dataBundle.getSerializable(BrowserContract.METHOD_PARAM_DATA);
+
+        if (recordBundles == null) {
+            throw new IllegalArgumentException("Received null recordBundle while bulk inserting history.");
+        }
+
+        if (recordBundles.length == 0) {
+            return;
+        }
+
+        final ContentValues[][] visitsValueSet = new ContentValues[recordBundles.length][];
+        final ContentValues[] historyValueSet = new ContentValues[recordBundles.length];
+        for (int i = 0; i < recordBundles.length; i++) {
+            historyValueSet[i] = recordBundles[i].getParcelable(BrowserContract.METHOD_PARAM_OBJECT);
+            visitsValueSet[i] = (ContentValues[]) recordBundles[i].getSerializable(History.VISITS);
+        }
+
+        // Wrap the whole operation in a transaction.
+        beginBatch(db);
+
+        final int historyInserted;
+        try {
+            // First, insert history records.
+            historyInserted = bulkInsertHistory(db, historyValueSet);
+            if (historyInserted != recordBundles.length) {
+                Log.w(LOGTAG, "Expected to insert " + recordBundles.length + " history records, " +
+                        "but actually inserted " + historyInserted);
+            }
+
+            // Second, insert visit records.
+            bulkInsertVisits(db, visitsValueSet);
+
+            // Finally, commit all of the insertions we just made.
+            markBatchSuccessful(db);
+
+        // We're done with our database operations.
+        } finally {
+            endBatch(db);
+        }
+
+        // Notify listeners that we've just inserted new history records.
+        if (historyInserted > 0) {
+            getContext().getContentResolver().notifyChange(
+                    BrowserContractHelpers.HISTORY_CONTENT_URI, null,
+                    // Do not sync these changes.
+                    false
+            );
+        }
+    }
+
+    private int bulkInsertHistory(final SQLiteDatabase db, ContentValues[] values) {
+        int inserted = 0;
+        final String fullInsertSqlStatement = "INSERT INTO " + History.TABLE_NAME + " (" +
+                History.GUID + "," +
+                History.TITLE + "," +
+                History.URL + "," +
+                History.DATE_LAST_VISITED + "," +
+                History.REMOTE_DATE_LAST_VISITED + "," +
+                History.VISITS + "," +
+                History.REMOTE_VISITS + ") VALUES (?, ?, ?, ?, ?, ?, ?)";
+        final String shortInsertSqlStatement = "INSERT INTO " + History.TABLE_NAME + " (" +
+                History.GUID + "," +
+                History.TITLE + "," +
+                History.URL + ") VALUES (?, ?, ?)";
+        final SQLiteStatement compiledFullStatement = db.compileStatement(fullInsertSqlStatement);
+        final SQLiteStatement compiledShortStatement = db.compileStatement(shortInsertSqlStatement);
+        SQLiteStatement statementToExec;
+
+        beginWrite(db);
+        try {
+            for (ContentValues cv : values) {
+                final String guid = cv.getAsString(History.GUID);
+                final String title = cv.getAsString(History.TITLE);
+                final String url = cv.getAsString(History.URL);
+                final Long dateLastVisited = cv.getAsLong(History.DATE_LAST_VISITED);
+                final Long remoteDateLastVisited = cv.getAsLong(History.REMOTE_DATE_LAST_VISITED);
+                final Integer visits = cv.getAsInteger(History.VISITS);
+
+                // If dateLastVisited is null, so will be remoteDateLastVisited and visits.
+                // We will use the short compiled statement in this case.
+                // See implementation in AndroidBrowserHistoryDataAccessor@getContentValues.
+                if (dateLastVisited == null) {
+                    statementToExec = compiledShortStatement;
+                } else {
+                    statementToExec = compiledFullStatement;
+                }
+
+                statementToExec.clearBindings();
+                statementToExec.bindString(1, guid);
+                // Title is allowed to be null.
+                if (title != null) {
+                    statementToExec.bindString(2, title);
+                } else {
+                    statementToExec.bindNull(2);
+                }
+                statementToExec.bindString(3, url);
+                if (dateLastVisited != null) {
+                    statementToExec.bindLong(4, dateLastVisited);
+                    statementToExec.bindLong(5, remoteDateLastVisited);
+
+                    // NB:
+                    // Both of these count values might be slightly off unless we recalculate them
+                    // from data in the visits table at some point.
+                    // See note about visit insertion failures below in the bulkInsertVisits method.
+
+                    // Visit count
+                    statementToExec.bindLong(6, visits);
+                    // Remote visit count.
+                    statementToExec.bindLong(7, visits);
+                }
+
+                try {
+                    if (statementToExec.executeInsert() != -1) {
+                        inserted += 1;
+                    }
+
+                // NB: Constraint violation might occur if we're trying to insert a duplicate GUID.
+                // This should not happen but it does in practice, possibly due to reconciliation bugs.
+                // For now we catch and log the error without failing the whole bulk insert.
+                } catch (SQLiteConstraintException e) {
+                    Log.w(LOGTAG, "Unexpected constraint violation while inserting history with GUID " + guid, e);
+                }
+            }
+            markWriteSuccessful(db);
+        } finally {
+            endWrite(db);
+        }
+
+        if (inserted != values.length) {
+            Log.w(LOGTAG, "Failed to insert some of the history. " +
+                    "Expected: " + values.length + ", actual: " + inserted);
+        }
+
+        return inserted;
+    }
+
+    private int bulkInsertVisits(SQLiteDatabase db, ContentValues[][] valueSets) {
+        final String insertSqlStatement = "INSERT INTO " + Visits.TABLE_NAME + " (" +
+                Visits.DATE_VISITED + "," +
+                Visits.VISIT_TYPE + "," +
+                Visits.HISTORY_GUID + "," +
+                Visits.IS_LOCAL + ") VALUES (?, ?, ?, ?)";
+        final SQLiteStatement compiledInsertStatement = db.compileStatement(insertSqlStatement);
+
+        int totalInserted = 0;
+        beginWrite(db);
+        try {
+            for (ContentValues[] valueSet : valueSets) {
+                int inserted = 0;
+                for (ContentValues values : valueSet) {
+                    final long date = values.getAsLong(Visits.DATE_VISITED);
+                    final long visitType = values.getAsLong(Visits.VISIT_TYPE);
+                    final String guid = values.getAsString(Visits.HISTORY_GUID);
+                    final Integer isLocal = values.getAsInteger(Visits.IS_LOCAL);
+
+                    // Bind parameters use a 1-based index.
+                    compiledInsertStatement.clearBindings();
+                    compiledInsertStatement.bindLong(1, date);
+                    compiledInsertStatement.bindLong(2, visitType);
+                    compiledInsertStatement.bindString(3, guid);
+                    compiledInsertStatement.bindLong(4, isLocal);
+
+                    try {
+                        if (compiledInsertStatement.executeInsert() != -1) {
+                            inserted++;
+                        }
+
+                    // NB:
+                    // Constraint exception will be thrown if we try to insert a visit violating
+                    // unique(guid, date) constraint. We don't expect to do that, but our incoming
+                    // data might not be clean - either due to duplicate entries in the sync data,
+                    // or, less likely, due to record reconciliation bugs at the RepositorySession
+                    // level.
+                    } catch (SQLiteConstraintException e) {
+                        Log.w(LOGTAG, "Unexpected constraint exception while inserting a visit", e);
+                    }
+                }
+                if (inserted != valueSet.length) {
+                    Log.w(LOGTAG, "Failed to insert some of the visits. " +
+                            "Expected: " + valueSet.length + ", actual: " + inserted);
+                }
+                totalInserted += inserted;
+            }
+            markWriteSuccessful(db);
+        } finally {
+            endWrite(db);
+        }
+
+        return totalInserted;
+    }
+
     @Override
     public ContentProviderResult[] applyBatch (ArrayList<ContentProviderOperation> operations)
         throws OperationApplicationException {
         final int numOperations = operations.size();
         final ContentProviderResult[] results = new ContentProviderResult[numOperations];
 
         if (numOperations < 1) {
             debug("applyBatch: no operations; returning immediately.");
--- a/mobile/android/chrome/content/browser.js
+++ b/mobile/android/chrome/content/browser.js
@@ -20,19 +20,16 @@ Cu.import("resource://gre/modules/XPCOMU
 if (AppConstants.ACCESSIBILITY) {
   XPCOMUtils.defineLazyModuleGetter(this, "AccessFu",
                                     "resource://gre/modules/accessibility/AccessFu.jsm");
 }
 
 XPCOMUtils.defineLazyModuleGetter(this, "Manifests",
                                   "resource://gre/modules/Manifest.jsm");
 
-XPCOMUtils.defineLazyModuleGetter(this, "SpatialNavigation",
-                                  "resource://gre/modules/SpatialNavigation.jsm");
-
 XPCOMUtils.defineLazyModuleGetter(this, "DownloadNotifications",
                                   "resource://gre/modules/DownloadNotifications.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "FileUtils",
                                   "resource://gre/modules/FileUtils.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "JNI",
                                   "resource://gre/modules/JNI.jsm");
@@ -540,17 +537,16 @@ var BrowserApp = {
       // Collect telemetry data.
       // We do this at startup because we want to move away from "gather-telemetry" (bug 1127907)
       InitLater(() => {
         Telemetry.addData("FENNEC_TRACKING_PROTECTION_STATE", parseInt(BrowserApp.getTrackingProtectionState()));
         Telemetry.addData("ZOOMED_VIEW_ENABLED", Services.prefs.getBoolPref("ui.zoomedview.enabled"));
       });
 
       InitLater(() => LightWeightThemeWebInstaller.init());
-      InitLater(() => SpatialNavigation.init(BrowserApp.deck, null), window, "SpatialNavigation");
       InitLater(() => CastingApps.init(), window, "CastingApps");
       InitLater(() => Services.search.init(), Services, "search");
       InitLater(() => DownloadNotifications.init(), window, "DownloadNotifications");
 
       // Bug 778855 - Perf regression if we do this here. To be addressed in bug 779008.
       InitLater(() => SafeBrowsing.init(), window, "SafeBrowsing");
 
       InitLater(() => Cc["@mozilla.org/login-manager;1"].getService(Ci.nsILoginManager));
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/fxa/sync/FxAccountSyncAdapter.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/fxa/sync/FxAccountSyncAdapter.java
@@ -48,37 +48,44 @@ import org.mozilla.gecko.sync.stage.Glob
 import org.mozilla.gecko.sync.telemetry.TelemetryContract;
 import org.mozilla.gecko.tokenserver.TokenServerClient;
 import org.mozilla.gecko.tokenserver.TokenServerClientDelegate;
 import org.mozilla.gecko.tokenserver.TokenServerException;
 import org.mozilla.gecko.tokenserver.TokenServerToken;
 
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.EnumSet;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 public class FxAccountSyncAdapter extends AbstractThreadedSyncAdapter {
   private static final String LOG_TAG = FxAccountSyncAdapter.class.getSimpleName();
 
   public static final int NOTIFICATION_ID = LOG_TAG.hashCode();
 
   // Tracks the last seen storage hostname for backoff purposes.
   private static final String PREF_BACKOFF_STORAGE_HOST = "backoffStorageHost";
 
   // Used to do cheap in-memory rate limiting. Don't sync again if we
   // successfully synced within this duration.
   private static final int MINIMUM_SYNC_DELAY_MILLIS = 15 * 1000;        // 15 seconds.
   private volatile long lastSyncRealtimeMillis;
 
+  // Non-user initiated sync can't take longer than 30 minutes.
+  // To ensure we're not churning through device's battery/resources, we limit sync to 10 minutes,
+  // and request a re-sync if we hit that deadline.
+  private static final long SYNC_DEADLINE_DELTA_MILLIS = TimeUnit.MINUTES.toMillis(10);
+
   protected final ExecutorService executor;
   protected final FxAccountNotificationManager notificationManager;
 
   public FxAccountSyncAdapter(Context context, boolean autoInitialize) {
     super(context, autoInitialize);
     this.executor = Executors.newSingleThreadExecutor();
     this.notificationManager = new FxAccountNotificationManager(NOTIFICATION_ID);
   }
@@ -112,18 +119,25 @@ public class FxAccountSyncAdapter extend
       super.postponeSync(millis);
     }
 
     @Override
     public void rejectSync() {
       super.rejectSync();
     }
 
+    /* package-local */ void requestFollowUpSync(String stage) {
+      this.stageNamesForFollowUpSync.add(stage);
+    }
+
     protected final Collection<String> stageNamesToSync;
 
+    // Keeps track of incomplete stages during this sync that need to be re-synced once we're done.
+    private final List<String> stageNamesForFollowUpSync = Collections.synchronizedList(new ArrayList<String>());
+
     public SyncDelegate(BlockingQueue<Result> latch, SyncResult syncResult, AndroidFxAccount fxAccount, Collection<String> stageNamesToSync) {
       super(latch, syncResult);
       this.stageNamesToSync = Collections.unmodifiableCollection(stageNamesToSync);
     }
 
     public Collection<String> getStageNamesToSync() {
       return this.stageNamesToSync;
     }
@@ -172,16 +186,25 @@ public class FxAccountSyncAdapter extend
           "Firefox Account informMigrated called, but it's not yet possible to migrate.  " +
           "Ignoring even though something is terribly wrong.");
     }
 
     @Override
     public void handleStageCompleted(Stage currentState, GlobalSession globalSession) {
     }
 
+    /**
+     * Schedule an incomplete stage for a follow-up sync.
+     */
+    @Override
+    public void handleIncompleteStage(Stage currentState,
+                                      GlobalSession globalSession) {
+      syncDelegate.requestFollowUpSync(currentState.getRepositoryName());
+    }
+
     @Override
     public void handleSuccess(GlobalSession globalSession) {
       Logger.info(LOG_TAG, "Global session succeeded.");
 
       // Get the number of clients, so we can schedule the sync interval accordingly.
       try {
         int otherClientsCount = globalSession.getClientsDelegate().getClientsCount();
         Logger.debug(LOG_TAG, "" + otherClientsCount + " other client(s).");
@@ -226,26 +249,26 @@ public class FxAccountSyncAdapter extend
     if (forced) {
       Logger.info(LOG_TAG, "Forced sync (" + kind + "): overruling remaining backoff of " + delay + "ms.");
     } else {
       Logger.info(LOG_TAG, "Not syncing (" + kind + "): must wait another " + delay + "ms.");
     }
     return forced;
   }
 
-  protected void syncWithAssertion(final String audience,
-                                   final String assertion,
+  protected void syncWithAssertion(final String assertion,
                                    final URI tokenServerEndpointURI,
                                    final BackoffHandler tokenBackoffHandler,
                                    final SharedPreferences sharedPrefs,
                                    final KeyBundle syncKeyBundle,
                                    final String clientState,
                                    final SessionCallback callback,
                                    final Bundle extras,
-                                   final AndroidFxAccount fxAccount) {
+                                   final AndroidFxAccount fxAccount,
+                                   final long syncDeadline) {
     final TokenServerClientDelegate delegate = new TokenServerClientDelegate() {
       private boolean didReceiveBackoff = false;
 
       @Override
       public String getUserAgent() {
         return FxAccountConstants.USER_AGENT;
       }
 
@@ -316,17 +339,17 @@ public class FxAccountSyncAdapter extend
           final Context context = getContext();
           final SyncConfiguration syncConfig = new SyncConfiguration(token.uid, authHeaderProvider, sharedPrefs, syncKeyBundle);
 
           Collection<String> knownStageNames = SyncConfiguration.validEngineNames();
           syncConfig.stagesToSync = Utils.getStagesToSyncFromBundle(knownStageNames, extras);
           syncConfig.setClusterURL(storageServerURI);
 
           globalSession = new GlobalSession(syncConfig, callback, context, clientsDataDelegate);
-          globalSession.start();
+          globalSession.start(syncDeadline);
         } catch (Exception e) {
           callback.handleError(globalSession, e);
           return;
         }
       }
 
       @Override
       public void handleFailure(TokenServerException e) {
@@ -381,16 +404,20 @@ public class FxAccountSyncAdapter extend
   @Override
   public void onPerformSync(final Account account, final Bundle extras, final String authority, ContentProviderClient provider, final SyncResult syncResult) {
     Logger.setThreadLogTag(FxAccountConstants.GLOBAL_LOG_TAG);
     Logger.resetLogging();
 
     final Context context = getContext();
     final AndroidFxAccount fxAccount = new AndroidFxAccount(context, account);
 
+    // NB: we use elapsedRealtime which is time since boot, to ensure our clock is monotonic and isn't
+    // paused while the CPU is in power-saving mode.
+    final long syncDeadline = SystemClock.elapsedRealtime() + SYNC_DEADLINE_DELTA_MILLIS;
+
     Logger.info(LOG_TAG, "Syncing FxAccount" +
         " account named like " + Utils.obfuscateEmail(account.name) +
         " for authority " + authority +
         " with instance " + this + ".");
 
     Logger.info(LOG_TAG, "Account last synced at: " + fxAccount.getLastSyncedTimestamp());
 
     if (FxAccountUtils.LOG_PERSONAL_INFORMATION) {
@@ -531,17 +558,19 @@ public class FxAccountSyncAdapter extend
               Logger.info(LOG_TAG, "Not syncing (token server).");
               syncDelegate.postponeSync(tokenBackoffHandler.delayMilliseconds());
               return;
             }
 
             final SessionCallback sessionCallback = new SessionCallback(syncDelegate, schedulePolicy);
             final KeyBundle syncKeyBundle = married.getSyncKeyBundle();
             final String clientState = married.getClientState();
-            syncWithAssertion(audience, assertion, tokenServerEndpointURI, tokenBackoffHandler, sharedPrefs, syncKeyBundle, clientState, sessionCallback, extras, fxAccount);
+            syncWithAssertion(
+                    assertion, tokenServerEndpointURI, tokenBackoffHandler, sharedPrefs,
+                    syncKeyBundle, clientState, sessionCallback, extras, fxAccount, syncDeadline);
 
             // Register the device if necessary (asynchronous, in another thread)
             if (fxAccount.getDeviceRegistrationVersion() != FxAccountDeviceRegistrator.DEVICE_REGISTRATION_VERSION
                 || TextUtils.isEmpty(fxAccount.getDeviceId())) {
               FxAccountDeviceRegistrator.register(context);
             }
 
             // Force fetch the profile avatar information. (asynchronous, in another thread)
@@ -557,12 +586,29 @@ public class FxAccountSyncAdapter extend
       latch.take();
     } catch (Exception e) {
       Logger.error(LOG_TAG, "Got error syncing.", e);
       syncDelegate.handleError(e);
     } finally {
       fxAccount.releaseSharedAccountStateLock();
     }
 
-    Logger.info(LOG_TAG, "Syncing done.");
+    // If there are any incomplete stages, request a follow-up sync. Otherwise, we're done.
+    // An incomplete stage is:
+    // - one that hit a 412 error during either upload or download of data, indicating that
+    //   its collection has been modified remotely, or
+    // - one that hit a sync deadline
+    final String[] stagesToSyncAgain;
+    synchronized (syncDelegate.stageNamesForFollowUpSync) {
+      stagesToSyncAgain = syncDelegate.stageNamesForFollowUpSync.toArray(
+              new String[syncDelegate.stageNamesForFollowUpSync.size()]
+      );
+    }
+
+    if (stagesToSyncAgain.length > 0) {
+      Logger.info(LOG_TAG, "Syncing done. Requesting an immediate follow-up sync.");
+      fxAccount.requestImmediateSync(stagesToSyncAgain, null);
+    } else {
+      Logger.info(LOG_TAG, "Syncing done.");
+    }
     lastSyncRealtimeMillis = SystemClock.elapsedRealtime();
   }
 }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/CollectionConcurrentModificationException.java
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync;
+
+/**
+ * Thrown when a collection has been modified by another client while we were either
+ * downloading from it or uploading to it.
+ *
+ * @author grisha
+ */
+public class CollectionConcurrentModificationException extends ReflowIsNecessaryException {
+    private static final long serialVersionUID = 2701457832508838524L;
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/GlobalSession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/GlobalSession.java
@@ -22,16 +22,17 @@ import org.mozilla.gecko.sync.net.BaseRe
 import org.mozilla.gecko.sync.net.HttpResponseObserver;
 import org.mozilla.gecko.sync.net.SyncResponse;
 import org.mozilla.gecko.sync.net.SyncStorageRecordRequest;
 import org.mozilla.gecko.sync.net.SyncStorageRequest;
 import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.stage.AndroidBrowserBookmarksServerSyncStage;
 import org.mozilla.gecko.sync.stage.AndroidBrowserHistoryServerSyncStage;
+import org.mozilla.gecko.sync.stage.AndroidBrowserRecentHistoryServerSyncStage;
 import org.mozilla.gecko.sync.stage.CheckPreconditionsStage;
 import org.mozilla.gecko.sync.stage.CompletedStage;
 import org.mozilla.gecko.sync.stage.EnsureCrypto5KeysStage;
 import org.mozilla.gecko.sync.stage.FennecTabsServerSyncStage;
 import org.mozilla.gecko.sync.stage.FetchInfoCollectionsStage;
 import org.mozilla.gecko.sync.stage.FetchInfoConfigurationStage;
 import org.mozilla.gecko.sync.stage.FetchMetaGlobalStage;
 import org.mozilla.gecko.sync.stage.FormHistoryServerSyncStage;
@@ -69,16 +70,18 @@ public class GlobalSession implements Ht
 
   protected Map<Stage, GlobalSyncStage> stages;
   public Stage currentState = Stage.idle;
 
   public final GlobalSessionCallback callback;
   protected final Context context;
   protected final ClientsDataDelegate clientsDelegate;
 
+  private long syncDeadline;
+
   /**
    * Map from engine name to new settings for an updated meta/global record.
    * Engines to remove will have <code>null</code> EngineSettings.
    */
   public final Map<String, EngineSettings> enginesToUpdate = new HashMap<String, EngineSettings>();
 
    /*
    * Key accessors.
@@ -181,19 +184,24 @@ public class GlobalSession implements Ht
     stages.put(Stage.fetchInfoConfiguration,  new FetchInfoConfigurationStage(
             config.infoConfigurationURL(), getAuthHeaderProvider()));
     stages.put(Stage.ensureKeysStage,         new EnsureCrypto5KeysStage());
 
     stages.put(Stage.syncClientsEngine,       new SyncClientsEngineStage());
 
     stages.put(Stage.syncTabs,                new FennecTabsServerSyncStage());
     stages.put(Stage.syncPasswords,           new PasswordsServerSyncStage());
+
+    // Will only run if syncFullHistory stage never completed.
+    // Bug 1316110 tracks follow up work to improve efficiency of this stage.
+    stages.put(Stage.syncRecentHistory,       new AndroidBrowserRecentHistoryServerSyncStage());
+
     stages.put(Stage.syncBookmarks,           new AndroidBrowserBookmarksServerSyncStage());
-    stages.put(Stage.syncHistory,             new AndroidBrowserHistoryServerSyncStage());
     stages.put(Stage.syncFormHistory,         new FormHistoryServerSyncStage());
+    stages.put(Stage.syncFullHistory,         new AndroidBrowserHistoryServerSyncStage());
 
     stages.put(Stage.uploadMetaGlobal,        new UploadMetaGlobalStage());
     stages.put(Stage.completed,               new CompletedStage());
 
     this.stages = Collections.unmodifiableMap(stages);
   }
 
   public GlobalSyncStage getSyncStageByName(String name) throws NoSuchStageException {
@@ -229,16 +237,20 @@ public class GlobalSession implements Ht
         out.add(stage);
       } catch (NoSuchStageException e) {
         Logger.warn(LOG_TAG, "Unable to find stage with name " + name);
       }
     }
     return out;
   }
 
+  public long getSyncDeadline() {
+    return syncDeadline;
+  }
+
   /**
    * Advance and loop around the stages of a sync.
    * @param current
    * @return
    *        The next stage to execute.
    */
   public static Stage nextStage(Stage current) {
     int index = current.ordinal() + 1;
@@ -288,35 +300,40 @@ public class GlobalSession implements Ht
    * <ul>
    * <li>Verifying that any backoffs/minimum next sync requests are respected.</li>
    * <li>Ensuring that the device is online.</li>
    * <li>Ensuring that dependencies are ready.</li>
    * </ul>
    *
    * @throws AlreadySyncingException
    */
-  public void start() throws AlreadySyncingException {
+  public void start(final long syncDeadline) throws AlreadySyncingException {
     if (this.currentState != GlobalSyncStage.Stage.idle) {
       throw new AlreadySyncingException(this.currentState);
     }
+
+    // Make the deadline value available to stages via its getter.
+    this.syncDeadline = syncDeadline;
+
     installAsHttpResponseObserver(); // Uninstalled by completeSync or abort.
     this.advance();
   }
 
   /**
    * Stop this sync and start again.
    * @throws AlreadySyncingException
    */
   protected void restart() throws AlreadySyncingException {
     this.currentState = GlobalSyncStage.Stage.idle;
     if (callback.shouldBackOffStorage()) {
       this.callback.handleAborted(this, "Told to back off.");
       return;
     }
-    this.start();
+    // Restart with the same deadline as before.
+    this.start(syncDeadline);
   }
 
   /**
    * We're finished (aborted or succeeded): release resources.
    */
   protected void cleanUp() {
     uninstallAsHttpResponseObserver();
     this.stages = null;
@@ -457,16 +474,21 @@ public class GlobalSession implements Ht
       //  e is null, or we aborted for a non-HTTP reason; okay to upload new meta/global record.
       if (this.hasUpdatedMetaGlobal()) {
         this.uploadUpdatedMetaGlobal(); // Only logs errors; does not call abort.
       }
     }
     this.callback.handleError(this, e);
   }
 
+  public void handleIncompleteStage() {
+    // Let our delegate know that current stage is incomplete and needs to be synced again.
+    callback.handleIncompleteStage(this.currentState, this);
+  }
+
   public void handleHTTPError(SyncStorageResponse response, String reason) {
     // TODO: handling of 50x (backoff), 401 (node reassignment or auth error).
     // Fall back to aborting.
     Logger.warn(LOG_TAG, "Aborting sync due to HTTP " + response.getStatusCode());
     this.interpretHTTPFailure(response.httpResponse());
     this.abort(new HTTPFailureException(response), reason);
   }
 
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/ReflowIsNecessaryException.java
@@ -0,0 +1,21 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync;
+
+/**
+ * Used by SynchronizerSession to indicate that reflow of a stage is necessary.
+ * To reflow a stage is to request that it is synced again. Depending on the stage and its current
+ * state (last-synced timestamp, resume context, high-water-mark) we might resume, or sync from a
+ * high-water-mark if allowed, or sync regularly from last-synced timestamp.
+ * A re-sync of a stage is no different from a regular sync of the same stage.
+ *
+ * Stages which complete only partially due to hitting a concurrent collection modification error or
+ * hitting a sync deadline should be re-synced as soon as possible.
+ *
+ * @author grisha
+ */
+public class ReflowIsNecessaryException extends Exception {
+    private static final long serialVersionUID = -2614772437814638768L;
+}
rename from mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11PreviousPostFailedException.java
rename to mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server15PreviousPostFailedException.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11PreviousPostFailedException.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server15PreviousPostFailedException.java
@@ -2,11 +2,11 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync;
 
 /**
  * A previous POST failed, so we won't send any more records this session.
  */
-public class Server11PreviousPostFailedException extends SyncException {
+public class Server15PreviousPostFailedException extends SyncException {
   private static final long serialVersionUID = -3582490631414624310L;
 }
rename from mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11RecordPostFailedException.java
rename to mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server15RecordPostFailedException.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server11RecordPostFailedException.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/Server15RecordPostFailedException.java
@@ -2,11 +2,11 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync;
 
 /**
  * The server rejected a record in its "failure" array.
  */
-public class Server11RecordPostFailedException extends SyncException {
+public class Server15RecordPostFailedException extends SyncException {
   private static final long serialVersionUID = -8517471217486190314L;
 }
deleted file mode 100644
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/Sync11Configuration.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package org.mozilla.gecko.sync;
-
-import java.net.URI;
-
-import org.mozilla.gecko.background.common.log.Logger;
-import org.mozilla.gecko.sync.crypto.KeyBundle;
-import org.mozilla.gecko.sync.net.AuthHeaderProvider;
-
-import android.content.SharedPreferences;
-import android.content.SharedPreferences.Editor;
-
-/**
- * Override SyncConfiguration to restore the old behavior of clusterURL --
- * that is, a URL without the protocol version etc.
- *
- */
-public class Sync11Configuration extends SyncConfiguration {
-  private static final String LOG_TAG = "Sync11Configuration";
-  private static final String API_VERSION = "1.1";
-
-  public Sync11Configuration(String username,
-                             AuthHeaderProvider authHeaderProvider,
-                             SharedPreferences prefs) {
-    super(username, authHeaderProvider, prefs);
-  }
-
-  public Sync11Configuration(String username,
-                             AuthHeaderProvider authHeaderProvider,
-                             SharedPreferences prefs,
-                             KeyBundle keyBundle) {
-    super(username, authHeaderProvider, prefs, keyBundle);
-  }
-
-  @Override
-  public String getAPIVersion() {
-    return API_VERSION;
-  }
-
-  @Override
-  public String storageURL() {
-    return clusterURL + API_VERSION + "/" + username + "/storage";
-  }
-
-  @Override
-  protected String infoBaseURL() {
-    return clusterURL + API_VERSION + "/" + username + "/info/";
-  }
-
-  protected void setAndPersistClusterURL(URI u, SharedPreferences prefs) {
-    boolean shouldPersist = (prefs != null) && (clusterURL == null);
-
-    Logger.trace(LOG_TAG, "Setting cluster URL to " + u.toASCIIString() +
-                          (shouldPersist ? ". Persisting." : ". Not persisting."));
-    clusterURL = u;
-    if (shouldPersist) {
-      Editor edit = prefs.edit();
-      edit.putString(PREF_CLUSTER_URL, clusterURL.toASCIIString());
-      edit.commit();
-    }
-  }
-
-  protected void setClusterURL(URI u, SharedPreferences prefs) {
-    if (u == null) {
-      Logger.warn(LOG_TAG, "Refusing to set cluster URL to null.");
-      return;
-    }
-    URI uri = u.normalize();
-    if (uri.toASCIIString().endsWith("/")) {
-      setAndPersistClusterURL(u, prefs);
-      return;
-    }
-    setAndPersistClusterURL(uri.resolve("/"), prefs);
-    Logger.trace(LOG_TAG, "Set cluster URL to " + clusterURL.toASCIIString() + ", given input " + u.toASCIIString());
-  }
-
-  @Override
-  public void setClusterURL(URI u) {
-    setClusterURL(u, this.getPrefs());
-  }
-}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/SyncDeadlineReachedException.java
@@ -0,0 +1,14 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync;
+
+/**
+ * Thrown when we've hit a self-imposed sync deadline, and decided not to proceed.
+ *
+ * @author grisha
+ */
+public class SyncDeadlineReachedException extends ReflowIsNecessaryException {
+    private static final long serialVersionUID = 2305367921350245484L;
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/delegates/GlobalSessionCallback.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/delegates/GlobalSessionCallback.java
@@ -33,16 +33,17 @@ public interface GlobalSessionCallback {
    * This account should stop syncing immediately, and arrange to delete itself.
    */
   void informMigrated(GlobalSession session);
 
   void handleAborted(GlobalSession globalSession, String reason);
   void handleError(GlobalSession globalSession, Exception ex);
   void handleSuccess(GlobalSession globalSession);
   void handleStageCompleted(Stage currentState, GlobalSession globalSession);
+  void handleIncompleteStage(Stage currentState, GlobalSession globalSession);
 
   /**
    * Called when a {@link GlobalSession} wants to know if it should continue
    * to make storage requests.
    *
    * @return false if the session should make no further requests.
    */
   boolean shouldBackOffStorage();
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/BufferingMiddlewareRepository.java
@@ -0,0 +1,60 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.middleware;
+
+import android.content.Context;
+
+import org.mozilla.gecko.sync.middleware.storage.BufferStorage;
+import org.mozilla.gecko.sync.repositories.Repository;
+import org.mozilla.gecko.sync.repositories.RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
+
+/**
+ * A buffering-enabled middleware which is intended to wrap local repositories. Configurable with
+ * a sync deadline, buffer storage implementation and a consistency checker implementation.
+ *
+ * @author grisha
+ */
+public class BufferingMiddlewareRepository extends MiddlewareRepository {
+    private final long syncDeadline;
+    private final Repository inner;
+    private final BufferStorage bufferStorage;
+
+    private class BufferingMiddlewareRepositorySessionCreationDelegate extends MiddlewareRepository.SessionCreationDelegate {
+        private final BufferingMiddlewareRepository repository;
+        private final RepositorySessionCreationDelegate outerDelegate;
+
+        private BufferingMiddlewareRepositorySessionCreationDelegate(BufferingMiddlewareRepository repository, RepositorySessionCreationDelegate outerDelegate) {
+            this.repository = repository;
+            this.outerDelegate = outerDelegate;
+        }
+
+        @Override
+        public void onSessionCreateFailed(Exception ex) {
+            this.outerDelegate.onSessionCreateFailed(ex);
+        }
+
+        @Override
+        public void onSessionCreated(RepositorySession session) {
+            outerDelegate.onSessionCreated(new BufferingMiddlewareRepositorySession(
+                    session, this.repository, syncDeadline, bufferStorage
+            ));
+        }
+    }
+
+    public BufferingMiddlewareRepository(long syncDeadline, BufferStorage bufferStore, Repository wrappedRepository) {
+        this.syncDeadline = syncDeadline;
+        this.inner = wrappedRepository;
+        this.bufferStorage = bufferStore;
+    }
+
+    @Override
+    public void createSession(RepositorySessionCreationDelegate delegate, Context context) {
+        this.inner.createSession(
+                new BufferingMiddlewareRepositorySessionCreationDelegate(this, delegate),
+                context
+        );
+    }
+}
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/BufferingMiddlewareRepositorySession.java
@@ -0,0 +1,175 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.middleware;
+
+import android.os.SystemClock;
+import android.support.annotation.VisibleForTesting;
+
+import org.mozilla.gecko.sync.SyncDeadlineReachedException;
+import org.mozilla.gecko.sync.middleware.storage.BufferStorage;
+import org.mozilla.gecko.sync.repositories.InactiveSessionException;
+import org.mozilla.gecko.sync.repositories.NoStoreDelegateException;
+import org.mozilla.gecko.sync.repositories.RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
+import org.mozilla.gecko.sync.repositories.domain.Record;
+
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * Buffering middleware which is intended to wrap local RepositorySessions.
+ *
+ * Configure it:
+ *  - with an appropriate BufferStore (in-memory, record-type-aware database-backed, etc).
+ *
+ *  Fetch is pass-through, store is buffered.
+ *
+ * @author grisha
+ */
+/* package-local */ class BufferingMiddlewareRepositorySession extends MiddlewareRepositorySession {
+    private final BufferStorage bufferStorage;
+    private final long syncDeadlineMillis;
+
+    private ExecutorService storeDelegateExecutor = Executors.newSingleThreadExecutor();
+
+    /* package-local */ BufferingMiddlewareRepositorySession(
+            RepositorySession repositorySession, MiddlewareRepository repository,
+            long syncDeadlineMillis, BufferStorage bufferStorage) {
+        super(repositorySession, repository);
+        this.syncDeadlineMillis = syncDeadlineMillis;
+        this.bufferStorage = bufferStorage;
+    }
+
+    @Override
+    public void fetchSince(long timestamp, RepositorySessionFetchRecordsDelegate delegate) {
+        this.inner.fetchSince(timestamp, delegate);
+    }
+
+    @Override
+    public void fetch(String[] guids, RepositorySessionFetchRecordsDelegate delegate) throws InactiveSessionException {
+        this.inner.fetch(guids, delegate);
+    }
+
+    @Override
+    public void fetchAll(RepositorySessionFetchRecordsDelegate delegate) {
+        this.inner.fetchAll(delegate);
+    }
+
+    /**
+     * Will be called when this repository is acting as a `source`, and a flow of records into `sink`
+     * was completed. That is, we've uploaded merged records to the server, so now is a good time
+     * to clean up our buffer for this repository.
+     */
+    @Override
+    public void performCleanup() {
+        bufferStorage.clear();
+    }
+
+    @Override
+    public void store(Record record) throws NoStoreDelegateException {
+        bufferStorage.addOrReplace(record);
+    }
+
+    /**
+     * When source fails to provide all records, we need to decide what to do with the buffer.
+     * We might fail because of a network partition, or because of a concurrent modification of a
+     * collection, or because we ran out of time fetching records, or some other reason.
+     *
+     * Either way we do not clear the buffer in any error scenario, but rather
+     * allow it to be re-filled, replacing existing records with their newer versions if necessary.
+     *
+     * If a collection has been modified, affected records' last-modified timestamps will be bumped,
+     * and we will receive those records during the next sync. If we already have them in our buffer,
+     * we replace our now-old copy. Otherwise, they are new records and we just append them.
+     *
+     * Incoming records are mapped to existing ones via GUIDs.
+     */
+    @Override
+    public void storeIncomplete() {
+        bufferStorage.flush();
+    }
+
+    @Override
+    public void storeDone() {
+        storeDone(System.currentTimeMillis());
+    }
+
+    @Override
+    public void storeFlush() {
+        bufferStorage.flush();
+    }
+
+    @Override
+    public void storeDone(final long end) {
+        bufferStorage.flush();
+
+        // Determine if we have enough time to merge the buffer data.
+        // If we don't have enough time now, we keep our buffer and try again later.
+        if (!mayProceedToMergeBuffer()) {
+            super.abort();
+            storeDelegate.deferredStoreDelegate(storeDelegateExecutor).onStoreFailed(new SyncDeadlineReachedException());
+            return;
+        }
+
+        doMergeBuffer(end);
+    }
+
+    @VisibleForTesting
+    /* package-local */ void doMergeBuffer(long end) {
+        final Collection<Record> bufferData = bufferStorage.all();
+
+        // Trivial case of an empty buffer.
+        if (bufferData.isEmpty()) {
+            super.storeDone(end);
+            return;
+        }
+
+        // Let session handle actual storing of records as it pleases.
+        // See Bug 1332094 which is concerned with allowing merge to proceed transactionally.
+        try {
+            for (Record record : bufferData) {
+                this.inner.store(record);
+            }
+        } catch (NoStoreDelegateException e) {
+            // At this point we should have a store delegate set on the session, so this won't happen.
+        }
+
+        // Let session know that there are no more records to store.
+        super.storeDone(end);
+    }
+
+    /**
+     * Session abnormally aborted. This doesn't mean our so-far buffered data is invalid.
+     * Clean up after ourselves, if there's anything to clean up.
+     */
+    @Override
+    public void abort() {
+        bufferStorage.flush();
+        super.abort();
+    }
+
+    @Override
+    public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
+        inner.setStoreDelegate(delegate);
+        this.storeDelegate = delegate;
+    }
+
+    private boolean mayProceedToMergeBuffer() {
+        // If our buffer storage is not persistent, disallowing merging after buffer has been filled
+        // means throwing away records only to re-download them later.
+        // In this case allow merge to proceed even if we're past the deadline.
+        if (!bufferStorage.isPersistent()) {
+            return true;
+        }
+
+        // While actual runtime of a merge operation is a function of record type, buffer size, etc.,
+     * let's do a simple thing for now and say that we may proceed if we have a couple of minutes
+        // of runtime left. That surely is enough, right?
+        final long timeLeftMillis = syncDeadlineMillis - SystemClock.elapsedRealtime();
+        return timeLeftMillis > 1000 * 60 * 2;
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/Crypto5MiddlewareRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/Crypto5MiddlewareRepositorySession.java
@@ -24,17 +24,17 @@ import org.mozilla.gecko.sync.repositori
  * Takes a RecordFactory as a parameter. This is in charge of taking decrypted CryptoRecords
  * as input and producing some expected kind of Record as output for local use.
  *
  *
 
 
 
                  +------------------------------------+
-                 |    Server11RepositorySession       |
+                 |    Server15RepositorySession       |
                  +-------------------------+----------+
                            ^               |
                            |               |
                         Encrypted CryptoRecords
                            |               |
                            |               v
                  +---------+--------------------------+
                  | Crypto5MiddlewareRepositorySession |
@@ -74,52 +74,57 @@ public class Crypto5MiddlewareRepository
 
     DecryptingTransformingFetchDelegate(RepositorySessionFetchRecordsDelegate next, KeyBundle bundle, RecordFactory recordFactory) {
       this.next = next;
       this.keyBundle = bundle;
       this.recordFactory = recordFactory;
     }
 
     @Override
-    public void onFetchFailed(Exception ex, Record record) {
-      next.onFetchFailed(ex, record);
+    public void onFetchFailed(Exception ex) {
+      next.onFetchFailed(ex);
     }
 
     @Override
     public void onFetchedRecord(Record record) {
       CryptoRecord r;
       try {
         r = (CryptoRecord) record;
       } catch (ClassCastException e) {
-        next.onFetchFailed(e, record);
+        next.onFetchFailed(e);
         return;
       }
       r.keyBundle = keyBundle;
       try {
         r.decrypt();
       } catch (Exception e) {
-        next.onFetchFailed(e, r);
+        next.onFetchFailed(e);
         return;
       }
       Record transformed;
       try {
         transformed = this.recordFactory.createRecord(r);
       } catch (Exception e) {
-        next.onFetchFailed(e, r);
+        next.onFetchFailed(e);
         return;
       }
       next.onFetchedRecord(transformed);
     }
 
     @Override
     public void onFetchCompleted(final long fetchEnd) {
       next.onFetchCompleted(fetchEnd);
     }
 
     @Override
+    public void onBatchCompleted() {
+      next.onBatchCompleted();
+    }
+
+    @Override
     public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor) {
       // Synchronously perform *our* work, passing through appropriately.
       RepositorySessionFetchRecordsDelegate deferredNext = next.deferredFetchDelegate(executor);
       return new DecryptingTransformingFetchDelegate(deferredNext, keyBundle, recordFactory);
     }
   }
 
   private DecryptingTransformingFetchDelegate makeUnwrappingDelegate(RepositorySessionFetchRecordsDelegate inner) {
@@ -145,28 +150,28 @@ public class Crypto5MiddlewareRepository
   public void fetchAll(RepositorySessionFetchRecordsDelegate delegate) {
     inner.fetchAll(makeUnwrappingDelegate(delegate));
   }
 
   @Override
   public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
     // TODO: it remains to be seen how this will work.
     inner.setStoreDelegate(delegate);
-    this.delegate = delegate;             // So we can handle errors without involving inner.
+    this.storeDelegate = delegate;             // So we can handle errors without involving inner.
   }
 
   @Override
   public void store(Record record) throws NoStoreDelegateException {
-    if (delegate == null) {
+    if (storeDelegate == null) {
       throw new NoStoreDelegateException();
     }
     CryptoRecord rec = record.getEnvelope();
     rec.keyBundle = this.keyBundle;
     try {
       rec.encrypt();
     } catch (UnsupportedEncodingException | CryptoException e) {
-      delegate.onRecordStoreFailed(e, record.guid);
+      storeDelegate.onRecordStoreFailed(e, record.guid);
       return;
     }
     // Allow the inner session to do delegate handling.
     inner.store(rec);
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/MiddlewareRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/MiddlewareRepositorySession.java
@@ -149,16 +149,21 @@ public abstract class MiddlewareReposito
 
   @Override
   public void guidsSince(long timestamp, RepositorySessionGuidsSinceDelegate delegate) {
     // TODO: need to do anything here?
     inner.guidsSince(timestamp, delegate);
   }
 
   @Override
+  public void storeIncomplete() {
+    inner.storeIncomplete();
+  }
+
+  @Override
   public void storeDone() {
     inner.storeDone();
   }
 
   @Override
   public void storeDone(long storeEnd) {
     inner.storeDone(storeEnd);
   }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/storage/BufferStorage.java
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.middleware.storage;
+
+import org.mozilla.gecko.sync.repositories.domain.Record;
+
+import java.util.Collection;
+
+/**
+ * A contract between BufferingMiddleware and specific storage implementations.
+ *
+ * @author grisha
+ */
+public interface BufferStorage {
+    // Returns all of the records currently present in the buffer.
+    Collection<Record> all();
+
+    // Implementations are responsible for ensuring that any incoming records with duplicate GUIDs replace
+    // what's already present in the storage layer.
+    // NB: For a database-backed storage, "replace" happens at a transaction level.
+    void addOrReplace(Record record);
+
+    // For database-backed implementations, commits any records that came in up to this point.
+    void flush();
+
+    void clear();
+
+    boolean isPersistent();
+}
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/middleware/storage/MemoryBufferStorage.java
@@ -0,0 +1,51 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.middleware.storage;
+
+import org.mozilla.gecko.sync.repositories.domain.Record;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A trivial, memory-backed, transient implementation of a BufferStorage.
+ * Its intended use is to buffer syncing of small collections.
+ * Thread-safe.
+ *
+ * @author grisha
+ */
+public class MemoryBufferStorage implements BufferStorage {
+    private final Map<String, Record> recordBuffer = Collections.synchronizedMap(new HashMap<String, Record>());
+
+    @Override
+    public boolean isPersistent() {
+        return false;
+    }
+
+    @Override
+    public Collection<Record> all() {
+        synchronized (recordBuffer) {
+            return new ArrayList<>(recordBuffer.values());
+        }
+    }
+
+    @Override
+    public void addOrReplace(Record record) {
+        recordBuffer.put(record.guid, record);
+    }
+
+    @Override
+    public void flush() {
+        // This is a no-op; flush is intended for database-backed stores.
+    }
+
+    @Override
+    public void clear() {
+        recordBuffer.clear();
+    }
+}
rename from mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConstrainedServer11Repository.java
rename to mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConfigurableServer15Repository.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConstrainedServer11Repository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConfigurableServer15Repository.java
@@ -4,48 +4,81 @@
 
 package org.mozilla.gecko.sync.repositories;
 
 import java.net.URISyntaxException;
 
 import org.mozilla.gecko.sync.InfoCollections;
 import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.stage.ServerSyncStage;
 
 /**
- * A kind of Server11Repository that supports explicit setting of total fetch limit, per-batch fetch limit, and a sort order.
+ * A kind of Server15Repository that supports explicit setting of:
+ * - per-batch fetch limit
+ * - batching mode (single batch vs multi-batch)
+ * - sort order
+ * - repository state provider (persistent vs non-persistent)
+ * - whether use of high-water-mark is allowed
  *
  * @author rnewman
  *
  */
-public class ConstrainedServer11Repository extends Server11Repository {
-
-  private final String sort;
+public class ConfigurableServer15Repository extends Server15Repository {
+  private final String sortOrder;
   private final long batchLimit;
-  private final long totalLimit;
+  private final ServerSyncStage.MultipleBatches multipleBatches;
+  private final ServerSyncStage.HighWaterMark highWaterMark;
 
-  public ConstrainedServer11Repository(String collection, String storageURL,
-                                       AuthHeaderProvider authHeaderProvider,
-                                       InfoCollections infoCollections,
-                                       InfoConfiguration infoConfiguration,
-                                       long batchLimit, long totalLimit, String sort)
-          throws URISyntaxException {
-    super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration);
+  public ConfigurableServer15Repository(
+          String collection,
+          long syncDeadline,
+          String storageURL,
+          AuthHeaderProvider authHeaderProvider,
+          InfoCollections infoCollections,
+          InfoConfiguration infoConfiguration,
+          long batchLimit,
+          String sort,
+          ServerSyncStage.MultipleBatches multipleBatches,
+          ServerSyncStage.HighWaterMark highWaterMark,
+          RepositoryStateProvider stateProvider) throws URISyntaxException {
+    super(
+            collection,
+            syncDeadline,
+            storageURL,
+            authHeaderProvider,
+            infoCollections,
+            infoConfiguration,
+            stateProvider
+    );
     this.batchLimit = batchLimit;
-    this.totalLimit = totalLimit;
-    this.sort  = sort;
+    this.sortOrder  = sort;
+    this.multipleBatches = multipleBatches;
+    this.highWaterMark = highWaterMark;
+
+    // Sanity check: let's ensure we're configured correctly. At this point in time, it doesn't make
+    // sense to use H.W.M. with a non-persistent state provider. This might change if we start retrying
+    // during a download in case of 412s.
+    if (!stateProvider.isPersistent() && highWaterMark.equals(ServerSyncStage.HighWaterMark.Enabled)) {
+      throw new IllegalArgumentException("Can not use H.W.M. with NonPersistentRepositoryStateProvider");
+    }
   }
 
   @Override
-  public String getDefaultSort() {
-    return sort;
+  public String getSortOrder() {
+    return sortOrder;
   }
 
   @Override
-  public long getDefaultBatchLimit() {
+  public Long getBatchLimit() {
     return batchLimit;
   }
 
   @Override
-  public long getDefaultTotalLimit() {
-    return totalLimit;
+  public boolean getAllowMultipleBatches() {
+    return multipleBatches.equals(ServerSyncStage.MultipleBatches.Enabled);
+  }
+
+  @Override
+  public boolean getAllowHighWaterMark() {
+    return highWaterMark.equals(ServerSyncStage.HighWaterMark.Enabled);
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/InactiveSessionException.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/InactiveSessionException.java
@@ -5,13 +5,17 @@
 package org.mozilla.gecko.sync.repositories;
 
 import org.mozilla.gecko.sync.SyncException;
 
 public class InactiveSessionException extends SyncException {
 
   private static final long serialVersionUID = 537241160815940991L;
 
+  public InactiveSessionException() {
+    super();
+  }
+
   public InactiveSessionException(Exception ex) {
     super(ex);
   }
 
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/InvalidRequestException.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/InvalidRequestException.java
@@ -5,12 +5,12 @@
 package org.mozilla.gecko.sync.repositories;
 
 import org.mozilla.gecko.sync.SyncException;
 
 public class InvalidRequestException extends SyncException {
 
   private static final long serialVersionUID = 4502951350743608243L;
 
-  public InvalidRequestException(Exception ex) {
-    super(ex);
+  public InvalidRequestException() {
+    super();
   }
 }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/NonPersistentRepositoryStateProvider.java
@@ -0,0 +1,76 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories;
+
+import android.support.annotation.Nullable;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Simple non-persistent implementation of a repository state provider.
+ *
+ * Just like in the persistent implementation, changes to values are visible only after a commit.
+ *
+ * @author grisha
+ */
+public class NonPersistentRepositoryStateProvider implements RepositoryStateProvider {
+    // We'll have at least OFFSET and H.W.M. values set.
+    private final int INITIAL_CAPACITY = 2;
+    private final Map<String, Object> nonCommittedValuesMap = Collections.synchronizedMap(
+            new HashMap<String, Object>(INITIAL_CAPACITY)
+    );
+
+    // NB: Any changes are made by creating a new map instead of altering an existing one.
+    private volatile Map<String, Object> committedValuesMap = new HashMap<>(INITIAL_CAPACITY);
+
+    @Override
+    public boolean isPersistent() {
+        return false;
+    }
+
+    @Override
+    public boolean commit() {
+        committedValuesMap = new HashMap<>(nonCommittedValuesMap);
+        return true;
+    }
+
+    @Override
+    public NonPersistentRepositoryStateProvider clear(String key) {
+        nonCommittedValuesMap.remove(key);
+        return this;
+    }
+
+    @Override
+    public NonPersistentRepositoryStateProvider setString(String key, String value) {
+        nonCommittedValuesMap.put(key, value);
+        return this;
+    }
+
+    @Nullable
+    @Override
+    public String getString(String key) {
+        return (String) committedValuesMap.get(key);
+    }
+
+    @Override
+    public NonPersistentRepositoryStateProvider setLong(String key, Long value) {
+        nonCommittedValuesMap.put(key, value);
+        return this;
+    }
+
+    @Nullable
+    @Override
+    public Long getLong(String key) {
+        return (Long) committedValuesMap.get(key);
+    }
+
+    @Override
+    public boolean resetAndCommit() {
+        nonCommittedValuesMap.clear();
+        return commit();
+    }
+}
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/PersistentRepositoryStateProvider.java
@@ -0,0 +1,83 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories;
+
+import android.support.annotation.Nullable;
+
+import org.mozilla.gecko.background.common.PrefsBranch;
+
+/**
+ * Simple persistent implementation of a repository state provider.
+ * Uses provided PrefsBranch object in order to persist values.
+ *
+ * Values must be committed before they become visible via getters.
+ * It is a caller's responsibility to perform a commit.
+ *
+ * @author grisha
+ */
+public class PersistentRepositoryStateProvider implements RepositoryStateProvider {
+    private final PrefsBranch prefs;
+
+    private final PrefsBranch.Editor editor;
+
+    public PersistentRepositoryStateProvider(PrefsBranch prefs) {
+        this.prefs = prefs;
+        // NB: It is a caller's responsibility to commit any changes it performs via setters.
+        this.editor = prefs.edit();
+    }
+
+    @Override
+    public boolean isPersistent() {
+        return true;
+    }
+
+    @Override
+    public boolean commit() {
+        return this.editor.commit();
+    }
+
+    @Override
+    public PersistentRepositoryStateProvider clear(String key) {
+        this.editor.remove(key);
+        return this;
+    }
+
+    @Override
+    public PersistentRepositoryStateProvider setString(String key, String value) {
+        this.editor.putString(key, value);
+        return this;
+    }
+
+    @Nullable
+    @Override
+    public String getString(String key) {
+        return this.prefs.getString(key, null);
+    }
+
+    @Override
+    public PersistentRepositoryStateProvider setLong(String key, Long value) {
+        this.editor.putLong(key, value);
+        return this;
+    }
+
+    @Nullable
+    @Override
+    public Long getLong(String key) {
+        if (!this.prefs.contains(key)) {
+            return null;
+        }
+        return this.prefs.getLong(key, 0);
+    }
+
+    @Override
+    public boolean resetAndCommit() {
+        return this.editor
+                .remove(KEY_HIGH_WATER_MARK)
+                .remove(KEY_OFFSET)
+                .remove(KEY_OFFSET_ORDER)
+                .remove(KEY_OFFSET_SINCE)
+                .commit();
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/RepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/RepositorySession.java
@@ -19,17 +19,17 @@ import org.mozilla.gecko.sync.repositori
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionWipeDelegate;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 /**
  * A <code>RepositorySession</code> is created and used thusly:
  *
  *<ul>
  * <li>Construct, with a reference to its parent {@link Repository}, by calling
- *   {@link Repository#createSession(RepositorySessionCreationDelegate, android.content.Context)}.</li>
+ *   {@link Repository#createSession(org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate, android.content.Context)}.</li>
  * <li>Populate with saved information by calling {@link #unbundle(RepositorySessionBundle)}.</li>
  * <li>Begin a sync by calling {@link #begin(RepositorySessionBeginDelegate)}. <code>begin()</code>
  *   is an appropriate place to initialize expensive resources.</li>
  * <li>Perform operations such as {@link #fetchSince(long, RepositorySessionFetchRecordsDelegate)} and
  *   {@link #store(Record)}.</li>
  * <li>Finish by calling {@link #finish(RepositorySessionFinishDelegate)}, retrieving and storing
  *   the current bundle.</li>
  *</ul>
@@ -50,17 +50,17 @@ public abstract class RepositorySession 
   private static final String LOG_TAG = "RepositorySession";
 
   protected static void trace(String message) {
     Logger.trace(LOG_TAG, message);
   }
 
   private SessionStatus status = SessionStatus.UNSTARTED;
   protected Repository repository;
-  protected RepositorySessionStoreDelegate delegate;
+  protected RepositorySessionStoreDelegate storeDelegate;
 
   /**
    * A queue of Runnables which call out into delegates.
    */
   protected ExecutorService delegateQueue  = Executors.newSingleThreadExecutor();
 
   /**
    * A queue of Runnables which effect storing.
@@ -117,38 +117,53 @@ public abstract class RepositorySession 
    * * The store delegate will be notified of error or completion.
    *
    * This arrangement of calls allows for batching at the session level.
    *
    * Store success calls are not guaranteed.
    */
   public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
     Logger.debug(LOG_TAG, "Setting store delegate to " + delegate);
-    this.delegate = delegate;
+    this.storeDelegate = delegate;
   }
   public abstract void store(Record record) throws NoStoreDelegateException;
 
+  public void storeIncomplete() {}
+
   public void storeDone() {
     // Our default behavior will be to assume that the Runnable is
     // executed as soon as all the stores synchronously finish, so
     // our end timestamp can just be… now.
     storeDone(now());
   }
 
   public void storeDone(final long end) {
     Logger.debug(LOG_TAG, "Scheduling onStoreCompleted for after storing is done: " + end);
     Runnable command = new Runnable() {
       @Override
       public void run() {
-        delegate.onStoreCompleted(end);
+        storeDelegate.onStoreCompleted(end);
       }
     };
     storeWorkQueue.execute(command);
   }
 
+  /**
+   * Indicates that a number of records have been stored, more are still to come but after some time,
+   * and now would be a good time to flush records and perform any other similar operations.
+   */
+  public void storeFlush() {
+  }
+
+  /**
+   * Indicates that a flow of records have been completed.
+   */
+  public void performCleanup() {
+  }
+
   public abstract void wipe(RepositorySessionWipeDelegate delegate);
 
   /**
    * Synchronously perform the shared work of beginning. Throws on failure.
    * @throws InvalidSessionTransitionException
    *
    */
   protected void sharedBegin() throws InvalidSessionTransitionException {
@@ -247,24 +262,24 @@ public abstract class RepositorySession 
 
   /**
    * Run the provided command if we're active and our delegate queue
    * is not shut down.
    */
   protected synchronized void executeDelegateCommand(Runnable command)
       throws InactiveSessionException {
     if (!isActive() || delegateQueue.isShutdown()) {
-      throw new InactiveSessionException(null);
+      throw new InactiveSessionException();
     }
     delegateQueue.execute(command);
   }
 
   public synchronized void ensureActive() throws InactiveSessionException {
     if (!isActive()) {
-      throw new InactiveSessionException(null);
+      throw new InactiveSessionException();
     }
   }
 
   public synchronized boolean isActive() {
     return status == SessionStatus.ACTIVE;
   }
 
   public synchronized SessionStatus getStatus() {
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/RepositoryStateProvider.java
@@ -0,0 +1,47 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories;
+
+import android.support.annotation.CheckResult;
+import android.support.annotation.Nullable;
+
+/**
+ * Interface describing a repository state provider.
+ * Repository's state might consist of a number of key-value pairs.
+ *
+ * Currently there are two types of implementations: persistent and non-persistent state.
+ * Persistent state survives between syncs, and is currently used by the BatchingDownloader to
+ * resume downloads in case of interruptions. Non-persistent state is used when resuming downloads
+ * is not possible.
+ *
+ * In order to safely use a persistent state provider for resuming downloads, a sync stage must match
+ * the following criteria:
+ * - records are downloaded with sort=oldest
+ * - records must be downloaded into a persistent buffer, or applied to live storage
+ *
+ * @author grisha
+ */
+public interface RepositoryStateProvider {
+    String KEY_HIGH_WATER_MARK = "highWaterMark";
+    String KEY_OFFSET = "offset";
+    String KEY_OFFSET_SINCE = "offsetSince";
+    String KEY_OFFSET_ORDER = "offsetOrder";
+
+    boolean isPersistent();
+
+    @CheckResult
+    boolean commit();
+
+    RepositoryStateProvider clear(String key);
+
+    RepositoryStateProvider setString(String key, String value);
+    @Nullable String getString(String key);
+
+    RepositoryStateProvider setLong(String key, Long value);
+    @Nullable Long getLong(String key);
+
+    @CheckResult
+    boolean resetAndCommit();
+}
rename from mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11Repository.java
rename to mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server15Repository.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11Repository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server15Repository.java
@@ -1,144 +1,122 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories;
 
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 
 import org.mozilla.gecko.sync.InfoCollections;
 import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 
 import android.content.Context;
 import android.support.annotation.NonNull;
 import android.support.annotation.Nullable;
 
 /**
- * A Server11Repository implements fetching and storing against the Sync 1.1 API.
+ * A Server15Repository implements fetching and storing against the Sync 1.5 API.
  * It doesn't do crypto: that's the job of the middleware.
  *
  * @author rnewman
  */
-public class Server11Repository extends Repository {
-  protected String collection;
-  protected URI collectionURI;
-  protected final AuthHeaderProvider authHeaderProvider;
+public class Server15Repository extends Repository {
+  public final AuthHeaderProvider authHeaderProvider;
+
+  private final long syncDeadlineMillis;
+  /* package-local */ final URI collectionURI;
+
+  protected final String collection;
   protected final InfoCollections infoCollections;
 
+  protected RepositoryStateProvider stateProvider;
+
   private final InfoConfiguration infoConfiguration;
+  private final static String DEFAULT_SORT_ORDER = "oldest";
+  private final static long DEFAULT_BATCH_LIMIT = 100;
 
   /**
-   * Construct a new repository that fetches and stores against the Sync 1.1. API.
+   * Construct a new repository that fetches and stores against the Sync 1.5 API.
    *
    * @param collection name.
    * @param storageURL full URL to storage endpoint.
    * @param authHeaderProvider to use in requests; may be null.
    * @param infoCollections instance; must not be null.
    * @throws URISyntaxException
    */
-  public Server11Repository(@NonNull String collection, @NonNull String storageURL, AuthHeaderProvider authHeaderProvider, @NonNull InfoCollections infoCollections, @NonNull InfoConfiguration infoConfiguration) throws URISyntaxException {
+  public Server15Repository(
+          @NonNull String collection,
+          long syncDeadlineMillis,
+          @NonNull String storageURL,
+          AuthHeaderProvider authHeaderProvider,
+          @NonNull InfoCollections infoCollections,
+          @NonNull InfoConfiguration infoConfiguration,
+          @NonNull RepositoryStateProvider stateProvider) throws URISyntaxException {
     if (collection == null) {
       throw new IllegalArgumentException("collection must not be null");
     }
     if (storageURL == null) {
       throw new IllegalArgumentException("storageURL must not be null");
     }
     if (infoCollections == null) {
       throw new IllegalArgumentException("infoCollections must not be null");
     }
     this.collection = collection;
+    this.syncDeadlineMillis = syncDeadlineMillis;
     this.collectionURI = new URI(storageURL + (storageURL.endsWith("/") ? collection : "/" + collection));
     this.authHeaderProvider = authHeaderProvider;
     this.infoCollections = infoCollections;
     this.infoConfiguration = infoConfiguration;
+    this.stateProvider = stateProvider;
   }
 
   @Override
   public void createSession(RepositorySessionCreationDelegate delegate,
                             Context context) {
-    delegate.onSessionCreated(new Server11RepositorySession(this));
+    delegate.onSessionCreated(new Server15RepositorySession(this));
   }
 
   public URI collectionURI() {
     return this.collectionURI;
   }
 
-  public URI collectionURI(boolean full, long newer, long limit, String sort, String ids, String offset) throws URISyntaxException {
-    ArrayList<String> params = new ArrayList<String>();
-    if (full) {
-      params.add("full=1");
-    }
-    if (newer >= 0) {
-      // Translate local millisecond timestamps into server decimal seconds.
-      String newerString = Utils.millisecondsToDecimalSecondsString(newer);
-      params.add("newer=" + newerString);
-    }
-    if (limit > 0) {
-      params.add("limit=" + limit);
-    }
-    if (sort != null) {
-      params.add("sort=" + sort);       // We trust these values.
-    }
-    if (ids != null) {
-      params.add("ids=" + ids);         // We trust these values.
-    }
-    if (offset != null) {
-      // Offset comes straight out of HTTP headers and it is the responsibility of the caller to URI-escape it.
-      params.add("offset=" + offset);
-    }
-    if (params.size() == 0) {
-      return this.collectionURI;
-    }
-
-    StringBuilder out = new StringBuilder();
-    char indicator = '?';
-    for (String param : params) {
-      out.append(indicator);
-      indicator = '&';
-      out.append(param);
-    }
-    String uri = this.collectionURI + out.toString();
-    return new URI(uri);
-  }
-
-  public URI wboURI(String id) throws URISyntaxException {
-    return new URI(this.collectionURI + "/" + id);
-  }
-
-  // Override these.
-  @SuppressWarnings("static-method")
-  public long getDefaultBatchLimit() {
-    return -1;
-  }
-
-  @SuppressWarnings("static-method")
-  public String getDefaultSort() {
-    return null;
-  }
-
-  public long getDefaultTotalLimit() {
-    return -1;
-  }
-
-  public AuthHeaderProvider getAuthHeaderProvider() {
-    return authHeaderProvider;
-  }
-
-  public boolean updateNeeded(long lastSyncTimestamp) {
+  /* package-local */ boolean updateNeeded(long lastSyncTimestamp) {
     return infoCollections.updateNeeded(collection, lastSyncTimestamp);
   }
 
   @Nullable
-  public Long getCollectionLastModified() {
+  /* package-local */ Long getCollectionLastModified() {
     return infoCollections.getTimestamp(collection);
   }
 
   public InfoConfiguration getInfoConfiguration() {
     return infoConfiguration;
   }
+
+  public String getSortOrder() {
+    return DEFAULT_SORT_ORDER;
+  }
+
+  public Long getBatchLimit() {
+    return DEFAULT_BATCH_LIMIT;
+  }
+
+  public boolean getAllowMultipleBatches() {
+    return true;
+  }
+
+  public boolean getAllowHighWaterMark() {
+    return false;
+  }
+
+  /**
+   * A point in time by which this repository's session must complete fetch and store operations.
+   * Particularly pertinent for batching downloads performed by the session (should we fetch
+   * another batch?) and buffered repositories (do we have enough time to merge what we've downloaded?).
+   */
+  public long getSyncDeadline() {
+    return syncDeadlineMillis;
+  }
 }
rename from mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11RepositorySession.java
rename to mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server15RepositorySession.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11RepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server15RepositorySession.java
@@ -1,84 +1,100 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories;
 
+import android.net.Uri;
+
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionGuidsSinceDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionWipeDelegate;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 import org.mozilla.gecko.sync.repositories.downloaders.BatchingDownloader;
+import org.mozilla.gecko.sync.repositories.downloaders.BatchingDownloaderController;
 import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader;
 
-public class Server11RepositorySession extends RepositorySession {
-  public static final String LOG_TAG = "Server11Session";
+public class Server15RepositorySession extends RepositorySession {
+  public static final String LOG_TAG = "Server15RepositorySession";
 
-  Server11Repository serverRepository;
+  protected final Server15Repository serverRepository;
   private BatchingUploader uploader;
   private final BatchingDownloader downloader;
 
-  public Server11RepositorySession(Repository repository) {
+  public Server15RepositorySession(Repository repository) {
     super(repository);
-    serverRepository = (Server11Repository) repository;
-    this.downloader = new BatchingDownloader(serverRepository, this);
-  }
-
-  public Server11Repository getServerRepository() {
-    return serverRepository;
+    this.serverRepository = (Server15Repository) repository;
+    this.downloader = new BatchingDownloader(
+            this.serverRepository.authHeaderProvider,
+            Uri.parse(this.serverRepository.collectionURI().toString()),
+            this.serverRepository.getSyncDeadline(),
+            this.serverRepository.getAllowMultipleBatches(),
+            this.serverRepository.getAllowHighWaterMark(),
+            this.serverRepository.stateProvider,
+            this);
   }
 
   @Override
-  public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
-    this.delegate = delegate;
+  public void setStoreDelegate(RepositorySessionStoreDelegate storeDelegate) {
+    super.setStoreDelegate(storeDelegate);
 
     // Now that we have the delegate, we can initialize our uploader.
-    this.uploader = new BatchingUploader(this, storeWorkQueue, delegate);
+    this.uploader = new BatchingUploader(
+            this, storeWorkQueue, storeDelegate, Uri.parse(serverRepository.collectionURI.toString()),
+            serverRepository.getCollectionLastModified(), serverRepository.getInfoConfiguration(),
+            serverRepository.authHeaderProvider);
   }
 
   @Override
   public void guidsSince(long timestamp,
                          RepositorySessionGuidsSinceDelegate delegate) {
     // TODO Auto-generated method stub
 
   }
 
   @Override
-  public void fetchSince(long timestamp,
+  public void fetchSince(long sinceTimestamp,
                          RepositorySessionFetchRecordsDelegate delegate) {
-    this.downloader.fetchSince(timestamp, delegate);
+    BatchingDownloaderController.resumeFetchSinceIfPossible(
+            this.downloader,
+            this.serverRepository.stateProvider,
+            delegate,
+            sinceTimestamp,
+            serverRepository.getBatchLimit(),
+            serverRepository.getSortOrder()
+    );
   }
 
   @Override
   public void fetchAll(RepositorySessionFetchRecordsDelegate delegate) {
     this.fetchSince(-1, delegate);
   }
 
   @Override
   public void fetch(String[] guids,
                     RepositorySessionFetchRecordsDelegate delegate) {
     this.downloader.fetch(guids, delegate);
   }
 
   @Override
   public void wipe(RepositorySessionWipeDelegate delegate) {
     if (!isActive()) {
-      delegate.onWipeFailed(new InactiveSessionException(null));
+      delegate.onWipeFailed(new InactiveSessionException());
       return;
     }
     // TODO: implement wipe.
   }
 
   @Override
   public void store(Record record) throws NoStoreDelegateException {
-    if (delegate == null) {
+    if (storeDelegate == null) {
       throw new NoStoreDelegateException();
     }
 
     // If delegate was set, this shouldn't happen.
     if (uploader == null) {
       throw new IllegalStateException("Uploader haven't been initialized");
     }
 
@@ -92,13 +108,37 @@ public class Server11RepositorySession e
     // If delegate was set, this shouldn't happen.
     if (uploader == null) {
       throw new IllegalStateException("Uploader haven't been initialized");
     }
 
     uploader.noMoreRecordsToUpload();
   }
 
+  /**
+   * @return Repository's high-water-mark if it's available, its use is allowed by the repository,
+   * repository is set to fetch oldest-first, and it's greater than collection's last-synced timestamp.
+   * Otherwise, returns repository's last-synced timestamp.
+   */
+  @Override
+  public long getLastSyncTimestamp() {
+    if (!serverRepository.getAllowHighWaterMark() || !serverRepository.getSortOrder().equals("oldest")) {
+      return super.getLastSyncTimestamp();
+    }
+
+    final Long highWaterMark = serverRepository.stateProvider.getLong(
+            RepositoryStateProvider.KEY_HIGH_WATER_MARK);
+
+    // After a successful sync we expect that last-synced timestamp for a collection will be greater
+    // than the high-water-mark. High-water-mark is mostly useful in case of resuming a sync,
+    // and if we're resuming we did not bump our last-sync timestamps during the previous sync.
+    if (highWaterMark == null || super.getLastSyncTimestamp() > highWaterMark) {
+      return super.getLastSyncTimestamp();
+    }
+
+    return highWaterMark;
+  }
+
   @Override
   public boolean dataAvailable() {
     return serverRepository.updateNeeded(getLastSyncTimestamp());
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserBookmarksRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserBookmarksRepositorySession.java
@@ -609,29 +609,29 @@ public class AndroidBrowserBookmarksRepo
   @Override
   public boolean insertFolder(BookmarkRecord record) {
     // A folder that is *not* deleted needs its androidID updated, so that
     // updateBookkeeping can re-parent, etc.
     Record toStore = prepareRecord(record);
     try {
       Uri recordURI = dbHelper.insert(toStore);
       if (recordURI == null) {
-        delegate.onRecordStoreFailed(new RuntimeException("Got null URI inserting folder with guid " + toStore.guid + "."), record.guid);
+        storeDelegate.onRecordStoreFailed(new RuntimeException("Got null URI inserting folder with guid " + toStore.guid + "."), record.guid);
         return false;
       }
       toStore.androidID = ContentUris.parseId(recordURI);
       Logger.debug(LOG_TAG, "Inserted folder with guid " + toStore.guid + " as androidID " + toStore.androidID);
 
       updateBookkeeping(toStore);
     } catch (Exception e) {
-      delegate.onRecordStoreFailed(e, record.guid);
+      storeDelegate.onRecordStoreFailed(e, record.guid);
       return false;
     }
     trackRecord(toStore);
-    delegate.onRecordStoreSucceeded(record.guid);
+    storeDelegate.onRecordStoreSucceeded(record.guid);
     return true;
   }
 
   /**
    * Implement method of BookmarksInsertionManager.BookmarkInserter.
    */
   @Override
   public void bulkInsertNonFolders(Collection<BookmarkRecord> records) {
@@ -644,36 +644,36 @@ public class AndroidBrowserBookmarksRepo
     }
 
     try {
       int stored = dataAccessor.bulkInsert(toStores);
       if (stored != toStores.size()) {
         // Something failed; most pessimistic action is to declare that all insertions failed.
         // TODO: perform the bulkInsert in a transaction and rollback unless all insertions succeed?
         for (Record failed : toStores) {
-          delegate.onRecordStoreFailed(new RuntimeException("Possibly failed to bulkInsert non-folder with guid " + failed.guid + "."), failed.guid);
+          storeDelegate.onRecordStoreFailed(new RuntimeException("Possibly failed to bulkInsert non-folder with guid " + failed.guid + "."), failed.guid);
         }
         return;
       }
     } catch (NullCursorException e) {
       for (Record failed : toStores) {
-        delegate.onRecordStoreFailed(e, failed.guid);
+        storeDelegate.onRecordStoreFailed(e, failed.guid);
       }
       return;
     }
 
     // Success For All!
     for (Record succeeded : toStores) {
       try {
         updateBookkeeping(succeeded);
       } catch (Exception e) {
         Logger.warn(LOG_TAG, "Got exception updating bookkeeping of non-folder with guid " + succeeded.guid + ".", e);
       }
       trackRecord(succeeded);
-      delegate.onRecordStoreSucceeded(succeeded.guid);
+      storeDelegate.onRecordStoreSucceeded(succeeded.guid);
     }
   }
 
   @Override
   public void finish(RepositorySessionFinishDelegate delegate) throws InactiveSessionException {
     // Allow these to be GCed.
     deletionManager = null;
     insertionManager = null;
@@ -897,56 +897,52 @@ public class AndroidBrowserBookmarksRepo
       Logger.debug(LOG_TAG, "Done applying deletions.");
     } catch (Exception e) {
       Logger.error(LOG_TAG, "Unable to apply deletions.", e);
     }
   }
 
   @SuppressWarnings("unchecked")
   private void finishUp() {
-    try {
-      flushQueues();
-      Logger.debug(LOG_TAG, "Have " + parentToChildArray.size() + " folders whose children might need repositioning.");
-      for (Entry<String, JSONArray> entry : parentToChildArray.entrySet()) {
-        String guid = entry.getKey();
-        JSONArray onServer = entry.getValue();
-        try {
-          final long folderID = getIDForGUID(guid);
-          final JSONArray inDB = new JSONArray();
-          final boolean clean = getChildrenArray(folderID, false, inDB);
-          final boolean sameArrays = Utils.sameArrays(onServer, inDB);
+    flushQueues();
+    Logger.debug(LOG_TAG, "Have " + parentToChildArray.size() + " folders whose children might need repositioning.");
+    for (Entry<String, JSONArray> entry : parentToChildArray.entrySet()) {
+      String guid = entry.getKey();
+      JSONArray onServer = entry.getValue();
+      try {
+        final long folderID = getIDForGUID(guid);
+        final JSONArray inDB = new JSONArray();
+        final boolean clean = getChildrenArray(folderID, false, inDB);
+        final boolean sameArrays = Utils.sameArrays(onServer, inDB);
 
-          // If the local children and the remote children are already
-          // the same, then we don't need to bump the modified time of the
-          // parent: we wouldn't upload a different record, so avoid the cycle.
-          if (!sameArrays) {
-            int added = 0;
-            for (Object o : inDB) {
-              if (!onServer.contains(o)) {
-                onServer.add(o);
-                added++;
-              }
+        // If the local children and the remote children are already
+        // the same, then we don't need to bump the modified time of the
+        // parent: we wouldn't upload a different record, so avoid the cycle.
+        if (!sameArrays) {
+          int added = 0;
+          for (Object o : inDB) {
+            if (!onServer.contains(o)) {
+              onServer.add(o);
+              added++;
             }
-            Logger.debug(LOG_TAG, "Added " + added + " items locally.");
-            Logger.debug(LOG_TAG, "Untracking and bumping " + guid + "(" + folderID + ")");
-            dataAccessor.bumpModified(folderID, now());
-            untrackGUID(guid);
           }
+          Logger.debug(LOG_TAG, "Added " + added + " items locally.");
+          Logger.debug(LOG_TAG, "Untracking and bumping " + guid + "(" + folderID + ")");
+          dataAccessor.bumpModified(folderID, now());
+          untrackGUID(guid);
+        }
 
-          // If the arrays are different, or they're the same but not flushed to disk,
-          // write them out now.
-          if (!sameArrays || !clean) {
-            dataAccessor.updatePositions(new ArrayList<String>(onServer));
-          }
-        } catch (Exception e) {
-          Logger.warn(LOG_TAG, "Error repositioning children for " + guid, e);
+        // If the arrays are different, or they're the same but not flushed to disk,
+        // write them out now.
+        if (!sameArrays || !clean) {
+          dataAccessor.updatePositions(new ArrayList<String>(onServer));
         }
+      } catch (Exception e) {
+        Logger.warn(LOG_TAG, "Error repositioning children for " + guid, e);
       }
-    } finally {
-      super.storeDone();
     }
   }
 
   /**
    * Hook into the deletion manager on wipe.
    */
   class BookmarkWipeRunnable extends WipeRunnable {
     public BookmarkWipeRunnable(RepositorySessionWipeDelegate delegate) {
@@ -972,17 +968,21 @@ public class AndroidBrowserBookmarksRepo
     return new BookmarkWipeRunnable(delegate);
   }
 
   @Override
   public void storeDone() {
     Runnable command = new Runnable() {
       @Override
       public void run() {
-        finishUp();
+        try {
+          finishUp();
+        } finally {
+          AndroidBrowserBookmarksRepositorySession.super.storeDone();
+        }
       }
     };
     storeWorkQueue.execute(command);
   }
 
   @Override
   protected String buildRecordString(Record record) {
     BookmarkRecord bmk = (BookmarkRecord) record;
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserHistoryDataAccessor.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserHistoryDataAccessor.java
@@ -12,16 +12,17 @@ import org.mozilla.gecko.background.comm
 import org.mozilla.gecko.db.BrowserContract;
 import org.mozilla.gecko.sync.repositories.NullCursorException;
 import org.mozilla.gecko.sync.repositories.domain.HistoryRecord;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 import android.content.ContentValues;
 import android.content.Context;
 import android.net.Uri;
+import android.os.Bundle;
 
 public class AndroidBrowserHistoryDataAccessor extends
     AndroidBrowserRepositoryDataAccessor {
 
   public AndroidBrowserHistoryDataAccessor(Context context) {
     super(context);
   }
 
@@ -40,17 +41,17 @@ public class AndroidBrowserHistoryDataAc
     if (rec.visits != null) {
       JSONArray visits = rec.visits;
       long mostRecent = getLastVisited(visits);
 
       // Fennec stores history timestamps in milliseconds, and visit timestamps in microseconds.
       // The rest of Sync works in microseconds. This is the conversion point for records coming form Sync.
       cv.put(BrowserContract.History.DATE_LAST_VISITED, mostRecent / 1000);
       cv.put(BrowserContract.History.REMOTE_DATE_LAST_VISITED, mostRecent / 1000);
-      cv.put(BrowserContract.History.VISITS, Long.toString(visits.size()));
+      cv.put(BrowserContract.History.VISITS, visits.size());
     }
     return cv;
   }
 
   @Override
   protected String[] getAllColumns() {
     return BrowserContractHelpers.HistoryColumns;
   }
@@ -104,73 +105,48 @@ public class AndroidBrowserHistoryDataAc
    * then inserts all the visit information (also using <code>ContentProvider.bulkInsert</code>).
    *
    * @param records
    *          the records to insert.
    * @return
    *          the number of records actually inserted.
    * @throws NullCursorException
    */
-  public int bulkInsert(ArrayList<HistoryRecord> records) throws NullCursorException {
-    if (records.isEmpty()) {
-      Logger.debug(LOG_TAG, "No records to insert, returning.");
-    }
-
-    int size = records.size();
-    ContentValues[] cvs = new ContentValues[size];
-    int index = 0;
-    for (Record record : records) {
+  public boolean bulkInsert(ArrayList<HistoryRecord> records) throws NullCursorException {
+    final Bundle[] historyBundles = new Bundle[records.size()];
+    int i = 0;
+    for (HistoryRecord record : records) {
       if (record.guid == null) {
-        throw new IllegalArgumentException("Record with null GUID passed in to bulkInsert.");
+        throw new IllegalArgumentException("Record with null GUID passed into bulkInsert.");
       }
-      cvs[index] = getContentValues(record);
-      index += 1;
-    }
-
-    // First update the history records.
-    int inserted = context.getContentResolver().bulkInsert(getUri(), cvs);
-    if (inserted == size) {
-      Logger.debug(LOG_TAG, "Inserted " + inserted + " records, as expected.");
-    } else {
-      Logger.debug(LOG_TAG, "Inserted " +
-                   inserted + " records but expected " +
-                   size     + " records; continuing to update visits.");
+      final Bundle historyBundle = new Bundle();
+      historyBundle.putParcelable(BrowserContract.METHOD_PARAM_OBJECT, getContentValues(record));
+      historyBundle.putSerializable(
+              BrowserContract.History.VISITS,
+              VisitsHelper.getVisitsContentValues(record.guid, record.visits)
+      );
+      historyBundles[i] = historyBundle;
+      i++;
     }
 
-    final ContentValues remoteVisitAggregateValues = new ContentValues();
-    final Uri historyIncrementRemoteAggregateUri = getUri().buildUpon()
-            .appendQueryParameter(BrowserContract.PARAM_INCREMENT_REMOTE_AGGREGATES, "true")
-            .build();
-    for (Record record : records) {
-      HistoryRecord rec = (HistoryRecord) record;
-      if (rec.visits != null && rec.visits.size() != 0) {
-        int remoteVisitsInserted = context.getContentResolver().bulkInsert(
-                BrowserContract.Visits.CONTENT_URI,
-                VisitsHelper.getVisitsContentValues(rec.guid, rec.visits)
-        );
+    final Bundle data = new Bundle();
+    data.putSerializable(BrowserContract.METHOD_PARAM_DATA, historyBundles);
 
-        // If we just inserted any visits, update remote visit aggregate values.
-        // While inserting visits, we might not insert all of rec.visits - if we already have a local
-        // visit record with matching (guid,date), we will skip that visit.
-        // Remote visits aggregate value will be incremented by number of visits inserted.
-        // Note that we don't need to set REMOTE_DATE_LAST_VISITED, because it already gets set above.
-        if (remoteVisitsInserted > 0) {
-          // Note that REMOTE_VISITS must be set before calling cr.update(...) with a URI
-          // that has PARAM_INCREMENT_REMOTE_AGGREGATES=true.
-          remoteVisitAggregateValues.put(BrowserContract.History.REMOTE_VISITS, remoteVisitsInserted);
-          context.getContentResolver().update(
-                  historyIncrementRemoteAggregateUri,
-                  remoteVisitAggregateValues,
-                  BrowserContract.History.GUID + " = ?", new String[] {rec.guid}
-          );
-        }
-      }
+    // Let our ContentProvider handle insertion of everything.
+    final Bundle result = context.getContentResolver().call(
+            getUri(),
+            BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC,
+            getUri().toString(),
+            data
+    );
+    if (result == null) {
+      throw new IllegalStateException("Unexpected null result while bulk inserting history");
     }
-
-    return inserted;
+    final Exception thrownException = (Exception) result.getSerializable(BrowserContract.METHOD_RESULT);
+    return thrownException == null;
   }
 
   /**
    * Helper method used to find largest <code>VisitsHelper.SYNC_DATE_KEY</code> value in a provided JSONArray.
    *
    * @param visits Array of objects which will be searched.
    * @return largest value of <code>VisitsHelper.SYNC_DATE_KEY</code>.
      */
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserHistoryRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserHistoryRepositorySession.java
@@ -23,17 +23,17 @@ import android.database.Cursor;
 import android.os.RemoteException;
 
 public class AndroidBrowserHistoryRepositorySession extends AndroidBrowserRepositorySession {
   public static final String LOG_TAG = "ABHistoryRepoSess";
 
   /**
    * The number of records to queue for insertion before writing to databases.
    */
-  public static final int INSERT_RECORD_THRESHOLD = 50;
+  public static final int INSERT_RECORD_THRESHOLD = 5000;
   public static final int RECENT_VISITS_LIMIT = 20;
 
   public AndroidBrowserHistoryRepositorySession(Repository repository, Context context) {
     super(repository);
     dbHelper = new AndroidBrowserHistoryDataAccessor(context);
   }
 
   @Override
@@ -157,52 +157,76 @@ public class AndroidBrowserHistoryReposi
     if (recordsBuffer.size() < 1) {
       Logger.debug(LOG_TAG, "No records to flush, returning.");
       return;
     }
 
     final ArrayList<HistoryRecord> outgoing = recordsBuffer;
     recordsBuffer = new ArrayList<HistoryRecord>();
     Logger.debug(LOG_TAG, "Flushing " + outgoing.size() + " records to database.");
-    // TODO: move bulkInsert to AndroidBrowserDataAccessor?
-    int inserted = ((AndroidBrowserHistoryDataAccessor) dbHelper).bulkInsert(outgoing);
-    if (inserted != outgoing.size()) {
-      // Something failed; most pessimistic action is to declare that all insertions failed.
-      // TODO: perform the bulkInsert in a transaction and rollback unless all insertions succeed?
+    boolean transactionSuccess = ((AndroidBrowserHistoryDataAccessor) dbHelper).bulkInsert(outgoing);
+    if (!transactionSuccess) {
       for (HistoryRecord failed : outgoing) {
-        delegate.onRecordStoreFailed(new RuntimeException("Failed to insert history item with guid " + failed.guid + "."), failed.guid);
+        storeDelegate.onRecordStoreFailed(new RuntimeException("Failed to insert history item with guid " + failed.guid + "."), failed.guid);
       }
       return;
     }
 
     // All good, everybody succeeded.
     for (HistoryRecord succeeded : outgoing) {
       try {
         // Does not use androidID -- just GUID -> String map.
         updateBookkeeping(succeeded);
       } catch (NoGuidForIdException | ParentNotFoundException e) {
         // Should not happen.
         throw new NullCursorException(e);
       } catch (NullCursorException e) {
         throw e;
       }
       trackRecord(succeeded);
-      delegate.onRecordStoreSucceeded(succeeded.guid); // At this point, we are really inserted.
+      storeDelegate.onRecordStoreSucceeded(succeeded.guid); // At this point, we are really inserted.
     }
   }
 
   @Override
   public void storeDone() {
+    storeDone(System.currentTimeMillis());
+  }
+
+  /**
+   * We need to flush our internal buffer of records in case of any interruptions of record flow
+   * from our "source". Downloader might be maintaining a "high-water-mark" based on the records
+   * it tried to store, so it's pertinent that all of the records that were queued for storage
+   * are eventually persisted.
+   */
+  @Override
+  public void storeIncomplete() {
     storeWorkQueue.execute(new Runnable() {
       @Override
       public void run() {
         synchronized (recordsBufferMonitor) {
           try {
             flushNewRecords();
           } catch (Exception e) {
             Logger.warn(LOG_TAG, "Error flushing records to database.", e);
           }
         }
-        storeDone(System.currentTimeMillis());
+      }
+    });
+  }
+
+  @Override
+  public void storeDone(final long end) {
+    storeWorkQueue.execute(new Runnable() {
+      @Override
+      public void run() {
+        synchronized (recordsBufferMonitor) {
+          try {
+            flushNewRecords();
+          } catch (Exception e) {
+            Logger.warn(LOG_TAG, "Error flushing records to database.", e);
+          }
+        }
+        AndroidBrowserHistoryRepositorySession.super.storeDone(end);
       }
     });
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/AndroidBrowserRepositorySession.java
@@ -204,17 +204,17 @@ public abstract class AndroidBrowserRepo
                               RepositorySessionGuidsSinceDelegate delegate) {
       this.timestamp = timestamp;
       this.delegate = delegate;
     }
 
     @Override
     public void run() {
       if (!isActive()) {
-        delegate.onGuidsSinceFailed(new InactiveSessionException(null));
+        delegate.onGuidsSinceFailed(new InactiveSessionException());
         return;
       }
 
       Cursor cur;
       try {
         cur = dbHelper.getGUIDsSince(timestamp);
       } catch (Exception e) {
         delegate.onGuidsSinceFailed(e);
@@ -275,20 +275,20 @@ public abstract class AndroidBrowserRepo
                 Logger.debug(LOG_TAG, "Skipping filtered record " + r.guid);
               }
             }
             cursor.moveToNext();
           }
           delegate.onFetchCompleted(end);
         } catch (NoGuidForIdException e) {
           Logger.warn(LOG_TAG, "No GUID for ID.", e);
-          delegate.onFetchFailed(e, null);
+          delegate.onFetchFailed(e);
         } catch (Exception e) {
           Logger.warn(LOG_TAG, "Exception in fetchFromCursor.", e);
-          delegate.onFetchFailed(e, null);
+          delegate.onFetchFailed(e);
           return;
         }
       } finally {
         Logger.trace(LOG_TAG, "Closing cursor after fetch.");
         cursor.close();
       }
     }
   }
@@ -306,31 +306,31 @@ public abstract class AndroidBrowserRepo
       this.guids  = guids;
       this.end    = end;
       this.filter = filter;
     }
 
     @Override
     public void run() {
       if (!isActive()) {
-        delegate.onFetchFailed(new InactiveSessionException(null), null);
+        delegate.onFetchFailed(new InactiveSessionException());
         return;
       }
 
       if (guids == null || guids.length < 1) {
         Logger.error(LOG_TAG, "No guids sent to fetch");
-        delegate.onFetchFailed(new InvalidRequestException(null), null);
+        delegate.onFetchFailed(new InvalidRequestException());
         return;
       }
 
       try {
         Cursor cursor = dbHelper.fetch(guids);
         this.fetchFromCursor(cursor, filter, end);
       } catch (NullCursorException e) {
-        delegate.onFetchFailed(e, null);
+        delegate.onFetchFailed(e);
       }
     }
   }
 
   @Override
   public void fetchSince(long timestamp,
                          RepositorySessionFetchRecordsDelegate delegate) {
     if (this.storeTracker == null) {
@@ -355,40 +355,40 @@ public abstract class AndroidBrowserRepo
       this.since  = since;
       this.end    = end;
       this.filter = filter;
     }
 
     @Override
     public void run() {
       if (!isActive()) {
-        delegate.onFetchFailed(new InactiveSessionException(null), null);
+        delegate.onFetchFailed(new InactiveSessionException());
         return;
       }
 
       try {
         Cursor cursor = dbHelper.fetchSince(since);
         this.fetchFromCursor(cursor, filter, end);
       } catch (NullCursorException e) {
-        delegate.onFetchFailed(e, null);
+        delegate.onFetchFailed(e);
         return;
       }
     }
   }
 
   @Override
   public void fetchAll(RepositorySessionFetchRecordsDelegate delegate) {
     this.fetchSince(0, delegate);
   }
 
   protected int storeCount = 0;
 
   @Override
   public void store(final Record record) throws NoStoreDelegateException {
-    if (delegate == null) {
+    if (storeDelegate == null) {
       throw new NoStoreDelegateException();
     }
     if (record == null) {
       Logger.error(LOG_TAG, "Record sent to store was null");
       throw new IllegalArgumentException("Null record passed to AndroidBrowserRepositorySession.store().");
     }
 
     storeCount += 1;
@@ -397,17 +397,17 @@ public abstract class AndroidBrowserRepo
     // Store Runnables *must* complete synchronously. It's OK, they
     // run on a background thread.
     Runnable command = new Runnable() {
 
       @Override
       public void run() {
         if (!isActive()) {
           Logger.warn(LOG_TAG, "AndroidBrowserRepositorySession is inactive. Store failing.");
-          delegate.onRecordStoreFailed(new InactiveSessionException(null), record.guid);
+          storeDelegate.onRecordStoreFailed(new InactiveSessionException(), record.guid);
           return;
         }
 
         // Check that the record is a valid type.
         // Fennec only supports bookmarks and folders. All other types of records,
         // including livemarks and queries, are simply ignored.
         // See Bug 708149. This might be resolved by Fennec changing its database
         // schema, or by Sync storing non-applied records in its own private database.
@@ -512,30 +512,30 @@ public abstract class AndroidBrowserRepo
                        (toStore.deleted ? " with deleted record " : " with record ") +
                        toStore.guid);
           Record replaced = replace(toStore, existingRecord);
 
           // Note that we don't track records here; deciding that is the job
           // of reconcileRecords.
           Logger.debug(LOG_TAG, "Calling delegate callback with guid " + replaced.guid +
                                 "(" + replaced.androidID + ")");
-          delegate.onRecordStoreSucceeded(replaced.guid);
+          storeDelegate.onRecordStoreSucceeded(replaced.guid);
           return;
 
         } catch (MultipleRecordsForGuidException e) {
           Logger.error(LOG_TAG, "Multiple records returned for given guid: " + record.guid);
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         } catch (NoGuidForIdException e) {
           Logger.error(LOG_TAG, "Store failed for " + record.guid, e);
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         } catch (Exception e) {
           Logger.error(LOG_TAG, "Store failed for " + record.guid, e);
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         }
       }
     };
     storeWorkQueue.execute(command);
   }
 
   /**
@@ -544,30 +544,30 @@ public abstract class AndroidBrowserRepo
    *
    * @param record the incoming record. This will be mostly blank, given that it's a deletion.
    * @param existingRecord the existing record. Use this to decide how to process the deletion.
    */
   protected void storeRecordDeletion(final Record record, final Record existingRecord) {
     // TODO: we ought to mark the record as deleted rather than purging it,
     // in order to support syncing to multiple destinations. Bug 722607.
     dbHelper.purgeGuid(record.guid);
-    delegate.onRecordStoreSucceeded(record.guid);
+    storeDelegate.onRecordStoreSucceeded(record.guid);
   }
 
   protected void insert(Record record) throws NoGuidForIdException, NullCursorException, ParentNotFoundException {
     Record toStore = prepareRecord(record);
     Uri recordURI = dbHelper.insert(toStore);
     if (recordURI == null) {
       throw new NullCursorException(new RuntimeException("Got null URI inserting record with guid " + record.guid));
     }
     toStore.androidID = ContentUris.parseId(recordURI);
 
     updateBookkeeping(toStore);
     trackRecord(toStore);
-    delegate.onRecordStoreSucceeded(toStore.guid);
+    storeDelegate.onRecordStoreSucceeded(toStore.guid);
 
     Logger.debug(LOG_TAG, "Inserted record with guid " + toStore.guid + " as androidID " + toStore.androidID);
   }
 
   protected Record replace(Record newRecord, Record existingRecord) throws NoGuidForIdException, NullCursorException, ParentNotFoundException {
     Record toStore = prepareRecord(newRecord);
 
     // newRecord should already have suitable androidID and guid.
@@ -772,17 +772,17 @@ public abstract class AndroidBrowserRepo
 
     public WipeRunnable(RepositorySessionWipeDelegate delegate) {
       this.delegate = delegate;
     }
 
     @Override
     public void run() {
       if (!isActive()) {
-        delegate.onWipeFailed(new InactiveSessionException(null));
+        delegate.onWipeFailed(new InactiveSessionException());
         return;
       }
       dbHelper.wipe();
       delegate.onWipeSucceeded();
     }
   }
 
   // For testing purposes.
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/FennecTabsRepository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/FennecTabsRepository.java
@@ -163,17 +163,17 @@ public class FennecTabsRepository extend
               if (tabsRecord.lastModified >= timestamp ||
                   clientsDataDelegate.getLastModifiedTimestamp() >= timestamp) {
                 delegate.onFetchedRecord(tabsRecord);
               }
             } finally {
               cursor.close();
             }
           } catch (Exception e) {
-            delegate.onFetchFailed(e, null);
+            delegate.onFetchFailed(e);
             return;
           }
           delegate.onFetchCompleted(now());
         }
       };
 
       delegateQueue.execute(command);
     }
@@ -197,17 +197,17 @@ public class FennecTabsRepository extend
       fetchSince(0, delegate);
     }
 
     private static final String TABS_CLIENT_GUID_IS = BrowserContract.Tabs.CLIENT_GUID + " = ?";
     private static final String CLIENT_GUID_IS = BrowserContract.Clients.GUID + " = ?";
 
     @Override
     public void store(final Record record) throws NoStoreDelegateException {
-      if (delegate == null) {
+      if (storeDelegate == null) {
         Logger.warn(LOG_TAG, "No store delegate.");
         throw new NoStoreDelegateException();
       }
       if (record == null) {
         Logger.error(LOG_TAG, "Record sent to store was null");
         throw new IllegalArgumentException("Null record passed to FennecTabsRepositorySession.store().");
       }
       if (!(record instanceof TabsRecord)) {
@@ -216,36 +216,36 @@ public class FennecTabsRepository extend
       }
       final TabsRecord tabsRecord = (TabsRecord) record;
 
       Runnable command = new Runnable() {
         @Override
         public void run() {
           Logger.debug(LOG_TAG, "Storing tabs for client " + tabsRecord.guid);
           if (!isActive()) {
-            delegate.onRecordStoreFailed(new InactiveSessionException(null), record.guid);
+            storeDelegate.onRecordStoreFailed(new InactiveSessionException(), record.guid);
             return;
           }
           if (tabsRecord.guid == null) {
-            delegate.onRecordStoreFailed(new RuntimeException("Can't store record with null GUID."), record.guid);
+            storeDelegate.onRecordStoreFailed(new RuntimeException("Can't store record with null GUID."), record.guid);
             return;
           }
 
           try {
             // This is nice and easy: we *always* store.
             final String[] selectionArgs = new String[] { tabsRecord.guid };
             if (tabsRecord.deleted) {
               try {
                 Logger.debug(LOG_TAG, "Clearing entry for client " + tabsRecord.guid);
                 clientsProvider.delete(BrowserContractHelpers.CLIENTS_CONTENT_URI,
                                        CLIENT_GUID_IS,
                                        selectionArgs);
-                delegate.onRecordStoreSucceeded(record.guid);
+                storeDelegate.onRecordStoreSucceeded(record.guid);
               } catch (Exception e) {
-                delegate.onRecordStoreFailed(e, record.guid);
+                storeDelegate.onRecordStoreFailed(e, record.guid);
               }
               return;
             }
 
             // If it exists, update the client record; otherwise insert.
             final ContentValues clientsCV = tabsRecord.getClientsContentValues();
 
             final ClientRecord clientRecord = clientsDatabase.fetchClient(tabsRecord.guid);
@@ -266,20 +266,20 @@ public class FennecTabsRepository extend
             // Now insert tabs.
             final ContentValues[] tabsArray = tabsRecord.getTabsContentValues();
             Logger.debug(LOG_TAG, "Inserting " + tabsArray.length + " tabs for client " + tabsRecord.guid);
 
             tabsProvider.delete(BrowserContractHelpers.TABS_CONTENT_URI, TABS_CLIENT_GUID_IS, selectionArgs);
             final int inserted = tabsProvider.bulkInsert(BrowserContractHelpers.TABS_CONTENT_URI, tabsArray);
             Logger.trace(LOG_TAG, "Inserted: " + inserted);
 
-            delegate.onRecordStoreSucceeded(record.guid);
+            storeDelegate.onRecordStoreSucceeded(record.guid);
           } catch (Exception e) {
             Logger.warn(LOG_TAG, "Error storing tabs.", e);
-            delegate.onRecordStoreFailed(e, record.guid);
+            storeDelegate.onRecordStoreFailed(e, record.guid);
           }
         }
       };
 
       storeWorkQueue.execute(command);
     }
 
     @Override
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/FormHistoryRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/FormHistoryRepositorySession.java
@@ -122,17 +122,17 @@ public class FormHistoryRepositorySessio
   protected static final String[] GUID_COLUMNS = new String[] { FormHistory.GUID };
 
   @Override
   public void guidsSince(final long timestamp, final RepositorySessionGuidsSinceDelegate delegate) {
     Runnable command = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onGuidsSinceFailed(new InactiveSessionException(null));
+          delegate.onGuidsSinceFailed(new InactiveSessionException());
           return;
         }
 
         ArrayList<String> guids = new ArrayList<String>();
 
         final long sharedEnd = now();
         Cursor cur = null;
         try {
@@ -246,28 +246,28 @@ public class FormHistoryRepositorySessio
     }
 
     final RecordFilter filter = this.storeTracker.getFilter();
 
     Runnable command = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onFetchFailed(new InactiveSessionException(null), null);
+          delegate.onFetchFailed(new InactiveSessionException());
           return;
         }
 
         for (Callable<Cursor> cursorCallable : cursorCallables) {
           Cursor cursor = null;
           try {
             cursor = cursorCallable.call();
             fetchFromCursor(cursor, filter, delegate); // Closes cursor.
           } catch (Exception e) {
             Logger.warn(LOG_TAG, "Exception during fetchHelper", e);
-            delegate.onFetchFailed(e, null);
+            delegate.onFetchFailed(e);
             return;
           }
         }
 
         delegate.onFetchCompleted(end);
       }
     };
 
@@ -442,17 +442,17 @@ public class FormHistoryRepositorySessio
 
   protected void enqueueRegularRecord(Record record) {
     synchronized (recordsBufferMonitor) {
       if (recordsBuffer.size() >= INSERT_ITEM_THRESHOLD) {
         // Insert the existing contents, then enqueue.
         try {
           flushInsertQueue();
         } catch (Exception e) {
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         }
       }
       // Store the ContentValues, rather than the record.
       recordsBuffer.add(contentValuesForRegularRecord(record));
     }
   }
 
@@ -484,17 +484,17 @@ public class FormHistoryRepositorySessio
         Logger.debug(LOG_TAG, "Checking for residual form history items to insert.");
         try {
           synchronized (recordsBufferMonitor) {
             flushInsertQueue();
           }
           storeDone(now());
         } catch (Exception e) {
           // XXX TODO
-          delegate.onRecordStoreFailed(e, null);
+          storeDelegate.onRecordStoreFailed(e, null);
         }
       }
     };
     storeWorkQueue.execute(command);
   }
 
   /**
    * Called when a regular record with locally unknown GUID has been fetched
@@ -536,17 +536,17 @@ public class FormHistoryRepositorySessio
     int updated = formsProvider.update(FORM_HISTORY_CONTENT_URI, cv, GUID_IS, new String[] { existingRecord.guid });
     if (updated != 1) {
       Logger.warn(LOG_TAG, "Expected to update 1 record with guid " + existingRecord.guid + " but updated " + updated + " records.");
     }
   }
 
   @Override
   public void store(Record rawRecord) throws NoStoreDelegateException {
-    if (delegate == null) {
+    if (storeDelegate == null) {
       Logger.warn(LOG_TAG, "No store delegate.");
       throw new NoStoreDelegateException();
     }
     if (rawRecord == null) {
       Logger.error(LOG_TAG, "Record sent to store was null");
       throw new IllegalArgumentException("Null record passed to FormHistoryRepositorySession.store().");
     }
     if (!(rawRecord instanceof FormHistoryRecord)) {
@@ -555,17 +555,17 @@ public class FormHistoryRepositorySessio
     }
     final FormHistoryRecord record = (FormHistoryRecord) rawRecord;
 
     Runnable command = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
           Logger.warn(LOG_TAG, "FormHistoryRepositorySession is inactive. Store failing.");
-          delegate.onRecordStoreFailed(new InactiveSessionException(null), record.guid);
+          storeDelegate.onRecordStoreFailed(new InactiveSessionException(), record.guid);
           return;
         }
 
         // TODO: lift these into the session.
         // Temporary: this matches prior syncing semantics, in which only
         // the relationship between the local and remote record is considered.
         // In the future we'll track these two timestamps and use them to
         // determine which records have changed, and thus process incoming
@@ -598,26 +598,26 @@ public class FormHistoryRepositorySessio
               return;
             }
 
             boolean locallyModified = existingRecord.lastModified > lastLocalRetrieval;
             if (!locallyModified) {
               Logger.trace(LOG_TAG, "Remote modified, local not. Deleting.");
               deleteExistingRecord(existingRecord);
               trackRecord(record);
-              delegate.onRecordStoreSucceeded(record.guid);
+              storeDelegate.onRecordStoreSucceeded(record.guid);
               return;
             }
 
             Logger.trace(LOG_TAG, "Both local and remote records have been modified.");
             if (record.lastModified > existingRecord.lastModified) {
               Logger.trace(LOG_TAG, "Remote is newer, and deleted. Purging local.");
               deleteExistingRecord(existingRecord);
               trackRecord(record);
-              delegate.onRecordStoreSucceeded(record.guid);
+              storeDelegate.onRecordStoreSucceeded(record.guid);
               return;
             }
 
             Logger.trace(LOG_TAG, "Remote is older, local is not deleted. Ignoring.");
             return;
           }
           // End deletion logic.
 
@@ -627,55 +627,55 @@ public class FormHistoryRepositorySessio
             existingRecord = findExistingRecordByPayload(record);
           }
 
           if (existingRecord == null) {
             // The record is new.
             Logger.trace(LOG_TAG, "No match. Inserting.");
             insertNewRegularRecord(record);
             trackRecord(record);
-            delegate.onRecordStoreSucceeded(record.guid);
+            storeDelegate.onRecordStoreSucceeded(record.guid);
             return;
           }
 
           // We found a local duplicate.
           Logger.trace(LOG_TAG, "Incoming record " + record.guid + " dupes to local record " + existingRecord.guid);
 
           if (!RepoUtils.stringsEqual(record.guid, existingRecord.guid)) {
             // We found a local record that does NOT have the same GUID -- keep the server's version.
             Logger.trace(LOG_TAG, "Remote guid different from local guid. Storing to keep remote guid.");
             replaceExistingRecordWithRegularRecord(record, existingRecord);
             trackRecord(record);
-            delegate.onRecordStoreSucceeded(record.guid);
+            storeDelegate.onRecordStoreSucceeded(record.guid);
             return;
           }
 
           // We found a local record that does have the same GUID -- check modification times.
           boolean locallyModified = existingRecord.lastModified > lastLocalRetrieval;
           if (!locallyModified) {
             Logger.trace(LOG_TAG, "Remote modified, local not. Storing.");
             replaceExistingRecordWithRegularRecord(record, existingRecord);
             trackRecord(record);
-            delegate.onRecordStoreSucceeded(record.guid);
+            storeDelegate.onRecordStoreSucceeded(record.guid);
             return;
           }
 
           Logger.trace(LOG_TAG, "Both local and remote records have been modified.");
           if (record.lastModified > existingRecord.lastModified) {
             Logger.trace(LOG_TAG, "Remote is newer, and not deleted. Storing.");
             replaceExistingRecordWithRegularRecord(record, existingRecord);
             trackRecord(record);
-            delegate.onRecordStoreSucceeded(record.guid);
+            storeDelegate.onRecordStoreSucceeded(record.guid);
             return;
           }
 
           Logger.trace(LOG_TAG, "Remote is older, local is not deleted. Ignoring.");
         } catch (Exception e) {
           Logger.error(LOG_TAG, "Store failed for " + record.guid, e);
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         }
       }
     };
 
     storeWorkQueue.execute(command);
   }
 
@@ -689,17 +689,17 @@ public class FormHistoryRepositorySessio
   }
 
   @Override
   public void wipe(final RepositorySessionWipeDelegate delegate) {
     Runnable command = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onWipeFailed(new InactiveSessionException(null));
+          delegate.onWipeFailed(new InactiveSessionException());
           return;
         }
 
         try {
           Logger.debug(LOG_TAG, "Wiping form history and deleted form history...");
           purgeDatabases(formsProvider);
           Logger.debug(LOG_TAG, "Wiping form history and deleted form history... DONE");
         } catch (Exception e) {
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/PasswordsRepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/android/PasswordsRepositorySession.java
@@ -73,17 +73,17 @@ public class PasswordsRepositorySession 
 
   @Override
   public void guidsSince(final long timestamp, final RepositorySessionGuidsSinceDelegate delegate) {
     final Runnable guidsSinceRunnable = new Runnable() {
       @Override
       public void run() {
 
         if (!isActive()) {
-          delegate.onGuidsSinceFailed(new InactiveSessionException(null));
+          delegate.onGuidsSinceFailed(new InactiveSessionException());
           return;
         }
 
         // Checks succeeded, now get GUIDs.
         final List<String> guids = new ArrayList<String>();
         try {
           Logger.debug(LOG_TAG, "Fetching guidsSince from data table.");
           final Cursor data = passwordsHelper.safeQuery(passwordsProvider, ".getGUIDsSince", GUID_COLS, dateModifiedWhere(timestamp), null, null);
@@ -126,17 +126,17 @@ public class PasswordsRepositorySession 
 
   @Override
   public void fetchSince(final long timestamp, final RepositorySessionFetchRecordsDelegate delegate) {
     final RecordFilter filter = this.storeTracker.getFilter();
     final Runnable fetchSinceRunnable = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onFetchFailed(new InactiveSessionException(null), null);
+          delegate.onFetchFailed(new InactiveSessionException());
           return;
         }
 
         final long end = now();
         try {
           // Fetch from data table.
           Cursor data = passwordsHelper.safeQuery(passwordsProvider, ".fetchSince",
                                                   getAllColumns(),
@@ -160,17 +160,17 @@ public class PasswordsRepositorySession 
             delegate.onFetchCompleted(end);
           } catch (Exception e) {
             Logger.error(LOG_TAG, "Delegate fetch completed callback failed.", e);
             // Don't call failure callback.
             return;
           }
         } catch (Exception e) {
           Logger.error(LOG_TAG, "Exception in fetch.");
-          delegate.onFetchFailed(e, null);
+          delegate.onFetchFailed(e);
         }
       }
     };
 
     delegateQueue.execute(fetchSinceRunnable);
   }
 
   @Override
@@ -188,17 +188,17 @@ public class PasswordsRepositorySession 
     }
 
     // Checks succeeded, now fetch.
     final RecordFilter filter = this.storeTracker.getFilter();
     final Runnable fetchRunnable = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onFetchFailed(new InactiveSessionException(null), null);
+          delegate.onFetchFailed(new InactiveSessionException());
           return;
         }
 
         final long end = now();
         final String where = RepoUtils.computeSQLInClause(guids.length, "guid");
         Logger.trace(LOG_TAG, "Fetch guids where: " + where);
 
         try {
@@ -217,32 +217,32 @@ public class PasswordsRepositorySession 
           if (!fetchAndCloseCursorDeleted(deleted, true, filter, delegate)) {
             return;
           }
 
           delegate.onFetchCompleted(end);
 
         } catch (Exception e) {
           Logger.error(LOG_TAG, "Exception in fetch.");
-          delegate.onFetchFailed(e, null);
+          delegate.onFetchFailed(e);
         }
       }
     };
 
     delegateQueue.execute(fetchRunnable);
   }
 
   @Override
   public void fetchAll(RepositorySessionFetchRecordsDelegate delegate) {
     fetchSince(0, delegate);
   }
 
   @Override
   public void store(final Record record) throws NoStoreDelegateException {
-    if (delegate == null) {
+    if (storeDelegate == null) {
       Logger.error(LOG_TAG, "No store delegate.");
       throw new NoStoreDelegateException();
     }
     if (record == null) {
       Logger.error(LOG_TAG, "Record sent to store was null.");
       throw new IllegalArgumentException("Null record passed to PasswordsRepositorySession.store().");
     }
     if (!(record instanceof PasswordRecord)) {
@@ -252,32 +252,32 @@ public class PasswordsRepositorySession 
 
     final PasswordRecord remoteRecord = (PasswordRecord) record;
 
     final Runnable storeRunnable = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
           Logger.warn(LOG_TAG, "RepositorySession is inactive. Store failing.");
-          delegate.onRecordStoreFailed(new InactiveSessionException(null), record.guid);
+          storeDelegate.onRecordStoreFailed(new InactiveSessionException(), record.guid);
           return;
         }
 
         final String guid = remoteRecord.guid;
         if (guid == null) {
-          delegate.onRecordStoreFailed(new RuntimeException("Can't store record with null GUID."), record.guid);
+          storeDelegate.onRecordStoreFailed(new RuntimeException("Can't store record with null GUID."), record.guid);
           return;
         }
 
         PasswordRecord existingRecord;
         try {
           existingRecord = retrieveByGUID(guid);
         } catch (NullCursorException | RemoteException e) {
           // Indicates a serious problem.
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         }
 
         long lastLocalRetrieval  = 0;      // lastSyncTimestamp?
         long lastRemoteRetrieval = 0;      // TODO: adjust for clock skew.
         boolean remotelyModified = remoteRecord.lastModified > lastRemoteRetrieval;
 
         // Check deleted state first.
@@ -319,48 +319,48 @@ public class PasswordsRepositorySession 
 
           return;
         }
         // End deletion logic.
 
         // Validate the incoming record.
         if (!remoteRecord.isValid()) {
             Logger.warn(LOG_TAG, "Incoming record is invalid. Reporting store failed.");
-            delegate.onRecordStoreFailed(new RuntimeException("Can't store invalid password record."), record.guid);
+            storeDelegate.onRecordStoreFailed(new RuntimeException("Can't store invalid password record."), record.guid);
             return;
         }
 
         // Now we're processing a non-deleted incoming record.
         if (existingRecord == null) {
           trace("Looking up match for record " + remoteRecord.guid);
           try {
             existingRecord = findExistingRecord(remoteRecord);
           } catch (RemoteException e) {
             Logger.error(LOG_TAG, "Remote exception in findExistingRecord.");
-            delegate.onRecordStoreFailed(e, record.guid);
+            storeDelegate.onRecordStoreFailed(e, record.guid);
           } catch (NullCursorException e) {
             Logger.error(LOG_TAG, "Null cursor in findExistingRecord.");
-            delegate.onRecordStoreFailed(e, record.guid);
+            storeDelegate.onRecordStoreFailed(e, record.guid);
           }
         }
 
         if (existingRecord == null) {
           // The record is new.
           trace("No match. Inserting.");
           Logger.debug(LOG_TAG, "Didn't find matching record. Inserting.");
           Record inserted = null;
           try {
             inserted = insert(remoteRecord);
           } catch (RemoteException e) {
             Logger.debug(LOG_TAG, "Record insert caused a RemoteException.");
-            delegate.onRecordStoreFailed(e, record.guid);
+            storeDelegate.onRecordStoreFailed(e, record.guid);
             return;
           }
           trackRecord(inserted);
-          delegate.onRecordStoreSucceeded(inserted.guid);
+          storeDelegate.onRecordStoreSucceeded(inserted.guid);
           return;
         }
 
         // We found a local dupe.
         trace("Incoming record " + remoteRecord.guid + " dupes to local record " + existingRecord.guid);
         Logger.debug(LOG_TAG, "remote " + remoteRecord.guid + " dupes to " + existingRecord.guid);
 
         if (existingRecord.deleted && existingRecord.lastModified > remoteRecord.lastModified) {
@@ -376,40 +376,40 @@ public class PasswordsRepositorySession 
 
         // TODO: pass in timestamps?
         Logger.debug(LOG_TAG, "Replacing " + existingRecord.guid + " with record " + toStore.guid);
         Record replaced = null;
         try {
           replaced = replace(existingRecord, toStore);
         } catch (RemoteException e) {
           Logger.debug(LOG_TAG, "Record replace caused a RemoteException.");
-          delegate.onRecordStoreFailed(e, record.guid);
+          storeDelegate.onRecordStoreFailed(e, record.guid);
           return;
         }
 
         // Note that we don't track records here; deciding that is the job
         // of reconcileRecords.
         Logger.debug(LOG_TAG, "Calling delegate callback with guid " + replaced.guid +
                               "(" + replaced.androidID + ")");
-        delegate.onRecordStoreSucceeded(record.guid);
+        storeDelegate.onRecordStoreSucceeded(record.guid);
         return;
       }
     };
     storeWorkQueue.execute(storeRunnable);
   }
 
   @Override
   public void wipe(final RepositorySessionWipeDelegate delegate) {
     Logger.info(LOG_TAG, "Wiping " + BrowserContractHelpers.PASSWORDS_CONTENT_URI + ", " + BrowserContractHelpers.DELETED_PASSWORDS_CONTENT_URI);
 
     Runnable wipeRunnable = new Runnable() {
       @Override
       public void run() {
         if (!isActive()) {
-          delegate.onWipeFailed(new InactiveSessionException(null));
+          delegate.onWipeFailed(new InactiveSessionException());
           return;
         }
 
         // Wipe both data and deleted.
         try {
           context.getContentResolver().delete(BrowserContractHelpers.PASSWORDS_CONTENT_URI, null, null);
           context.getContentResolver().delete(BrowserContractHelpers.DELETED_PASSWORDS_CONTENT_URI, null, null);
         } catch (Exception e) {
@@ -554,17 +554,17 @@ public class PasswordsRepositorySession 
             delegate.onFetchedRecord(r);
           } else {
             Logger.debug(LOG_TAG, "Skipping filtered record " + r.guid);
           }
         }
       }
     } catch (Exception e) {
       Logger.error(LOG_TAG, "Exception in fetch.");
-      delegate.onFetchFailed(e, null);
+      delegate.onFetchFailed(e);
       return false;
     } finally {
       cursor.close();
     }
 
     return true;
   }
 
@@ -639,28 +639,26 @@ public class PasswordsRepositorySession 
     return null;
   }
 
   private void storeRecordDeletion(Record record) {
     try {
       deleteGUID(record.guid);
     } catch (RemoteException e) {
       Logger.error(LOG_TAG, "RemoteException in password delete.");
-      delegate.onRecordStoreFailed(e, record.guid);
+      storeDelegate.onRecordStoreFailed(e, record.guid);
       return;
     }
-    delegate.onRecordStoreSucceeded(record.guid);
+    storeDelegate.onRecordStoreSucceeded(record.guid);
   }
 
   /**
    * Make a PasswordRecord from a Cursor.
    * @param cur
    *        Cursor from query.
-   * @param deleted
-   *        true if creating a deleted Record, false if otherwise.
    * @return
    *        PasswordRecord populated from Cursor.
    */
   private static PasswordRecord passwordRecordFromCursor(Cursor cur) {
     if (cur.isAfterLast()) {
       return null;
     }
     String guid = RepoUtils.getStringFromCursor(cur, BrowserContract.Passwords.GUID);
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/DeferredRepositorySessionFetchRecordsDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/DeferredRepositorySessionFetchRecordsDelegate.java
@@ -22,35 +22,45 @@ public class DeferredRepositorySessionFe
       @Override
       public void run() {
          inner.onFetchedRecord(record);
       }
     });
   }
 
   @Override
-  public void onFetchFailed(final Exception ex, final Record record) {
+  public void onFetchFailed(final Exception ex) {
     executor.execute(new Runnable() {
       @Override
       public void run() {
-        inner.onFetchFailed(ex, record);
+        inner.onFetchFailed(ex);
       }
     });
   }
 
   @Override
   public void onFetchCompleted(final long fetchEnd) {
     executor.execute(new Runnable() {
       @Override
       public void run() {
         inner.onFetchCompleted(fetchEnd);
       }
     });
   }
 
   @Override
+  public void onBatchCompleted() {
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        inner.onBatchCompleted();
+      }
+    });
+  }
+
+  @Override
   public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService newExecutor) {
     if (newExecutor == executor) {
       return this;
     }
     throw new IllegalArgumentException("Can't re-defer this delegate.");
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/DeferredRepositorySessionStoreDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/DeferredRepositorySessionStoreDelegate.java
@@ -49,9 +49,19 @@ public class DeferredRepositorySessionSt
   public void onStoreCompleted(final long storeEnd) {
     executor.execute(new Runnable() {
       @Override
       public void run() {
         inner.onStoreCompleted(storeEnd);
       }
     });
   }
+
+  @Override
+  public void onStoreFailed(final Exception e) {
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        inner.onStoreFailed(e);
+      }
+    });
+  }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/RepositorySessionFetchRecordsDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/RepositorySessionFetchRecordsDelegate.java
@@ -4,24 +4,30 @@
 
 package org.mozilla.gecko.sync.repositories.delegates;
 
 import java.util.concurrent.ExecutorService;
 
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 public interface RepositorySessionFetchRecordsDelegate {
-  public void onFetchFailed(Exception ex, Record record);
-  public void onFetchedRecord(Record record);
+  void onFetchFailed(Exception ex);
+  void onFetchedRecord(Record record);
 
   /**
    * Called when all records in this fetch have been returned.
    *
    * @param fetchEnd
    *        A millisecond-resolution timestamp indicating the *remote* timestamp
    *        at the end of the range of records. Usually this is the timestamp at
    *        which the request was received.
    *        E.g., the (normalized) value of the X-Weave-Timestamp header.
    */
-  public void onFetchCompleted(final long fetchEnd);
+  void onFetchCompleted(final long fetchEnd);
 
-  public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor);
+  /**
+   * Called when a number of records have been returned but more are still expected to come,
+   * possibly after a certain pause.
+   */
+  void onBatchCompleted();
+
+  RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor);
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/RepositorySessionStoreDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/delegates/RepositorySessionStoreDelegate.java
@@ -9,15 +9,16 @@ import java.util.concurrent.ExecutorServ
 /**
  * These methods *must* be invoked asynchronously. Use deferredStoreDelegate if you
  * need help doing this.
  *
  * @author rnewman
  *
  */
 public interface RepositorySessionStoreDelegate {
-  public void onRecordStoreFailed(Exception ex, String recordGuid);
+  void onRecordStoreFailed(Exception ex, String recordGuid);
 
   // Called with a GUID when store has succeeded.
-  public void onRecordStoreSucceeded(String guid);
-  public void onStoreCompleted(long storeEnd);
-  public RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService executor);
+  void onRecordStoreSucceeded(String guid);
+  void onStoreCompleted(long storeEnd);
+  void onStoreFailed(Exception e);
+  RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService executor);
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/downloaders/BatchingDownloader.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/downloaders/BatchingDownloader.java
@@ -1,74 +1,98 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories.downloaders;
 
+import android.net.Uri;
+import android.os.SystemClock;
 import android.support.annotation.Nullable;
 import android.support.annotation.VisibleForTesting;
 
 import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
 import org.mozilla.gecko.sync.CryptoRecord;
 import org.mozilla.gecko.sync.DelayedWorkTracker;
+import org.mozilla.gecko.sync.SyncDeadlineReachedException;
+import org.mozilla.gecko.sync.Utils;
+import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.net.SyncResponse;
 import org.mozilla.gecko.sync.net.SyncStorageCollectionRequest;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
-import org.mozilla.gecko.sync.repositories.Server11Repository;
-import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
+import org.mozilla.gecko.sync.repositories.RepositorySession;
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
 
 import java.io.UnsupportedEncodingException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.net.URLEncoder;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 /**
- * Batching Downloader, which implements batching protocol as supported by Sync 1.5.
+ * Batching Downloader implements the batching protocol as supported by Sync 1.5.
  *
  * Downloader's batching behaviour is configured via two parameters, obtained from the repository:
  * - Per-batch limit, which specified how many records may be fetched in an individual GET request.
- * - Total limit, which controls number of batch GET requests we will make.
- *
+ * - allowMultipleBatches, which determines if downloader is allowed to perform more than one fetch.
  *
  * Batching is implemented via specifying a 'limit' GET parameter, and looking for an 'offset' token
  * in the response. If offset token is present, this indicates that there are more records than what
- * we've received so far, and we perform an additional fetch. Batching stops when either we hit a total
- * limit, or offset token is no longer present (indicating that we're done).
+ * we've received so far, and we perform an additional fetch, if we're allowed to do so by our
+ * configuration. Batching stops when offset token is no longer present (indicating that we're done).
  *
- * For unlimited repositories (such as passwords), both of these value will be -1. Downloader will not
- * specify a limit parameter in this case, and the response will contain every record available and no
- * offset token, thus fully completing in one go.
+ * If we are not allowed to perform multiple batches, we consider batching to be successfully completed
+ * after the first fetch request succeeds. Similarly, a trivial case of a collection having fewer records than
+ * the batch limit will also successfully complete in one fetch.
  *
- * In between batches, we maintain a Last-Modified timestamp, based off the value return in the header
+ * In between batches, we maintain a Last-Modified timestamp, based off the value returned in the header
  * of the first response. Every response will have a Last-Modified header, indicating when the collection
  * was modified last. We pass along this header in our subsequent requests in a X-If-Unmodified-Since
  * header. Server will ensure that our collection did not change while we are batching, if it did it will
- * fail our fetch with a 412 (Consequent Modification) error. Additionally, we perform the same checks
- * locally.
+ * fail our fetch with a 412 error. Additionally, we perform the same checks locally.
  */
 public class BatchingDownloader {
     public static final String LOG_TAG = "BatchingDownloader";
+    private static final String DEFAULT_SORT_ORDER = "index";
 
-    protected final Server11Repository repository;
-    private final Server11RepositorySession repositorySession;
+    private final RepositorySession repositorySession;
     private final DelayedWorkTracker workTracker = new DelayedWorkTracker();
+    private final Uri baseCollectionUri;
+    private final long fetchDeadline;
+    private final boolean allowMultipleBatches;
+    private final boolean keepTrackOfHighWaterMark;
+
+    private RepositoryStateProvider stateProvider;
+
+    /* package-local */ final AuthHeaderProvider authHeaderProvider;
+
     // Used to track outstanding requests, so that we can abort them as needed.
     @VisibleForTesting
     protected final Set<SyncStorageCollectionRequest> pending = Collections.synchronizedSet(new HashSet<SyncStorageCollectionRequest>());
     /* @GuardedBy("this") */ private String lastModified;
-    /* @GuardedBy("this") */ private long numRecords = 0;
 
-    public BatchingDownloader(final Server11Repository repository, final Server11RepositorySession repositorySession) {
-        this.repository = repository;
+    public BatchingDownloader(
+            AuthHeaderProvider authHeaderProvider,
+            Uri baseCollectionUri,
+            long fetchDeadline,
+            boolean allowMultipleBatches,
+            boolean keepTrackOfHighWaterMark,
+            RepositoryStateProvider stateProvider,
+            RepositorySession repositorySession) {
         this.repositorySession = repositorySession;
+        this.authHeaderProvider = authHeaderProvider;
+        this.baseCollectionUri = baseCollectionUri;
+        this.allowMultipleBatches = allowMultipleBatches;
+        this.keepTrackOfHighWaterMark = keepTrackOfHighWaterMark;
+        this.fetchDeadline = fetchDeadline;
+        this.stateProvider = stateProvider;
     }
 
     @VisibleForTesting
     protected static String flattenIDs(String[] guids) {
         // Consider using Utils.toDelimitedString if and when the signature changes
         // to Collection<String> guids.
         if (guids.length == 0) {
             return "";
@@ -89,185 +113,218 @@ public class BatchingDownloader {
     protected void fetchWithParameters(long newer,
                                     long batchLimit,
                                     boolean full,
                                     String sort,
                                     String ids,
                                     SyncStorageCollectionRequest request,
                                     RepositorySessionFetchRecordsDelegate fetchRecordsDelegate)
             throws URISyntaxException, UnsupportedEncodingException {
-        if (batchLimit > repository.getDefaultTotalLimit()) {
-            throw new IllegalArgumentException("Batch limit should not be greater than total limit");
-        }
-
         request.delegate = new BatchingDownloaderDelegate(this, fetchRecordsDelegate, request,
                 newer, batchLimit, full, sort, ids);
         this.pending.add(request);
         request.get();
     }
 
     @VisibleForTesting
-    @Nullable
-    protected String encodeParam(String param) throws UnsupportedEncodingException {
-        if (param != null) {
-            return URLEncoder.encode(param, "UTF-8");
-        }
-        return null;
-    }
-
-    @VisibleForTesting
     protected SyncStorageCollectionRequest makeSyncStorageCollectionRequest(long newer,
                                                   long batchLimit,
                                                   boolean full,
                                                   String sort,
                                                   String ids,
                                                   String offset)
             throws URISyntaxException, UnsupportedEncodingException {
-        URI collectionURI = repository.collectionURI(full, newer, batchLimit, sort, ids, encodeParam(offset));
+        final URI collectionURI = buildCollectionURI(baseCollectionUri, full, newer, batchLimit, sort, ids, offset);
         Logger.debug(LOG_TAG, collectionURI.toString());
 
         return new SyncStorageCollectionRequest(collectionURI);
     }
 
-    public void fetchSince(long timestamp, RepositorySessionFetchRecordsDelegate fetchRecordsDelegate) {
-        this.fetchSince(timestamp, null, fetchRecordsDelegate);
-    }
-
-    private void fetchSince(long timestamp, String offset,
-                           RepositorySessionFetchRecordsDelegate fetchRecordsDelegate) {
-        long batchLimit = repository.getDefaultBatchLimit();
-        String sort = repository.getDefaultSort();
-
+    public void fetchSince(RepositorySessionFetchRecordsDelegate fetchRecordsDelegate, long timestamp, long batchLimit, String sortOrder, String offset) {
         try {
             SyncStorageCollectionRequest request = makeSyncStorageCollectionRequest(timestamp,
-                    batchLimit, true, sort, null, offset);
-            this.fetchWithParameters(timestamp, batchLimit, true, sort, null, request, fetchRecordsDelegate);
+                    batchLimit, true, sortOrder, null, offset);
+            this.fetchWithParameters(timestamp, batchLimit, true, sortOrder, null, request, fetchRecordsDelegate);
         } catch (URISyntaxException | UnsupportedEncodingException e) {
-            fetchRecordsDelegate.onFetchFailed(e, null);
+            fetchRecordsDelegate.onFetchFailed(e);
         }
     }
 
     public void fetch(String[] guids, RepositorySessionFetchRecordsDelegate fetchRecordsDelegate) {
         String ids = flattenIDs(guids);
-        String index = "index";
 
         try {
             SyncStorageCollectionRequest request = makeSyncStorageCollectionRequest(
-                    -1, -1, true, index, ids, null);
-            this.fetchWithParameters(-1, -1, true, index, ids, request, fetchRecordsDelegate);
+                    -1, -1, true, DEFAULT_SORT_ORDER, ids, null);
+            this.fetchWithParameters(-1, -1, true, DEFAULT_SORT_ORDER, ids, request, fetchRecordsDelegate);
         } catch (URISyntaxException | UnsupportedEncodingException e) {
-            fetchRecordsDelegate.onFetchFailed(e, null);
+            fetchRecordsDelegate.onFetchFailed(e);
         }
     }
 
-    public Server11Repository getServerRepository() {
-        return this.repository;
-    }
-
     public void onFetchCompleted(SyncStorageResponse response,
                                  final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
                                  final SyncStorageCollectionRequest request, long newer,
                                  long limit, boolean full, String sort, String ids) {
         removeRequestFromPending(request);
 
-        // When we process our first request, we get back a X-Last-Modified header indicating when collection was modified last.
-        // We pass it to the server with every subsequent request (if we need to make more) as the X-If-Unmodified-Since header,
-        // and server is supposed to ensure that this pre-condition is met, and fail our request with a 412 error code otherwise.
-        // So, if all of this happens, these checks should never fail.
-        // However, we also track this header in client side, and can defensively validate against it here as well.
+        // When we process our first request, we get back a X-Last-Modified header indicating when
+        // collection was modified last. We pass it to the server with every subsequent request
+        // (if we need to make more) as the X-If-Unmodified-Since header, and server is supposed to
+        // ensure that this pre-condition is met, and fail our request with a 412 error code otherwise.
+        // So, if all of this happens, these checks should never fail. However, we also track this
+        // header on the client side, and can defensively validate against it here as well.
+
+        // This value won't be null, since we check for this in the delegate.
         final String currentLastModifiedTimestamp = response.lastModified();
         Logger.debug(LOG_TAG, "Last modified timestamp " + currentLastModifiedTimestamp);
 
-        // Sanity check. We also did a null check in delegate before passing it into here.
-        if (currentLastModifiedTimestamp == null) {
-            this.abort(fetchRecordsDelegate, "Last modified timestamp is missing");
-            return;
-        }
-
         final boolean lastModifiedChanged;
         synchronized (this) {
             if (this.lastModified == null) {
                 // First time seeing last modified timestamp.
                 this.lastModified = currentLastModifiedTimestamp;
             }
             lastModifiedChanged = !this.lastModified.equals(currentLastModifiedTimestamp);
         }
 
+        // We expected server to fail our request with 412 in case of concurrent modifications, so
+        // this is unexpected. However, let's treat this case just as if we received a 412.
         if (lastModifiedChanged) {
-            this.abort(fetchRecordsDelegate, "Last modified timestamp has changed unexpectedly");
+            this.handleFetchFailed(
+                    fetchRecordsDelegate,
+                    new CollectionConcurrentModificationException()
+            );
             return;
         }
 
-        final boolean hasNotReachedLimit;
-        synchronized (this) {
-            this.numRecords += response.weaveRecords();
-            hasNotReachedLimit = this.numRecords < repository.getDefaultTotalLimit();
-        }
+        // If we can (or must) stop batching at this point, let the delegate know that we're all done!
+        final String offset = response.weaveOffset();
+        if (offset == null || !allowMultipleBatches) {
+            final long normalizedTimestamp = response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED);
+            Logger.debug(LOG_TAG, "Fetch completed. Timestamp is " + normalizedTimestamp);
 
-        final String offset = response.weaveOffset();
-        final SyncStorageCollectionRequest newRequest;
-        try {
-            newRequest = makeSyncStorageCollectionRequest(newer,
-                    limit, full, sort, ids, offset);
-        } catch (final URISyntaxException | UnsupportedEncodingException e) {
+            // This isn't great, but shouldn't be too problematic - but do see notes below.
+            // Failing to reset a resume context after we're done with batching means that on next
+            // sync we'll erroneously try to resume downloading. If resume proceeds, we will fetch
+            // from an older timestamp, but offset by the amount of records we've fetched prior.
+            // Since we're diligent about setting a X-I-U-S header, any remote collection changes
+            // will be caught and we'll receive a 412.
+            if (!BatchingDownloaderController.resetResumeContextAndCommit(this.stateProvider)) {
+                Logger.warn(LOG_TAG, "Failed to reset resume context while completing a batch");
+            }
+
             this.workTracker.delayWorkItem(new Runnable() {
                 @Override
                 public void run() {
                     Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
-                    fetchRecordsDelegate.onFetchFailed(e, null);
+                    fetchRecordsDelegate.onFetchCompleted(normalizedTimestamp);
                 }
             });
             return;
         }
 
-        if (offset != null && hasNotReachedLimit) {
-            try {
-                this.fetchWithParameters(newer, limit, full, sort, ids, newRequest, fetchRecordsDelegate);
-            } catch (final URISyntaxException | UnsupportedEncodingException e) {
-                this.workTracker.delayWorkItem(new Runnable() {
-                    @Override
-                    public void run() {
-                        Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
-                        fetchRecordsDelegate.onFetchFailed(e, null);
-                    }
-                });
+        // This is unfortunate, but largely just means that in case we need to resume later on, it
+        // either won't be possible (and we'll fetch w/o resuming), or won't be as efficient (i.e.
+        // we'll download more records than necessary).
+        if (BatchingDownloaderController.isResumeContextSet(this.stateProvider)) {
+            if (!BatchingDownloaderController.updateResumeContextAndCommit(this.stateProvider, offset)) {
+                Logger.warn(LOG_TAG, "Failed to update resume context while processing a batch.");
+            }
+        } else {
+            if (!BatchingDownloaderController.setInitialResumeContextAndCommit(this.stateProvider, offset, newer, sort)) {
+                Logger.warn(LOG_TAG, "Failed to set initial resume context while processing a batch.");
             }
+        }
+
+        // We need to make another batching request!
+        // Let the delegate know that a batch fetch just completed before we proceed.
+        // This operation needs to run after every call to onFetchedRecord for this batch has been
+        // processed, hence the delayWorkItem call.
+        this.workTracker.delayWorkItem(new Runnable() {
+            @Override
+            public void run() {
+                Logger.debug(LOG_TAG, "Running onBatchCompleted.");
+                fetchRecordsDelegate.onBatchCompleted();
+            }
+        });
+
+        // Should we proceed, however? Do we have enough time?
+        if (!mayProceedWithBatching(fetchDeadline)) {
+            this.handleFetchFailed(fetchRecordsDelegate, new SyncDeadlineReachedException());
             return;
         }
 
-        final long normalizedTimestamp = response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED);
-        Logger.debug(LOG_TAG, "Fetch completed. Timestamp is " + normalizedTimestamp);
+        // Create and execute new batch request.
+        try {
+            final SyncStorageCollectionRequest newRequest = makeSyncStorageCollectionRequest(newer,
+                    limit, full, sort, ids, offset);
+            this.fetchWithParameters(newer, limit, full, sort, ids, newRequest, fetchRecordsDelegate);
+        } catch (final URISyntaxException | UnsupportedEncodingException e) {
+            if (!this.stateProvider.commit()) {
+                Logger.warn(LOG_TAG, "Failed to commit repository state while handling request creation error");
+            }
+            this.workTracker.delayWorkItem(new Runnable() {
+                @Override
+                public void run() {
+                    Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
+                    fetchRecordsDelegate.onFetchFailed(e);
+                }
+            });
+        }
+    }
+
+    private void handleFetchFailed(final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
+                                  final Exception ex) {
+        handleFetchFailed(fetchRecordsDelegate, ex, null);
+    }
+
+    /* package-local */ void handleFetchFailed(final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
+                              final Exception ex,
+                              @Nullable final SyncStorageCollectionRequest request) {
+        this.removeRequestFromPending(request);
+        this.abortRequests();
+
+        // Resume context is not discarded if we failed because of reaching our deadline. In this case,
+        // we keep it allowing us to resume our download exactly where we left off.
+        // Discard resume context for all other failures: 412 (concurrent modification), HTTP errors, ...
+        if (!(ex instanceof SyncDeadlineReachedException)) {
+            // Failing to reset context means that we will try to resume once we re-sync current stage.
+            // This won't affect X-I-U-S logic in case of 412 (it's set separately from resume context),
+            // and same notes apply after failing to reset context in onFetchCompleted (see above).
+            if (!BatchingDownloaderController.resetResumeContextAndCommit(stateProvider)) {
+                Logger.warn(LOG_TAG, "Failed to reset resume context while processing a non-deadline exception");
+            }
+        } else {
+            // Failing to commit the context here means that we didn't commit the latest high-water-mark,
+            // and won't be as efficient once we re-sync. That is, we might download more records than necessary.
+            if (!this.stateProvider.commit()) {
+                Logger.warn(LOG_TAG, "Failed to commit resume context while processing a deadline exception");
+            }
+        }
 
         this.workTracker.delayWorkItem(new Runnable() {
             @Override
             public void run() {
-                Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
-                fetchRecordsDelegate.onFetchCompleted(normalizedTimestamp);
-            }
-        });
-    }
-
-    public void onFetchFailed(final Exception ex,
-                              final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
-                              final SyncStorageCollectionRequest request) {
-        removeRequestFromPending(request);
-        this.workTracker.delayWorkItem(new Runnable() {
-            @Override
-            public void run() {
                 Logger.debug(LOG_TAG, "Running onFetchFailed.");
-                fetchRecordsDelegate.onFetchFailed(ex, null);
+                fetchRecordsDelegate.onFetchFailed(ex);
             }
         });
     }
 
     public void onFetchedRecord(CryptoRecord record,
                                 RepositorySessionFetchRecordsDelegate fetchRecordsDelegate) {
         this.workTracker.incrementOutstanding();
+
         try {
             fetchRecordsDelegate.onFetchedRecord(record);
+            // NB: changes to stateProvider are committed in either onFetchCompleted or handleFetchFailed.
+            if (this.keepTrackOfHighWaterMark) {
+                this.stateProvider.setLong(RepositoryStateProvider.KEY_HIGH_WATER_MARK, record.lastModified);
+            }
         } catch (Exception ex) {
             Logger.warn(LOG_TAG, "Got exception calling onFetchedRecord with WBO.", ex);
             throw new RuntimeException(ex);
         } finally {
             this.workTracker.decrementOutstanding();
         }
     }
 
@@ -289,22 +346,45 @@ public class BatchingDownloader {
         }
     }
 
     @Nullable
     protected synchronized String getLastModified() {
         return this.lastModified;
     }
 
-    private void abort(final RepositorySessionFetchRecordsDelegate delegate, final String msg) {
-        Logger.error(LOG_TAG, msg);
-        this.abortRequests();
-        this.workTracker.delayWorkItem(new Runnable() {
-            @Override
-            public void run() {
-                Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
-                delegate.onFetchFailed(
-                        new IllegalStateException(msg),
-                        null);
-            }
-        });
+    private static boolean mayProceedWithBatching(long deadline) {
+        // For simplicity, allow batching to proceed if there's at least a minute left for the sync.
+        // This should be enough to fetch and process records in the batch.
+        final long timeLeft = deadline - SystemClock.elapsedRealtime();
+        return timeLeft > TimeUnit.MINUTES.toMillis(1);
+    }
+
+    @VisibleForTesting
+    public static URI buildCollectionURI(Uri baseCollectionUri, boolean full, long newer, long limit, String sort, String ids, String offset) throws URISyntaxException {
+        Uri.Builder uriBuilder = baseCollectionUri.buildUpon();
+
+        if (full) {
+            uriBuilder.appendQueryParameter("full", "1");
+        }
+
+        if (newer >= 0) {
+            // Translate local millisecond timestamps into server decimal seconds.
+            String newerString = Utils.millisecondsToDecimalSecondsString(newer);
+            uriBuilder.appendQueryParameter("newer", newerString);
+        }
+        if (limit > 0) {
+            uriBuilder.appendQueryParameter("limit", Long.toString(limit));
+        }
+        if (sort != null) {
+            uriBuilder.appendQueryParameter("sort", sort); // We trust these values.
+        }
+        if (ids != null) {
+            uriBuilder.appendQueryParameter("ids", ids); // We trust these values.
+        }
+        if (offset != null) {
+            // Offset comes straight out of HTTP headers and it is the responsibility of the caller to URI-escape it.
+            uriBuilder.appendQueryParameter("offset", offset);
+        }
+
+        return new URI(uriBuilder.build().toString());
     }
 }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/downloaders/BatchingDownloaderController.java
@@ -0,0 +1,118 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.downloaders;
+
+import android.support.annotation.CheckResult;
+import android.util.Log;
+
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
+
+/**
+ * Encapsulates logic for resuming batching downloads.
+ *
+ * It's possible to resume a batching download if we have an offset token and the context in which
+ * we obtained the offset token did not change. Namely, we ensure that `since` and `order` parameters
+ * remain the same if offset is being used. See Bug 1330839 for a discussion on this.
+ *
+ * @author grisha
+ */
+public class BatchingDownloaderController {
+    private final static String LOG_TAG = "BatchingDownloaderCtrl";
+
+    private BatchingDownloaderController() {}
+
+    private static class ResumeContext {
+        private final String offset;
+        private final Long since;
+        private final String order;
+
+        private ResumeContext(String offset, Long since, String order) {
+            this.offset = offset;
+            this.since = since;
+            this.order = order;
+        }
+    }
+
+    private static ResumeContext getResumeContext(RepositoryStateProvider stateProvider, Long since, String order) {
+        // Build a "default" context around passed-in values if no context is available.
+        if (!isResumeContextSet(stateProvider)) {
+            return new ResumeContext(null, since, order);
+        }
+
+        final String offset = stateProvider.getString(RepositoryStateProvider.KEY_OFFSET);
+        final Long offsetSince = stateProvider.getLong(RepositoryStateProvider.KEY_OFFSET_SINCE);
+        final String offsetOrder = stateProvider.getString(RepositoryStateProvider.KEY_OFFSET_ORDER);
+
+        // If context is still valid, we can use it!
+        if (order.equals(offsetOrder)) {
+            return new ResumeContext(offset, offsetSince, offsetOrder);
+        }
+
+        // Build a "default" context around passed-in values.
+        return new ResumeContext(null, since, order);
+    }
+
+    /**
+     * Resumes a fetch if there is an offset present, and offset's context matches provided values.
+     * Otherwise, performs a regular fetch.
+     */
+    public static void resumeFetchSinceIfPossible(
+            BatchingDownloader downloader,
+            RepositoryStateProvider stateProvider,
+            RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
+            long since, long limit, String order) {
+        ResumeContext resumeContext = getResumeContext(stateProvider, since, order);
+
+        downloader.fetchSince(
+                fetchRecordsDelegate,
+                resumeContext.since,
+                limit,
+                resumeContext.order,
+                resumeContext.offset
+        );
+    }
+
+    @CheckResult
+    /* package-local */ static boolean setInitialResumeContextAndCommit(RepositoryStateProvider stateProvider, String offset, long since, String order) {
+        if (isResumeContextSet(stateProvider)) {
+            throw new IllegalStateException("Not allowed to set resume context more than once. Use update instead.");
+        }
+
+        return stateProvider
+                .setString(RepositoryStateProvider.KEY_OFFSET, offset)
+                .setLong(RepositoryStateProvider.KEY_OFFSET_SINCE, since)
+                .setString(RepositoryStateProvider.KEY_OFFSET_ORDER, order)
+                .commit();
+    }
+
+    @CheckResult
+    /* package-local */ static boolean updateResumeContextAndCommit(RepositoryStateProvider stateProvider, String offset) {
+        if (!isResumeContextSet(stateProvider)) {
+            throw new IllegalStateException("Tried to update resume context before it was set.");
+        }
+
+        return stateProvider
+                .setString(RepositoryStateProvider.KEY_OFFSET, offset)
+                .commit();
+    }
+
+    @CheckResult
+    /* package-local */ static boolean resetResumeContextAndCommit(RepositoryStateProvider stateProvider) {
+        return stateProvider
+                .clear(RepositoryStateProvider.KEY_OFFSET)
+                .clear(RepositoryStateProvider.KEY_OFFSET_SINCE)
+                .clear(RepositoryStateProvider.KEY_OFFSET_ORDER)
+                .commit();
+    }
+
+    /*package-local */ static boolean isResumeContextSet(RepositoryStateProvider stateProvider) {
+        final String offset = stateProvider.getString(RepositoryStateProvider.KEY_OFFSET);
+        final Long offsetSince = stateProvider.getLong(RepositoryStateProvider.KEY_OFFSET_SINCE);
+        final String offsetOrder = stateProvider.getString(RepositoryStateProvider.KEY_OFFSET_ORDER);
+
+        return offset != null && offsetSince != null && offsetOrder != null;
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/downloaders/BatchingDownloaderDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/downloaders/BatchingDownloaderDelegate.java
@@ -1,39 +1,41 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories.downloaders;
 
 import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
 import org.mozilla.gecko.sync.CryptoRecord;
 import org.mozilla.gecko.sync.HTTPFailureException;
 import org.mozilla.gecko.sync.crypto.KeyBundle;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.net.SyncStorageCollectionRequest;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.net.WBOCollectionRequestDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
 
+
 /**
  * Delegate that gets passed into fetch methods to handle server response from fetch.
  */
 public class BatchingDownloaderDelegate extends WBOCollectionRequestDelegate {
     public static final String LOG_TAG = "BatchingDownloaderDelegate";
 
-    private BatchingDownloader downloader;
-    private RepositorySessionFetchRecordsDelegate fetchRecordsDelegate;
-    public SyncStorageCollectionRequest request;
+    private final BatchingDownloader downloader;
+    private final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate;
+    public final SyncStorageCollectionRequest request;
     // Used to pass back to BatchDownloader to start another fetch with these parameters if needed.
-    private long newer;
-    private long batchLimit;
-    private boolean full;
-    private String sort;
-    private String ids;
+    private final long newer;
+    private final long batchLimit;
+    private final boolean full;
+    private final String sort;
+    private final String ids;
 
     public BatchingDownloaderDelegate(final BatchingDownloader downloader,
                                       final RepositorySessionFetchRecordsDelegate fetchRecordsDelegate,
                                       final SyncStorageCollectionRequest request, long newer,
                                       long batchLimit, boolean full, String sort, String ids) {
         this.downloader = downloader;
         this.fetchRecordsDelegate = fetchRecordsDelegate;
         this.request = request;
@@ -41,47 +43,59 @@ public class BatchingDownloaderDelegate 
         this.batchLimit = batchLimit;
         this.full = full;
         this.sort = sort;
         this.ids = ids;
     }
 
     @Override
     public AuthHeaderProvider getAuthHeaderProvider() {
-        return this.downloader.getServerRepository().getAuthHeaderProvider();
+        return this.downloader.authHeaderProvider;
     }
 
     @Override
     public String ifUnmodifiedSince() {
         return this.downloader.getLastModified();
     }
 
     @Override
     public void handleRequestSuccess(SyncStorageResponse response) {
         Logger.debug(LOG_TAG, "Fetch done.");
-        if (response.lastModified() != null) {
-            this.downloader.onFetchCompleted(response, this.fetchRecordsDelegate, this.request,
-                    this.newer, this.batchLimit, this.full, this.sort, this.ids);
+
+        // Sanity check.
+        if (response.lastModified() == null) {
+            this.downloader.handleFetchFailed(
+                    this.fetchRecordsDelegate,
+                    new IllegalStateException("Missing last modified header from response"),
+                    this.request
+            );
             return;
         }
-        this.downloader.onFetchFailed(
-                new IllegalStateException("Missing last modified header from response"),
-                this.fetchRecordsDelegate,
-                this.request);
+
+        this.downloader.onFetchCompleted(response, this.fetchRecordsDelegate, this.request,
+                this.newer, this.batchLimit, this.full, this.sort, this.ids);
     }
 
     @Override
     public void handleRequestFailure(SyncStorageResponse response) {
-        this.handleRequestError(new HTTPFailureException(response));
+        Logger.warn(LOG_TAG, "Got a non-success response.");
+        // Handle concurrent modification errors separately.
+        final Exception ex;
+        if (response.getStatusCode() == 412) {
+            ex = new CollectionConcurrentModificationException();
+        } else {
+            ex = new HTTPFailureException(response);
+        }
+        this.handleRequestError(ex);
     }
 
     @Override
     public void handleRequestError(final Exception ex) {
         Logger.warn(LOG_TAG, "Got request error.", ex);
-        this.downloader.onFetchFailed(ex, this.fetchRecordsDelegate, this.request);
+        this.downloader.handleFetchFailed(this.fetchRecordsDelegate, ex, this.request);
     }
 
     @Override
     public void handleWBO(CryptoRecord record) {
         this.downloader.onFetchedRecord(record, this.fetchRecordsDelegate);
     }
 
     @Override
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
@@ -3,29 +3,27 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories.uploaders;
 
 import android.net.Uri;
 import android.support.annotation.VisibleForTesting;
 
 import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
 import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.Server11RecordPostFailedException;
-import org.mozilla.gecko.sync.net.SyncResponse;
-import org.mozilla.gecko.sync.net.SyncStorageResponse;
-import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
+import org.mozilla.gecko.sync.Server15PreviousPostFailedException;
+import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
 import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Uploader which implements batching introduced in Sync 1.5.
  *
  * Batch vs payload terminology:
  * - batch is comprised of a series of payloads, which are all committed at the same time.
  * -- identified via a "batch token", which is returned after first payload for the batch has been uploaded.
@@ -70,49 +68,72 @@ public class BatchingUploader {
     // Sanity check. RECORD_SEPARATOR and RECORD_START are assumed to be of the same length.
     static {
         if (RecordUploadRunnable.RECORD_SEPARATOR.length != RecordUploadRunnable.RECORDS_START.length) {
             throw new IllegalStateException("Separator and start tokens must be of the same length");
         }
     }
 
     // Accessed by the record consumer thread pool.
+    private final ExecutorService executor;
     // Will be re-created, so mark it as volatile.
     private volatile Payload payload;
 
     // Accessed by both the record consumer thread pool and the network worker thread(s).
     /* package-local */ final Uri collectionUri;
     /* package-local */ final RepositorySessionStoreDelegate sessionStoreDelegate;
     /* package-local */ @VisibleForTesting final PayloadDispatcher payloadDispatcher;
-    private final Server11RepositorySession repositorySession;
+    /* package-local */ final AuthHeaderProvider authHeaderProvider;
+    private final RepositorySession repositorySession;
     // Will be re-created, so mark it as volatile.
     private volatile UploaderMeta uploaderMeta;
 
     // Used to ensure we have thread-safe access to the following:
     // - byte and record counts in both Payload and BatchMeta objects
     // - buffers in the Payload object
     private final Object payloadLock = new Object();
 
-
-    public BatchingUploader(final Server11RepositorySession repositorySession, final Executor workQueue, final RepositorySessionStoreDelegate sessionStoreDelegate) {
+    public BatchingUploader(
+            final RepositorySession repositorySession, final ExecutorService workQueue,
+            final RepositorySessionStoreDelegate sessionStoreDelegate, final Uri baseCollectionUri,
+            final Long localCollectionLastModified, final InfoConfiguration infoConfiguration,
+            final AuthHeaderProvider authHeaderProvider) {
         this.repositorySession = repositorySession;
         this.sessionStoreDelegate = sessionStoreDelegate;
-        this.collectionUri = Uri.parse(repositorySession.getServerRepository().collectionURI().toString());
+        this.collectionUri = baseCollectionUri;
+        this.authHeaderProvider = authHeaderProvider;
 
-        InfoConfiguration config = repositorySession.getServerRepository().getInfoConfiguration();
-        this.uploaderMeta = new UploaderMeta(payloadLock, config.maxTotalBytes, config.maxTotalRecords);
-        this.payload = new Payload(payloadLock, config.maxPostBytes, config.maxPostRecords);
+        this.uploaderMeta = new UploaderMeta(
+                payloadLock, infoConfiguration.maxTotalBytes, infoConfiguration.maxTotalRecords);
+        this.payload = new Payload(
+                payloadLock, infoConfiguration.maxPostBytes, infoConfiguration.maxPostRecords);
 
-        this.payloadDispatcher = new PayloadDispatcher(
-                workQueue, this, repositorySession.getServerRepository().getCollectionLastModified());
+        this.payloadDispatcher = createPayloadDispatcher(workQueue, localCollectionLastModified);
+
+        this.executor = workQueue;
     }
 
     // Called concurrently from the threads running off of a record consumer thread pool.
     public void process(final Record record) {
         final String guid = record.guid;
+
+        // If store failed entirely, just bail out. We've already told our delegate that we failed.
+        if (payloadDispatcher.storeFailed) {
+            return;
+        }
+
+        // If a record or a payload failed, we won't let subsequent requests proceed.
+        // This means that we may bail much earlier.
+        if (payloadDispatcher.recordUploadFailed) {
+            sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(
+                    new Server15PreviousPostFailedException(), guid
+            );
+            return;
+        }
+
         final byte[] recordBytes = record.toJSONBytes();
         final long recordDeltaByteCount = recordBytes.length + PER_RECORD_OVERHEAD_BYTE_COUNT;
 
         Logger.debug(LOG_TAG, "Processing a record with guid: " + guid);
 
         // We can't upload individual records which exceed our payload byte limit.
         if ((recordDeltaByteCount + PER_PAYLOAD_OVERHEAD_BYTE_COUNT) > payload.maxBytes) {
             sessionStoreDelegate.onRecordStoreFailed(new RecordTooLargeToUpload(), guid);
@@ -192,20 +213,16 @@ public class BatchingUploader {
     // Will be called from a thread dispatched by PayloadDispatcher.
     // NB: Access to `uploaderMeta.isUnlimited` is guarded by the payloadLock.
     /* package-local */ void setUnlimitedMode(boolean isUnlimited) {
         // If we know for sure that we're not in a batching mode,
         // consider our batch to be of unlimited size.
         this.uploaderMeta.setIsUnlimited(isUnlimited);
     }
 
-    /* package-local */ Server11RepositorySession getRepositorySession() {
-        return repositorySession;
-    }
-
     private void flush(final boolean isCommit, final boolean isLastPayload) {
         final ArrayList<byte[]> outgoing;
         final ArrayList<String> outgoingGuids;
         final long byteCount;
 
         // Even though payload object itself is thread-safe, we want to ensure we get these altogether
         // as a "unit". Another approach would be to create a wrapper object for these values, but this works.
         synchronized (payloadLock) {
@@ -217,17 +234,25 @@ public class BatchingUploader {
 
         payloadDispatcher.queue(outgoing, outgoingGuids, byteCount, isCommit, isLastPayload);
 
         if (isCommit && !isLastPayload) {
             uploaderMeta = uploaderMeta.nextUploaderMeta();
         }
     }
 
-    /* package-local */ static class BatchingUploaderException extends Exception {
+    /**
+     * Allows tests to define their own PayloadDispatcher.
+     */
+    @VisibleForTesting
+    PayloadDispatcher createPayloadDispatcher(ExecutorService workQueue, Long localCollectionLastModified) {
+        return new PayloadDispatcher(workQueue, this, localCollectionLastModified);
+    }
+
+    public static class BatchingUploaderException extends Exception {
         private static final long serialVersionUID = 1L;
     }
     /* package-local */ static class LastModifiedDidNotChange extends BatchingUploaderException {
         private static final long serialVersionUID = 1L;
     }
     /* package-local */ static class LastModifiedChangedUnexpectedly extends BatchingUploaderException {
         private static final long serialVersionUID = 1L;
     }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/PayloadDispatcher.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/PayloadDispatcher.java
@@ -3,17 +3,18 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories.uploaders;
 
 import android.support.annotation.Nullable;
 import android.support.annotation.VisibleForTesting;
 
 import org.mozilla.gecko.background.common.log.Logger;
-import org.mozilla.gecko.sync.Server11RecordPostFailedException;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
+import org.mozilla.gecko.sync.Server15RecordPostFailedException;
 import org.mozilla.gecko.sync.net.SyncResponse;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 
 import java.util.ArrayList;
 import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
@@ -25,22 +26,25 @@ import java.util.concurrent.atomic.Atomi
 class PayloadDispatcher {
     private static final String LOG_TAG = "PayloadDispatcher";
 
     // All payload runnables share the same whiteboard.
     // It's accessed directly by the runnables; tests also make use of this direct access.
     volatile BatchMeta batchWhiteboard;
     private final AtomicLong uploadTimestamp = new AtomicLong(0);
 
-    // Accessed from different threads sequentially running on the 'executor'.
-    private volatile boolean recordUploadFailed = false;
-
     private final Executor executor;
     private final BatchingUploader uploader;
 
+    // For both of these flags:
+    // Written from sequentially running thread(s) on the SingleThreadExecutor `executor`.
+    // Read by many threads running concurrently on the records consumer thread pool.
+    volatile boolean recordUploadFailed = false;
+    volatile boolean storeFailed = false;
+
     PayloadDispatcher(Executor executor, BatchingUploader uploader, @Nullable Long initialLastModified) {
         // Initially we don't know if we're in a batching mode.
         this.batchWhiteboard = new BatchMeta(initialLastModified, null);
         this.uploader = uploader;
         this.executor = executor;
     }
 
     void queue(
@@ -48,31 +52,17 @@ class PayloadDispatcher {
             final ArrayList<String> outgoingGuids,
             final long byteCount,
             final boolean isCommit, final boolean isLastPayload) {
 
         // Note that `executor` is expected to be a SingleThreadExecutor.
         executor.execute(new BatchContextRunnable(isCommit) {
             @Override
             public void run() {
-                new RecordUploadRunnable(
-                        new BatchingAtomicUploaderMayUploadProvider(),
-                        uploader.collectionUri,
-                        batchWhiteboard.getToken(),
-                        new PayloadUploadDelegate(
-                                uploader.getRepositorySession().getServerRepository().getAuthHeaderProvider(),
-                                PayloadDispatcher.this,
-                                outgoingGuids,
-                                isCommit,
-                                isLastPayload
-                        ),
-                        outgoing,
-                        byteCount,
-                        isCommit
-                ).run();
+                createRecordUploadRunnable(outgoing, outgoingGuids, byteCount, isCommit, isLastPayload).run();
             }
         });
     }
 
     void setInBatchingMode(boolean inBatchingMode) {
         batchWhiteboard.setInBatchingMode(inBatchingMode);
         uploader.setUnlimitedMode(!inBatchingMode);
     }
@@ -128,25 +118,31 @@ class PayloadDispatcher {
                 } else {
                     uploader.finished(uploadTimestamp);
                 }
             }
         });
     }
 
     void recordFailed(final String recordGuid) {
-        recordFailed(new Server11RecordPostFailedException(), recordGuid);
+        recordFailed(new Server15RecordPostFailedException(), recordGuid);
     }
 
     void recordFailed(final Exception e, final String recordGuid) {
         Logger.debug(LOG_TAG, "Record store failed for guid " + recordGuid + " with exception: " + e.toString());
         recordUploadFailed = true;
         uploader.sessionStoreDelegate.onRecordStoreFailed(e, recordGuid);
     }
 
+    void concurrentModificationDetected() {
+        recordUploadFailed = true;
+        storeFailed = true;
+        uploader.sessionStoreDelegate.onStoreFailed(new CollectionConcurrentModificationException());
+    }
+
     void prepareForNextBatch() {
         batchWhiteboard = batchWhiteboard.nextBatchMeta();
     }
 
     private static void bumpTimestampTo(final AtomicLong current, long newValue) {
         while (true) {
             long existing = current.get();
             if (existing > newValue) {
@@ -154,16 +150,41 @@ class PayloadDispatcher {
             }
             if (current.compareAndSet(existing, newValue)) {
                 return;
             }
         }
     }
 
     /**
+     * Allows tests to define their own RecordUploadRunnable.
+     */
+    @VisibleForTesting
+    Runnable createRecordUploadRunnable(final ArrayList<byte[]> outgoing,
+                                                  final ArrayList<String> outgoingGuids,
+                                                  final long byteCount,
+                                                  final boolean isCommit, final boolean isLastPayload) {
+        return new RecordUploadRunnable(
+                new BatchingAtomicUploaderMayUploadProvider(),
+                uploader.collectionUri,
+                batchWhiteboard.getToken(),
+                new PayloadUploadDelegate(
+                        uploader.authHeaderProvider,
+                        PayloadDispatcher.this,
+                        outgoingGuids,
+                        isCommit,
+                        isLastPayload
+                ),
+                outgoing,
+                byteCount,
+                isCommit
+        );
+    }
+
+    /**
      * Allows tests to easily peek into the flow of upload tasks.
      */
     @VisibleForTesting
     abstract static class BatchContextRunnable implements Runnable {
         boolean isCommit;
 
         BatchContextRunnable(boolean isCommit) {
             this.isCommit = isCommit;
@@ -171,14 +192,15 @@ class PayloadDispatcher {
     }
 
     /**
      * Allows tests to tell apart non-payload runnables going through the executor.
      */
     @VisibleForTesting
     abstract static class NonPayloadContextRunnable implements Runnable {}
 
+    // Instances of this class must be accessed from threads running on the `executor`.
     private class BatchingAtomicUploaderMayUploadProvider implements MayUploadProvider {
         public boolean mayUpload() {
             return !recordUploadFailed;
         }
     }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/PayloadUploadDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/PayloadUploadDelegate.java
@@ -179,17 +179,21 @@ class PayloadUploadDelegate implements S
 
         if (isCommit && !isLastPayload) {
             dispatcher.prepareForNextBatch();
         }
     }
 
     @Override
     public void handleRequestFailure(final SyncStorageResponse response) {
-        this.handleRequestError(new HTTPFailureException(response));
+        if (response.getStatusCode() == 412) {
+            dispatcher.concurrentModificationDetected();
+        } else {
+            this.handleRequestError(new HTTPFailureException(response));
+        }
     }
 
     @Override
     public void handleRequestError(Exception e) {
         for (String guid : postedRecordGuids) {
             dispatcher.recordFailed(e, guid);
         }
         // GC
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/RecordUploadRunnable.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/RecordUploadRunnable.java
@@ -4,18 +4,18 @@
 
 package org.mozilla.gecko.sync.repositories.uploaders;
 
 import android.net.Uri;
 import android.support.annotation.Nullable;
 import android.support.annotation.VisibleForTesting;
 
 import org.mozilla.gecko.background.common.log.Logger;
-import org.mozilla.gecko.sync.Server11PreviousPostFailedException;
 import org.mozilla.gecko.sync.Utils;
+import org.mozilla.gecko.sync.Server15PreviousPostFailedException;
 import org.mozilla.gecko.sync.net.SyncStorageRequest;
 import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -131,17 +131,17 @@ public class RecordUploadRunnable implem
             return true;
         }
     }
 
     @Override
     public void run() {
         if (!mayUploadProvider.mayUpload()) {
             Logger.info(LOG_TAG, "Told not to proceed by the uploader. Cancelling upload, failing records.");
-            uploadDelegate.handleRequestError(new Server11PreviousPostFailedException());
+            uploadDelegate.handleRequestError(new Server15PreviousPostFailedException());
             return;
         }
 
         Logger.trace(LOG_TAG, "Running upload task. Outgoing records: " + outgoing.size());
 
         // We don't want the task queue to proceed until this request completes.
         // Fortunately, BaseResource is currently synchronous.
         // If that ever changes, you'll need to block here.
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserBookmarksServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserBookmarksServerSyncStage.java
@@ -1,73 +1,95 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
 import java.net.URISyntaxException;
 
-import org.mozilla.gecko.sync.JSONRecordFetcher;
 import org.mozilla.gecko.sync.MetaGlobalException;
-import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.middleware.BufferingMiddlewareRepository;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
+import org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.android.AndroidBrowserBookmarksRepository;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecordFactory;
 import org.mozilla.gecko.sync.repositories.domain.VersionConstants;
 
 public class AndroidBrowserBookmarksServerSyncStage extends ServerSyncStage {
   protected static final String LOG_TAG = "BookmarksStage";
 
   // Eventually this kind of sync stage will be data-driven,
   // and all this hard-coding can go away.
-  private static final String BOOKMARKS_SORT          = "index";
-  // Sanity limit. Batch and total limit are the same for now, and will be adjusted
-  // once buffer and high water mark are in place. See Bug 730142.
+  private static final String BOOKMARKS_SORT = "oldest";
   private static final long BOOKMARKS_BATCH_LIMIT = 5000;
-  private static final long BOOKMARKS_TOTAL_LIMIT = 5000;
 
   @Override
   protected String getCollection() {
     return "bookmarks";
   }
 
   @Override
   protected String getEngineName() {
     return "bookmarks";
   }
 
   @Override
   public Integer getStorageVersion() {
     return VersionConstants.BOOKMARKS_ENGINE_VERSION;
   }
 
+  /**
+   * We're downloading records into a non-persistent buffer for safety, so we can't use a H.W.M.
+   * Once this stage is using a persistent buffer, this should change. See Bug 1318515.
+   *
+   * @return HighWaterMark.Disabled
+   */
+  @Override
+  protected HighWaterMark getAllowedToUseHighWaterMark() {
+    return HighWaterMark.Disabled;
+  }
+
+  /**
+   * Full batching is allowed, because we want all of the records.
+   *
+   * @return MultipleBatches.Enabled
+   */
+  @Override
+  protected MultipleBatches getAllowedMultipleBatches() {
+    return MultipleBatches.Enabled;
+  }
+
   @Override
   protected Repository getRemoteRepository() throws URISyntaxException {
-    // If this is a first sync, we need to check server counts to make sure that we aren't
-    // going to screw up. SafeConstrainedServer11Repository does this. See Bug 814331.
-    AuthHeaderProvider authHeaderProvider = session.getAuthHeaderProvider();
-    final JSONRecordFetcher countsFetcher = new JSONRecordFetcher(session.config.infoCollectionCountsURL(), authHeaderProvider);
-    String collection = getCollection();
-    return new SafeConstrainedServer11Repository(
-        collection,
-        session.config.storageURL(),
-        session.getAuthHeaderProvider(),
-        session.config.infoCollections,
-        session.config.infoConfiguration,
-        BOOKMARKS_BATCH_LIMIT,
-        BOOKMARKS_TOTAL_LIMIT,
-        BOOKMARKS_SORT,
-        countsFetcher);
+    return new ConfigurableServer15Repository(
+            getCollection(),
+            session.getSyncDeadline(),
+            session.config.storageURL(),
+            session.getAuthHeaderProvider(),
+            session.config.infoCollections,
+            session.config.infoConfiguration,
+            BOOKMARKS_BATCH_LIMIT,
+            BOOKMARKS_SORT,
+            getAllowedMultipleBatches(),
+            getAllowedToUseHighWaterMark(),
+            getRepositoryStateProvider()
+    );
   }
 
   @Override
   protected Repository getLocalRepository() {
-    return new AndroidBrowserBookmarksRepository();
+    return new BufferingMiddlewareRepository(
+            session.getSyncDeadline(),
+            new MemoryBufferStorage(),
+            new AndroidBrowserBookmarksRepository()
+    );
   }
 
   @Override
   protected RecordFactory getRecordFactory() {
     return new BookmarkRecordFactory();
   }
 
   @Override
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserHistoryServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserHistoryServerSyncStage.java
@@ -2,33 +2,32 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
 import java.net.URISyntaxException;
 
 import org.mozilla.gecko.sync.MetaGlobalException;
-import org.mozilla.gecko.sync.repositories.ConstrainedServer11Repository;
+import org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository;
+import org.mozilla.gecko.sync.repositories.PersistentRepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.android.AndroidBrowserHistoryRepository;
 import org.mozilla.gecko.sync.repositories.domain.HistoryRecordFactory;
 import org.mozilla.gecko.sync.repositories.domain.VersionConstants;
 
 public class AndroidBrowserHistoryServerSyncStage extends ServerSyncStage {
   protected static final String LOG_TAG = "HistoryStage";
 
   // Eventually this kind of sync stage will be data-driven,
   // and all this hard-coding can go away.
-  private static final String HISTORY_SORT          = "index";
-  // Sanity limit. Batch and total limit are the same for now, and will be adjusted
-  // once buffer and high water mark are in place. See Bug 730142.
-  private static final long HISTORY_BATCH_LIMIT = 250;
-  private static final long HISTORY_TOTAL_LIMIT = 250;
+  private static final String HISTORY_SORT = "oldest";
+  private static final long HISTORY_BATCH_LIMIT = 500;
 
   @Override
   protected String getCollection() {
     return "history";
   }
 
   @Override
   protected String getEngineName() {
@@ -40,28 +39,67 @@ public class AndroidBrowserHistoryServer
     return VersionConstants.HISTORY_ENGINE_VERSION;
   }
 
   @Override
   protected Repository getLocalRepository() {
     return new AndroidBrowserHistoryRepository();
   }
 
+  /**
+   * We use a persistent state provider for this stage, because it lets us resume interrupted
+   * syncs more efficiently.
+   * We are able to do this because we match criteria described in {@link RepositoryStateProvider}.
+   *
+   * @return Persistent repository state provider.
+   */
+  @Override
+  protected RepositoryStateProvider getRepositoryStateProvider() {
+    return new PersistentRepositoryStateProvider(
+            session.config.getBranch(statePreferencesPrefix())
+    );
+  }
+
+  /**
+   * We're downloading records oldest-first directly into live storage, forgoing any buffering other
+   * than AndroidBrowserHistoryRepository's internal records queue. These conditions allow us to use
+   * high-water-mark to resume downloads in case of interruptions.
+   *
+   * @return HighWaterMark.Enabled
+   */
+  @Override
+  protected HighWaterMark getAllowedToUseHighWaterMark() {
+    return HighWaterMark.Enabled;
+  }
+
+  /**
+   * Full batching is allowed, because we want all of the records.
+   *
+   * @return MultipleBatches.Enabled
+   */
+  @Override
+  protected MultipleBatches getAllowedMultipleBatches() {
+    return MultipleBatches.Enabled;
+  }
+
   @Override
   protected Repository getRemoteRepository() throws URISyntaxException {
-    String collection = getCollection();
-    return new ConstrainedServer11Repository(
-                                             collection,
-                                             session.config.storageURL(),
-                                             session.getAuthHeaderProvider(),
-                                             session.config.infoCollections,
-                                             session.config.infoConfiguration,
-                                             HISTORY_BATCH_LIMIT,
-                                             HISTORY_TOTAL_LIMIT,
-                                             HISTORY_SORT);
+    return new ConfigurableServer15Repository(
+            getCollection(),
+            session.getSyncDeadline(),
+            session.config.storageURL(),
+            session.getAuthHeaderProvider(),
+            session.config.infoCollections,
+            session.config.infoConfiguration,
+            HISTORY_BATCH_LIMIT,
+            HISTORY_SORT,
+            getAllowedMultipleBatches(),
+            getAllowedToUseHighWaterMark(),
+            getRepositoryStateProvider()
+    );
   }
 
   @Override
   protected RecordFactory getRecordFactory() {
     return new HistoryRecordFactory();
   }
 
   @Override
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserRecentHistoryServerSyncStage.java
@@ -0,0 +1,124 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.stage;
+
+import org.mozilla.gecko.sync.MetaGlobalException;
+import org.mozilla.gecko.sync.NonObjectJSONException;
+import org.mozilla.gecko.sync.SynchronizerConfiguration;
+import org.mozilla.gecko.sync.middleware.BufferingMiddlewareRepository;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
+import org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.PersistentRepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.Repository;
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.android.AndroidBrowserHistoryRepository;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+/**
+ * History sync stage which is limited to just recent history, and will only run if the full history
+ * sync stage did not complete yet. Its purpose is to give users with a lot of history in their
+ * profiles a good experience during a large collection sync.
+ *
+ * @author grisha
+ */
+public class AndroidBrowserRecentHistoryServerSyncStage extends AndroidBrowserHistoryServerSyncStage {
+    protected static final String LOG_TAG = "RecentHistoryStage";
+
+    // Bug 1316110 tracks follow-up work to generalize this stage and make it more efficient.
+    private static final int HISTORY_BATCH_LIMIT = 50;
+    // We need a custom configuration bundle name for this stage, because we want to track last-synced
+    // timestamp for this stage separately from that of a full history sync stage, yet their collection
+    // names are the same.
+    private static final String BUNDLE_NAME = "recentHistory.";
+    private static final String HISTORY_SORT = "newest";
+
+    @Override
+    public String bundlePrefix() {
+        return BUNDLE_NAME;
+    }
+
+    /**
+     * We use a non-persistent state provider for this stage, as it's designed to just run once.
+     *
+     * @return Non-persistent repository state provider.
+     */
+    @Override
+    protected RepositoryStateProvider getRepositoryStateProvider() {
+        return new NonPersistentRepositoryStateProvider();
+    }
+
+    /**
+     * Force download to be limited to a single batch.
+     * We just want to fetch a batch-worth of records for this stage.
+     *
+     * @return MultipleBatches.Disabled
+     */
+    @Override
+    protected MultipleBatches getAllowedMultipleBatches() {
+        return MultipleBatches.Disabled;
+    }
+
+    /**
+     * Right now this stage is designed to run just once, when there's no history data available.
+     *
+     * @return HighWaterMark.Disabled
+     */
+    @Override
+    protected HighWaterMark getAllowedToUseHighWaterMark() {
+        return HighWaterMark.Disabled;
+    }
+
+    @Override
+    protected Repository getLocalRepository() {
+        return new BufferingMiddlewareRepository(
+                session.getSyncDeadline(),
+                new MemoryBufferStorage(),
+                new AndroidBrowserHistoryRepository()
+        );
+    }
+
+    @Override
+    protected Repository getRemoteRepository() throws URISyntaxException {
+        return new ConfigurableServer15Repository(
+                getCollection(),
+                session.getSyncDeadline(),
+                session.config.storageURL(),
+                session.getAuthHeaderProvider(),
+                session.config.infoCollections,
+                session.config.infoConfiguration,
+                HISTORY_BATCH_LIMIT,
+                HISTORY_SORT,
+                getAllowedMultipleBatches(),
+                getAllowedToUseHighWaterMark(),
+                getRepositoryStateProvider());
+    }
+
+    /**
+     * This stage is only enabled if the full history sync stage is enabled and did not complete a sync yet.
+     */
+    @Override
+    public boolean isEnabled() throws MetaGlobalException {
+        final boolean historyStageEnabled = super.isEnabled();
+        if (!historyStageEnabled) {
+            return false;
+        }
+
+        if (session.config == null) {
+            return false;
+        }
+
+        final SynchronizerConfiguration synchronizerConfiguration;
+        try {
+            synchronizerConfiguration = new SynchronizerConfiguration(session.config.getBranch(getCollection() + "."));
+        } catch (IOException|NonObjectJSONException e) {
+            return false;
+        }
+
+        return synchronizerConfiguration.localBundle.getTimestamp() == -1;
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FennecTabsServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FennecTabsServerSyncStage.java
@@ -1,14 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
+import org.mozilla.gecko.sync.middleware.BufferingMiddlewareRepository;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.android.FennecTabsRepository;
 import org.mozilla.gecko.sync.repositories.domain.TabsRecordFactory;
 import org.mozilla.gecko.sync.repositories.domain.VersionConstants;
 
 public class FennecTabsServerSyncStage extends ServerSyncStage {
   private static final String COLLECTION = "tabs";
@@ -25,16 +27,20 @@ public class FennecTabsServerSyncStage e
 
   @Override
   public Integer getStorageVersion() {
     return VersionConstants.TABS_ENGINE_VERSION;
   }
 
   @Override
   protected Repository getLocalRepository() {
-    return new FennecTabsRepository(session.getClientsDelegate());
+    return new BufferingMiddlewareRepository(
+            session.getSyncDeadline(),
+            new MemoryBufferStorage(),
+            new FennecTabsRepository(session.getClientsDelegate())
+    );
   }
 
   @Override
   protected RecordFactory getRecordFactory() {
     return new TabsRecordFactory();
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FormHistoryServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FormHistoryServerSyncStage.java
@@ -2,66 +2,95 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
 import java.net.URISyntaxException;
 
 import org.mozilla.gecko.sync.CryptoRecord;
-import org.mozilla.gecko.sync.repositories.ConstrainedServer11Repository;
+import org.mozilla.gecko.sync.middleware.BufferingMiddlewareRepository;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
+import org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.android.FormHistoryRepositorySession;
 import org.mozilla.gecko.sync.repositories.domain.FormHistoryRecord;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 import org.mozilla.gecko.sync.repositories.domain.VersionConstants;
 
 public class FormHistoryServerSyncStage extends ServerSyncStage {
 
   // Eventually this kind of sync stage will be data-driven,
   // and all this hard-coding can go away.
-  private static final String FORM_HISTORY_SORT          = "index";
-  // Sanity limit. Batch and total limit are the same for now, and will be adjusted
-  // once buffer and high water mark are in place. See Bug 730142.
+  private static final String FORM_HISTORY_SORT = "oldest";
   private static final long FORM_HISTORY_BATCH_LIMIT = 5000;
-  private static final long FORM_HISTORY_TOTAL_LIMIT = 5000;
 
   @Override
   protected String getCollection() {
     return "forms";
   }
 
   @Override
   protected String getEngineName() {
     return "forms";
   }
 
   @Override
   public Integer getStorageVersion() {
     return VersionConstants.FORMS_ENGINE_VERSION;
   }
 
+  /**
+   * We're downloading records into a non-persistent buffer for safety, so we can't use a H.W.M.
+   * Once this stage is using a persistent buffer, this should change.
+   *
+   * @return HighWaterMark.Disabled
+   */
+  @Override
+  protected HighWaterMark getAllowedToUseHighWaterMark() {
+    return HighWaterMark.Disabled;
+  }
+
+  /**
+   * Full batching is allowed, because we want all of the records.
+   *
+   * @return MultipleBatches.Enabled
+   */
+  @Override
+  protected MultipleBatches getAllowedMultipleBatches() {
+    return MultipleBatches.Enabled;
+  }
+
   @Override
   protected Repository getRemoteRepository() throws URISyntaxException {
     String collection = getCollection();
-    return new ConstrainedServer11Repository(
-        collection,
-        session.config.storageURL(),
-        session.getAuthHeaderProvider(),
-        session.config.infoCollections,
-        session.config.infoConfiguration,
-        FORM_HISTORY_BATCH_LIMIT,
-        FORM_HISTORY_TOTAL_LIMIT,
-        FORM_HISTORY_SORT);
+    return new ConfigurableServer15Repository(
+            collection,
+            session.getSyncDeadline(),
+            session.config.storageURL(),
+            session.getAuthHeaderProvider(),
+            session.config.infoCollections,
+            session.config.infoConfiguration,
+            FORM_HISTORY_BATCH_LIMIT,
+            FORM_HISTORY_SORT,
+            getAllowedMultipleBatches(),
+            getAllowedToUseHighWaterMark(),
+            getRepositoryStateProvider()
+    );
   }
 
   @Override
   protected Repository getLocalRepository() {
-    return new FormHistoryRepositorySession.FormHistoryRepository();
+    return new BufferingMiddlewareRepository(
+            session.getSyncDeadline(),
+            new MemoryBufferStorage(),
+            new FormHistoryRepositorySession.FormHistoryRepository()
+    );
   }
 
   public class FormHistoryRecordFactory extends RecordFactory {
 
     @Override
     public Record createRecord(Record record) {
       FormHistoryRecord r = new FormHistoryRecord();
       r.initFromEnvelope((CryptoRecord) record);
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/GlobalSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/GlobalSyncStage.java
@@ -28,18 +28,19 @@ public interface GlobalSyncStage {
     syncClientsEngine(SyncClientsEngineStage.STAGE_NAME),
     /*
     processFirstSyncPref,
     processClientCommands,
     updateEnabledEngines,
     */
     syncTabs("tabs"),
     syncPasswords("passwords"),
+    syncRecentHistory("recentHistory"),
     syncBookmarks("bookmarks"),
-    syncHistory("history"),
+    syncFullHistory("history"),
     syncFormHistory("forms"),
 
     uploadMetaGlobal,
     completed;
 
     // Maintain a mapping from names ("bookmarks") to Stage enumerations (syncBookmarks).
     private static final Map<String, Stage> named = new HashMap<String, Stage>();
     static {
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/PasswordsServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/PasswordsServerSyncStage.java
@@ -1,14 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
+import org.mozilla.gecko.sync.middleware.BufferingMiddlewareRepository;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.android.PasswordsRepositorySession;
 import org.mozilla.gecko.sync.repositories.domain.PasswordRecordFactory;
 import org.mozilla.gecko.sync.repositories.domain.VersionConstants;
 
 public class PasswordsServerSyncStage extends ServerSyncStage {
   @Override
@@ -23,16 +25,20 @@ public class PasswordsServerSyncStage ex
 
   @Override
   public Integer getStorageVersion() {
     return VersionConstants.PASSWORDS_ENGINE_VERSION;
   }
 
   @Override
   protected Repository getLocalRepository() {
-    return new PasswordsRepositorySession.PasswordsRepository();
+    return new BufferingMiddlewareRepository(
+            session.getSyncDeadline(),
+            new MemoryBufferStorage(),
+            new PasswordsRepositorySession.PasswordsRepository()
+    );
   }
 
   @Override
   protected RecordFactory getRecordFactory() {
     return new PasswordRecordFactory();
   }
 }
deleted file mode 100644
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/SafeConstrainedServer11Repository.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package org.mozilla.gecko.sync.stage;
-
-import java.net.URISyntaxException;
-
-import org.mozilla.gecko.background.common.log.Logger;
-import org.mozilla.gecko.sync.InfoCollections;
-import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.InfoCounts;
-import org.mozilla.gecko.sync.JSONRecordFetcher;
-import org.mozilla.gecko.sync.net.AuthHeaderProvider;
-import org.mozilla.gecko.sync.repositories.ConstrainedServer11Repository;
-import org.mozilla.gecko.sync.repositories.Repository;
-import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
-import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
-
-import android.content.Context;
-
-/**
- * This is a constrained repository -- one which fetches a limited number
- * of records -- that additionally refuses to sync if the limit will
- * be exceeded on a first sync by the number of records on the server.
- *
- * You must pass an {@link InfoCounts} instance, which will be interrogated
- * in the event of a first sync.
- *
- * "First sync" means that our sync timestamp is not greater than zero.
- */
-public class SafeConstrainedServer11Repository extends ConstrainedServer11Repository {
-
-  // This can be lazily evaluated if we need it.
-  private final JSONRecordFetcher countFetcher;
-
-  public SafeConstrainedServer11Repository(String collection,
-                                           String storageURL,
-                                           AuthHeaderProvider authHeaderProvider,
-                                           InfoCollections infoCollections,
-                                           InfoConfiguration infoConfiguration,
-                                           long batchLimit,
-                                           long totalLimit,
-                                           String sort,
-                                           JSONRecordFetcher countFetcher)
-    throws URISyntaxException {
-    super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration,
-            batchLimit, totalLimit, sort);
-    if (countFetcher == null) {
-      throw new IllegalArgumentException("countFetcher must not be null");
-    }
-    this.countFetcher = countFetcher;
-  }
-
-  @Override
-  public void createSession(RepositorySessionCreationDelegate delegate,
-                            Context context) {
-    delegate.onSessionCreated(new CountCheckingServer11RepositorySession(this, this.getDefaultBatchLimit()));
-  }
-
-  public class CountCheckingServer11RepositorySession extends Server11RepositorySession {
-    private static final String LOG_TAG = "CountCheckingServer11RepositorySession";
-
-    /**
-     * The session will report no data available if this is a first sync
-     * and the server has more data available than this limit.
-     */
-    private final long fetchLimit;
-
-    public CountCheckingServer11RepositorySession(Repository repository, long fetchLimit) {
-      super(repository);
-      this.fetchLimit = fetchLimit;
-    }
-
-    @Override
-    public boolean shouldSkip() {
-      // If this is a first sync, verify that we aren't going to blow through our limit.
-      final long lastSyncTimestamp = getLastSyncTimestamp();
-      if (lastSyncTimestamp > 0) {
-        Logger.info(LOG_TAG, "Collection " + collection + " has already had a first sync: " +
-            "timestamp is " + lastSyncTimestamp  + "; " +
-            "ignoring any updated counts and syncing as usual.");
-      } else {
-        Logger.info(LOG_TAG, "Collection " + collection + " is starting a first sync; checking counts.");
-
-        final InfoCounts counts;
-        try {
-          // This'll probably be the same object, but best to obey the API.
-          counts = new InfoCounts(countFetcher.fetchBlocking());
-        } catch (Exception e) {
-          Logger.warn(LOG_TAG, "Skipping " + collection + " until we can fetch counts.", e);
-          return true;
-        }
-
-        Integer c = counts.getCount(collection);
-        if (c == null) {
-          Logger.info(LOG_TAG, "Fetched counts does not include collection " + collection + "; syncing as usual.");
-          return false;
-        }
-
-        Logger.info(LOG_TAG, "First sync for " + collection + ": " + c + " items.");
-        if (c > fetchLimit) {
-          Logger.warn(LOG_TAG, "Too many items to sync safely. Skipping.");
-          return true;
-        }
-      }
-      return super.shouldSkip();
-    }
-  }
-}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/ServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/ServerSyncStage.java
@@ -1,40 +1,44 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
 import android.content.Context;
+import android.os.SystemClock;
 
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.sync.EngineSettings;
 import org.mozilla.gecko.sync.GlobalSession;
 import org.mozilla.gecko.sync.HTTPFailureException;
 import org.mozilla.gecko.sync.MetaGlobalException;
 import org.mozilla.gecko.sync.NoCollectionKeysSetException;
 import org.mozilla.gecko.sync.NonObjectJSONException;
+import org.mozilla.gecko.sync.ReflowIsNecessaryException;
 import org.mozilla.gecko.sync.SynchronizerConfiguration;
 import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.crypto.KeyBundle;
 import org.mozilla.gecko.sync.delegates.WipeServerDelegate;
 import org.mozilla.gecko.sync.middleware.Crypto5MiddlewareRepository;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.net.BaseResource;
 import org.mozilla.gecko.sync.net.SyncStorageRequest;
 import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.repositories.InactiveSessionException;
 import org.mozilla.gecko.sync.repositories.InvalidSessionTransitionException;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.RecordFactory;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.RepositorySessionBundle;
-import org.mozilla.gecko.sync.repositories.Server11Repository;
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.Server15Repository;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionBeginDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFinishDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionWipeDelegate;
 import org.mozilla.gecko.sync.synchronizer.ServerLocalSynchronizer;
 import org.mozilla.gecko.sync.synchronizer.Synchronizer;
 import org.mozilla.gecko.sync.synchronizer.SynchronizerDelegate;
 import org.mozilla.gecko.sync.synchronizer.SynchronizerSession;
@@ -54,16 +58,30 @@ import java.util.concurrent.ExecutorServ
 public abstract class ServerSyncStage extends AbstractSessionManagingSyncStage implements SynchronizerDelegate {
 
   protected static final String LOG_TAG = "ServerSyncStage";
 
   protected long stageStartTimestamp = -1;
   protected long stageCompleteTimestamp = -1;
 
   /**
+   * Poor-man's boolean typing.
+   * These enums are used to configure {@link org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository}.
+   */
+  public enum HighWaterMark {
+    Enabled,
+    Disabled
+  }
+
+  public enum MultipleBatches {
+    Enabled,
+    Disabled
+  }
+
+  /**
    * Override these in your subclasses.
    *
    * @return true if this stage should be executed.
    * @throws MetaGlobalException
    */
   protected boolean isEnabled() throws MetaGlobalException {
     EngineSettings engineSettings = null;
     try {
@@ -134,44 +152,78 @@ public abstract class ServerSyncStage ex
     return new EngineSettings(config.syncID, version);
   }
 
   protected abstract String getCollection();
   protected abstract String getEngineName();
   protected abstract Repository getLocalRepository();
   protected abstract RecordFactory getRecordFactory();
 
+  /**
+   * Used to configure a {@link org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository}.
+   * Override this if you need a persistent repository state provider.
+   *
+   * @return Non-persistent state provider.
+   */
+  protected RepositoryStateProvider getRepositoryStateProvider() {
+    return new NonPersistentRepositoryStateProvider();
+  }
+
+  /**
+   * Used to configure a {@link org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository}.
+   * Override this if you want to restrict downloader to just a single batch.
+   */
+  protected MultipleBatches getAllowedMultipleBatches() {
+    return MultipleBatches.Enabled;
+  }
+
+  /**
+   * Used to configure a {@link org.mozilla.gecko.sync.repositories.ConfigurableServer15Repository}.
+   * Override this if you want to allow resuming record downloads from a high-water-mark.
+   * Ensure you're using a {@link org.mozilla.gecko.sync.repositories.PersistentRepositoryStateProvider}
+   * to persist high-water-mark across syncs.
+   */
+  protected HighWaterMark getAllowedToUseHighWaterMark() {
+    return HighWaterMark.Disabled;
+  }
+
   // Override this in subclasses.
   protected Repository getRemoteRepository() throws URISyntaxException {
     String collection = getCollection();
-    return new Server11Repository(collection,
+    return new Server15Repository(collection,
+                                  session.getSyncDeadline(),
                                   session.config.storageURL(),
                                   session.getAuthHeaderProvider(),
                                   session.config.infoCollections,
-                                  session.config.infoConfiguration);
+                                  session.config.infoConfiguration,
+                                  new NonPersistentRepositoryStateProvider());
   }
 
   /**
-   * Return a Crypto5Middleware-wrapped Server11Repository.
+   * Return a Crypto5Middleware-wrapped Server15Repository.
    *
    * @throws NoCollectionKeysSetException
    * @throws URISyntaxException
    */
   protected Repository wrappedServerRepo() throws NoCollectionKeysSetException, URISyntaxException {
     String collection = this.getCollection();
     KeyBundle collectionKey = session.keyBundleForCollection(collection);
     Crypto5MiddlewareRepository cryptoRepo = new Crypto5MiddlewareRepository(getRemoteRepository(), collectionKey);
     cryptoRepo.recordFactory = getRecordFactory();
     return cryptoRepo;
   }
 
   protected String bundlePrefix() {
     return this.getCollection() + ".";
   }
 
+  protected String statePreferencesPrefix() {
+    return this.getCollection() + ".state.";
+  }
+
   protected SynchronizerConfiguration getConfig() throws NonObjectJSONException, IOException {
     return new SynchronizerConfiguration(session.config.getBranch(bundlePrefix()));
   }
 
   protected void persistConfig(SynchronizerConfiguration synchronizerConfiguration) {
     synchronizerConfiguration.persist(session.config.getBranch(bundlePrefix()));
   }
 
@@ -182,21 +234,31 @@ public abstract class ServerSyncStage ex
     synchronizer.repositoryA = remote;
     synchronizer.repositoryB = this.getLocalRepository();
     synchronizer.load(getConfig());
 
     return synchronizer;
   }
 
   /**
-   * Reset timestamps.
+   * Reset timestamps and any repository state.
    */
   @Override
   protected void resetLocal() {
     resetLocalWithSyncID(null);
+    if (!getRepositoryStateProvider().resetAndCommit()) {
+      // At the very least, we can log this.
+      // Failing to reset at this point means that we'll have lingering state for any stages using a
+      // persistent provider. In certain cases this might negatively affect first sync of this stage
+      // in the future.
+      // Our timestamp resetting code in `persistConfig` is affected by the same problem.
+      // A way to work around this is to further prefix our persisted SharedPreferences with
+      // clientID/syncID, ensuring a very defined scope for any persisted state. See Bug 1332431.
+      Logger.warn(LOG_TAG, "Failed to reset repository state");
+    }
   }
 
   /**
    * Reset timestamps and possibly set syncID.
    * @param syncID if non-null, new syncID to persist.
    */
   protected void resetLocalWithSyncID(String syncID) {
     // Clear both timestamps.
@@ -470,17 +532,17 @@ public abstract class ServerSyncStage ex
     Logger.info(LOG_TAG, "Wiping server complete.");
   }
 
   @Override
   public void execute() throws NoSuchStageException {
     final String name = getEngineName();
     Logger.debug(LOG_TAG, "Starting execute for " + name);
 
-    stageStartTimestamp = System.currentTimeMillis();
+    stageStartTimestamp = SystemClock.elapsedRealtime();
 
     try {
       if (!this.isEnabled()) {
         Logger.info(LOG_TAG, "Skipping stage " + name + ".");
         session.advance();
         return;
       }
     } catch (MetaGlobalException.MetaGlobalMalformedSyncIDException e) {
@@ -570,17 +632,17 @@ public abstract class ServerSyncStage ex
 
   /**
    * We synced this engine!  Persist timestamps and advance the session.
    *
    * @param synchronizer the <code>Synchronizer</code> that succeeded.
    */
   @Override
   public void onSynchronized(Synchronizer synchronizer) {
-    stageCompleteTimestamp = System.currentTimeMillis();
+    stageCompleteTimestamp = SystemClock.elapsedRealtime();
     Logger.debug(LOG_TAG, "onSynchronized.");
 
     SynchronizerConfiguration newConfig = synchronizer.save();
     if (newConfig != null) {
       persistConfig(newConfig);
     } else {
       Logger.warn(LOG_TAG, "Didn't get configuration from synchronizer after success.");
     }
@@ -600,28 +662,34 @@ public abstract class ServerSyncStage ex
    * the next sync will include this sync's data), but do advance the session
    * (if we didn't get a Retry-After header).
    *
    * @param synchronizer the <code>Synchronizer</code> that failed.
    */
   @Override
   public void onSynchronizeFailed(Synchronizer synchronizer,
                                   Exception lastException, String reason) {
-    stageCompleteTimestamp = System.currentTimeMillis();
+    stageCompleteTimestamp = SystemClock.elapsedRealtime();
     Logger.warn(LOG_TAG, "Synchronize failed: " + reason, lastException);
 
     // This failure could be due to a 503 or a 401 and it could have headers.
     // Interrogate the headers but only abort the global session if Retry-After header is set.
     if (lastException instanceof HTTPFailureException) {
       SyncStorageResponse response = ((HTTPFailureException)lastException).response;
       if (response.retryAfterInSeconds() > 0) {
         session.handleHTTPError(response, reason); // Calls session.abort().
         return;
       } else {
         session.interpretHTTPFailure(response.httpResponse()); // Does not call session.abort().
       }
     }
 
+    // Let global session know that this stage is not complete (due to a 412 or hitting a deadline).
+    // This stage will be re-synced once current sync is complete.
+    if (lastException instanceof ReflowIsNecessaryException) {
+      session.handleIncompleteStage();
+    }
+
     Logger.info(LOG_TAG, "Advancing session even though stage failed (took " + getStageDurationString() +
         "). Timestamps not persisted.");
     session.advance();
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/ConcurrentRecordConsumer.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/ConcurrentRecordConsumer.java
@@ -60,17 +60,21 @@ class ConcurrentRecordConsumer extends R
     Logger.trace(LOG_TAG, "Record stored. Notifying.");
     synchronized (countMonitor) {
       counter++;
     }
   }
 
   private void consumerIsDone() {
     Logger.debug(LOG_TAG, "Consumer is done. Processed " + counter + ((counter == 1) ? " record." : " records."));
-    delegate.consumerIsDone(!allRecordsQueued);
+    if (allRecordsQueued) {
+      delegate.consumerIsDoneFull();
+    } else {
+      delegate.consumerIsDonePartial();
+    }
   }
 
   @Override
   public void run() {
     Record record;
 
     while (true) {
       // The queue is concurrent-safe.
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsChannel.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsChannel.java
@@ -1,19 +1,24 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.synchronizer;
 
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+import android.support.annotation.VisibleForTesting;
+
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.ReflowIsNecessaryException;
 import org.mozilla.gecko.sync.ThreadPool;
 import org.mozilla.gecko.sync.repositories.InvalidSessionTransitionException;
 import org.mozilla.gecko.sync.repositories.NoStoreDelegateException;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.DeferredRepositorySessionBeginDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.DeferredRepositorySessionStoreDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionBeginDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
@@ -67,16 +72,18 @@ public class RecordsChannel implements
   RepositorySessionBeginDelegate {
 
   private static final String LOG_TAG = "RecordsChannel";
   public RepositorySession source;
   public RepositorySession sink;
   private final RecordsChannelDelegate delegate;
   private long fetchEnd = -1;
 
+  private volatile ReflowIsNecessaryException reflowException;
+
   protected final AtomicInteger numFetched = new AtomicInteger();
   protected final AtomicInteger numFetchFailed = new AtomicInteger();
   protected final AtomicInteger numStored = new AtomicInteger();
   protected final AtomicInteger numStoreFailed = new AtomicInteger();
 
   public RecordsChannel(RepositorySession source, RepositorySession sink, RecordsChannelDelegate delegate) {
     this.source    = source;
     this.sink      = sink;
@@ -88,17 +95,17 @@ public class RecordsChannel implements
    * A separate thread is waiting for us to notify it of work to do.
    * When we tell it to stop, it'll stop. We do that when the fetch
    * is completed.
    * When it stops, we tell the sink that there are no more records,
    * and wait for the sink to tell us that storing is done.
    * Then we notify our delegate of completion.
    */
   private RecordConsumer consumer;
-  private boolean waitingForQueueDone = false;
+  private volatile boolean waitingForQueueDone = false;
   private final ConcurrentLinkedQueue<Record> toProcess = new ConcurrentLinkedQueue<Record>();
 
   @Override
   public ConcurrentLinkedQueue<Record> getQueue() {
     return toProcess;
   }
 
   protected boolean isReady() {
@@ -189,21 +196,25 @@ public class RecordsChannel implements
       sink.store(record);
     } catch (NoStoreDelegateException e) {
       Logger.error(LOG_TAG, "Got NoStoreDelegateException in RecordsChannel.store(). This should not occur. Aborting.", e);
       delegate.onFlowStoreFailed(this, e, record.guid);
     }
   }
 
   @Override
-  public void onFetchFailed(Exception ex, Record record) {
+  public void onFetchFailed(Exception ex) {
     Logger.warn(LOG_TAG, "onFetchFailed. Calling for immediate stop.", ex);
     numFetchFailed.incrementAndGet();
+    if (ex instanceof ReflowIsNecessaryException) {
+      setReflowException((ReflowIsNecessaryException) ex);
+    }
+    delegate.onFlowFetchFailed(this, ex);
+    // Sink will be informed once consumer finishes.
     this.consumer.halt();
-    delegate.onFlowFetchFailed(this, ex);
   }
 
   @Override
   public void onFetchedRecord(Record record) {
     numFetched.incrementAndGet();
     this.toProcess.add(record);
     this.consumer.doNotify();
   }
@@ -212,46 +223,100 @@ public class RecordsChannel implements
   public void onFetchCompleted(final long fetchEnd) {
     Logger.trace(LOG_TAG, "onFetchCompleted. Stopping consumer once stores are done.");
     Logger.trace(LOG_TAG, "Fetch timestamp is " + fetchEnd);
     this.fetchEnd = fetchEnd;
     this.consumer.queueFilled();
   }
 
   @Override
+  public void onBatchCompleted() {
+    this.sink.storeFlush();
+  }
+
+  @Override
   public void onRecordStoreFailed(Exception ex, String recordGuid) {
     Logger.trace(LOG_TAG, "Failed to store record with guid " + recordGuid);
     numStoreFailed.incrementAndGet();
     this.consumer.stored();
     delegate.onFlowStoreFailed(this, ex, recordGuid);
     // TODO: abort?
   }
 
   @Override
   public void onRecordStoreSucceeded(String guid) {
     Logger.trace(LOG_TAG, "Stored record with guid " + guid);
     this.consumer.stored();
   }
 
+  @Override
+  public void consumerIsDoneFull() {
+    Logger.trace(LOG_TAG, "Consumer is done, processed all records. Are we waiting for it? " + waitingForQueueDone);
+    if (waitingForQueueDone) {
+      waitingForQueueDone = false;
+
+      // Now we'll be waiting for sink to call its delegate's onStoreCompleted or onStoreFailed.
+      this.sink.storeDone();
+    }
+  }
 
   @Override
-  public void consumerIsDone(boolean allRecordsQueued) {
-    Logger.trace(LOG_TAG, "Consumer is done. Are we waiting for it? " + waitingForQueueDone);
+  public void consumerIsDonePartial() {
+    Logger.trace(LOG_TAG, "Consumer is done, processed some records. Are we waiting for it? " + waitingForQueueDone);
     if (waitingForQueueDone) {
       waitingForQueueDone = false;
-      this.sink.storeDone();                 // Now we'll be waiting for onStoreCompleted.
+
+      // Let sink clean up or flush records if necessary.
+      this.sink.storeIncomplete();
+
+      delegate.onFlowCompleted(this, fetchEnd, System.currentTimeMillis());
     }
   }
 
   @Override
   public void onStoreCompleted(long storeEnd) {
     Logger.trace(LOG_TAG, "onStoreCompleted. Notifying delegate of onFlowCompleted. " +
                           "Fetch end is " + fetchEnd + ", store end is " + storeEnd);
-    // TODO: synchronize on consumer callback?
+    // Source might have used caches to facilitate the flow of records, so now is a good
+    // time to clean up. Particularly pertinent for buffered sources.
+    // Rephrasing this in a more concrete way, buffers are cleared only once records have been merged
+    // locally and results of the merge have been uploaded to the server successfully.
+    this.source.performCleanup();
     delegate.onFlowCompleted(this, fetchEnd, storeEnd);
+
+  }
+
+  @Override
+  public void onStoreFailed(Exception ex) {
+    Logger.warn(LOG_TAG, "onStoreFailed. Calling for immediate stop.", ex);
+    if (ex instanceof ReflowIsNecessaryException) {
+      setReflowException((ReflowIsNecessaryException) ex);
+    }
+
+    // NB: consumer might or might not be running at this point. There are two cases here:
+    // 1) If we're storing records remotely, we might fail due to a 412.
+    // -- we might hit 412 at any point, so consumer might be in either state.
+    // Action: ignore consumer state, we have nothing else to do other than to inform our delegate
+    // that we're done with this flow. Based on the reflow exception, it'll determine what to do.
+
+    // 2) If we're storing (merging) records locally, we might fail due to a sync deadline.
+    // -- we might hit a deadline only prior to attempting to merge records,
+    // -- at which point consumer would have finished already, and storeDone was called.
+    // Action: consumer state is known (done), so we can ignore it safely and inform our delegate
+    // that we're done.
+
+    // Prevent "once consumer is done..." actions from taking place. They already have (case 2), or
+    // we don't need them (case 1).
+    waitingForQueueDone = false;
+
+    // If consumer is still going at it, tell it to stop.
+    this.consumer.halt();
+
+    delegate.onFlowStoreFailed(this, ex, null);
+    delegate.onFlowCompleted(this, fetchEnd, System.currentTimeMillis());
   }
 
   @Override
   public void onBeginFailed(Exception ex) {
     delegate.onFlowBeginFailed(this, ex);
   }
 
   @Override
@@ -284,9 +349,23 @@ public class RecordsChannel implements
     return new DeferredRepositorySessionBeginDelegate(this, executor);
   }
 
   @Override
   public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor) {
     // Lie outright. We know that all of our fetch methods are safe.
     return this;
   }
+
+  @Nullable
+  @VisibleForTesting
+  public synchronized ReflowIsNecessaryException getReflowException() {
+    return reflowException;
+  }
+
+  private synchronized void setReflowException(@NonNull ReflowIsNecessaryException e) {
+    // It is a mistake to set reflow exception multiple times.
+    if (reflowException != null) {
+      throw new IllegalStateException("Reflow exception already set: " + reflowException);
+    }
+    reflowException = e;
+  }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsConsumerDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsConsumerDelegate.java
@@ -4,20 +4,24 @@
 
 package org.mozilla.gecko.sync.synchronizer;
 
 import java.util.concurrent.ConcurrentLinkedQueue;
 
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 interface RecordsConsumerDelegate {
-  public abstract ConcurrentLinkedQueue<Record> getQueue();
+  ConcurrentLinkedQueue<Record> getQueue();
 
   /**
    * Called when no more items will be processed.
-   * If forced is true, the consumer is terminating because it was told to halt;
-   * not all items will necessarily have been processed.
-   * If forced is false, the consumer has invoked store and received an onStoreCompleted callback.
-   * @param forced
+   * Indicates that all items have been processed.
    */
-  public abstract void consumerIsDone(boolean forced);
-  public abstract void store(Record record);
+  void consumerIsDoneFull();
+
+  /**
+   * Called when no more items will be processed.
+   * Indicates that only some of the items have been processed.
+   */
+  void consumerIsDonePartial();
+
+  void store(Record record);
 }
\ No newline at end of file
deleted file mode 100644
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/SerialRecordConsumer.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package org.mozilla.gecko.sync.synchronizer;
-
-import org.mozilla.gecko.background.common.log.Logger;
-import org.mozilla.gecko.sync.repositories.domain.Record;
-
-/**
- * Consume records from a queue inside a RecordsChannel, storing them serially.
- * @author rnewman
- *
- */
-class SerialRecordConsumer extends RecordConsumer {
-  private static final String LOG_TAG = "SerialRecordConsumer";
-  protected boolean stopEventually = false;
-  private volatile long counter = 0;
-
-  public SerialRecordConsumer(RecordsConsumerDelegate delegate) {
-    this.delegate = delegate;
-  }
-
-  private final Object monitor = new Object();
-  @Override
-  public void doNotify() {
-    synchronized (monitor) {
-      monitor.notify();
-    }
-  }
-
-  @Override
-  public void queueFilled() {
-    Logger.debug(LOG_TAG, "Queue filled.");
-    synchronized (monitor) {
-      this.stopEventually = true;
-      monitor.notify();
-    }
-  }
-
-  @Override
-  public void halt() {
-    Logger.debug(LOG_TAG, "Halting.");
-    synchronized (monitor) {
-      this.stopEventually = true;
-      this.stopImmediately = true;
-      monitor.notify();
-    }
-  }
-
-  private final Object storeSerializer = new Object();
-  @Override
-  public void stored() {
-    Logger.debug(LOG_TAG, "Record stored. Notifying.");
-    synchronized (storeSerializer) {
-      Logger.debug(LOG_TAG, "stored() took storeSerializer.");
-      counter++;
-      storeSerializer.notify();
-      Logger.debug(LOG_TAG, "stored() dropped storeSerializer.");
-    }
-  }
-  private void storeSerially(Record record) {
-    Logger.debug(LOG_TAG, "New record to store.");
-    synchronized (storeSerializer) {
-      Logger.debug(LOG_TAG, "storeSerially() took storeSerializer.");
-      Logger.debug(LOG_TAG, "Storing...");
-      try {
-        this.delegate.store(record);
-      } catch (Exception e) {
-        Logger.warn(LOG_TAG, "Got exception in store. Not waiting.", e);
-        return;      // So we don't block for a stored() that never comes.
-      }
-      try {
-        Logger.debug(LOG_TAG, "Waiting...");
-        storeSerializer.wait();
-      } catch (InterruptedException e) {
-        // TODO
-      }
-      Logger.debug(LOG_TAG, "storeSerially() dropped storeSerializer.");
-    }
-  }
-
-  private void consumerIsDone() {
-    long counterNow = this.counter;
-    Logger.info(LOG_TAG, "Consumer is done. Processed " + counterNow + ((counterNow == 1) ? " record." : " records."));
-    delegate.consumerIsDone(stopImmediately);
-  }
-
-  @Override
-  public void run() {
-    while (true) {
-      synchronized (monitor) {
-        Logger.debug(LOG_TAG, "run() took monitor.");
-        if (stopImmediately) {
-          Logger.debug(LOG_TAG, "Stopping immediately. Clearing queue.");
-          delegate.getQueue().clear();
-          Logger.debug(LOG_TAG, "Notifying consumer.");
-          consumerIsDone();
-          return;
-        }
-        Logger.debug(LOG_TAG, "run() dropped monitor.");
-      }
-      // The queue is concurrent-safe.
-      while (!delegate.getQueue().isEmpty()) {
-        Logger.debug(LOG_TAG, "Grabbing record...");
-        Record record = delegate.getQueue().remove();
-        // Block here, allowing us to process records
-        // serially.
-        Logger.debug(LOG_TAG, "Invoking storeSerially...");
-        this.storeSerially(record);
-        Logger.debug(LOG_TAG, "Done with record.");
-      }
-      synchronized (monitor) {
-        Logger.debug(LOG_TAG, "run() took monitor.");
-
-        if (stopEventually) {
-          Logger.debug(LOG_TAG, "Done with records and told to stop. Notifying consumer.");
-          consumerIsDone();
-          return;
-        }
-        try {
-          Logger.debug(LOG_TAG, "Not told to stop but no records. Waiting.");
-          monitor.wait(10000);
-        } catch (InterruptedException e) {
-          // TODO
-        }
-        Logger.debug(LOG_TAG, "run() dropped monitor.");
-      }
-    }
-  }
-}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/ServerLocalSynchronizerSession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/ServerLocalSynchronizerSession.java
@@ -1,15 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.synchronizer;
 
 import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.ReflowIsNecessaryException;
 import org.mozilla.gecko.sync.repositories.FetchFailedException;
 import org.mozilla.gecko.sync.repositories.StoreFailedException;
 
 /**
  * A <code>SynchronizerSession</code> designed to be used between a remote
  * server and a local repository.
  * <p>
  * Handles failure cases as follows (in the order they will occur during a sync):
@@ -24,16 +25,25 @@ public class ServerLocalSynchronizerSess
   protected static final String LOG_TAG = "ServLocSynchronizerSess";
 
   public ServerLocalSynchronizerSession(Synchronizer synchronizer, SynchronizerSessionDelegate delegate) {
     super(synchronizer, delegate);
   }
 
   @Override
   public void onFirstFlowCompleted(RecordsChannel recordsChannel, long fetchEnd, long storeEnd) {
+    // If a "reflow exception" was thrown, consider this synchronization failed.
+    final ReflowIsNecessaryException reflowException = recordsChannel.getReflowException();
+    if (reflowException != null) {
+      final String message = "Reflow is necessary: " + reflowException;
+      Logger.warn(LOG_TAG, message + " Aborting session.");
+      delegate.onSynchronizeFailed(this, reflowException, message);
+      return;
+    }
+
     // Fetch failures always abort.
     int numRemoteFetchFailed = recordsChannel.getFetchFailureCount();
     if (numRemoteFetchFailed > 0) {
       final String message = "Got " + numRemoteFetchFailed + " failures fetching remote records!";
       Logger.warn(LOG_TAG, message + " Aborting session.");
       delegate.onSynchronizeFailed(this, new FetchFailedException(), message);
       return;
     }
@@ -48,16 +58,25 @@ public class ServerLocalSynchronizerSess
       Logger.trace(LOG_TAG, "No failures storing local records.");
     }
 
     super.onFirstFlowCompleted(recordsChannel, fetchEnd, storeEnd);
   }
 
   @Override
   public void onSecondFlowCompleted(RecordsChannel recordsChannel, long fetchEnd, long storeEnd) {
+    // If a "reflow exception" was thrown, consider this synchronization failed.
+    final ReflowIsNecessaryException reflowException = recordsChannel.getReflowException();
+    if (reflowException != null) {
+      final String message = "Reflow is necessary: " + reflowException;
+      Logger.warn(LOG_TAG, message + " Aborting session.");
+      delegate.onSynchronizeFailed(this, reflowException, message);
+      return;
+    }
+
     // Fetch failures always abort.
     int numLocalFetchFailed = recordsChannel.getFetchFailureCount();
     if (numLocalFetchFailed > 0) {
       final String message = "Got " + numLocalFetchFailed + " failures fetching local records!";
       Logger.warn(LOG_TAG, message + " Aborting session.");
       delegate.onSynchronizeFailed(this, new FetchFailedException(), message);
       return;
     }
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/db/TestBookmarks.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/db/TestBookmarks.java
@@ -660,16 +660,21 @@ public class TestBookmarks extends Andro
         public void onFetchedRecord(Record record) {
           fetchedGUIDs.add(record.guid);
         }
 
         @Override
         public void onFetchCompleted(long end) {
           finishAndNotify(session);
         }
+
+        @Override
+        public void onBatchCompleted() {
+
+        }
       };
       session.fetchSince(0, fetchDelegate);
     }
   }
 
   /**
    * Simple helper class for fetching a single record by GUID.
    * The fetched record is stored in `fetchedRecord`.
@@ -690,16 +695,21 @@ public class TestBookmarks extends Andro
         public void onFetchedRecord(Record record) {
           fetchedRecord = record;
         }
 
         @Override
         public void onFetchCompleted(long end) {
           finishAndNotify(session);
         }
+
+        @Override
+        public void onBatchCompleted() {
+
+        }
       };
       try {
         session.fetch(new String[] { guid }, fetchDelegate);
       } catch (InactiveSessionException e) {
         performNotify("Session is inactive.", e);
       }
     }
   }
@@ -731,16 +741,21 @@ public class TestBookmarks extends Andro
               }
             }
             finishAndNotify(session);
           }
 
           @Override
           public void onRecordStoreSucceeded(String guid) {
           }
+
+          @Override
+          public void onStoreFailed(Exception e) {
+
+          }
         };
         session.setStoreDelegate(storeDelegate);
         for (BookmarkRecord record : records) {
           try {
             session.store(record);
           } catch (NoStoreDelegateException e) {
             // Never happens.
           }
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/TestStoreTracking.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/TestStoreTracking.java
@@ -99,23 +99,38 @@ public class TestStoreTracking extends A
                                                     RepositorySessionBundle bundle) {
                         performNotify();
                       }
                     });
                   } catch (InactiveSessionException e) {
                     performNotify(e);
                   }
                 }
+
+                @Override
+                public void onBatchCompleted() {
+
+                }
               });
             }
+
+            @Override
+            public void onBatchCompleted() {
+
+            }
           });
         } catch (InactiveSessionException e) {
           performNotify(e);
         }
       }
+
+      @Override
+      public void onStoreFailed(Exception e) {
+
+      }
     };
 
     session.setStoreDelegate(storeDelegate);
     try {
       Logger.debug(getName(), "Storing...");
       session.store(record);
       session.storeDone();
     } catch (NoStoreDelegateException e) {
@@ -151,16 +166,21 @@ public class TestStoreTracking extends A
                         // Hooray!
                         performNotify();
                       }
                     });
                   } catch (InactiveSessionException e) {
                     performNotify(e);
                   }
                 }
+
+                @Override
+                public void onBatchCompleted() {
+
+                }
               });
             }
           });
         } catch (InvalidSessionTransitionException e) {
           performNotify(e);
         }
       }
     };
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/DefaultFetchDelegate.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/DefaultFetchDelegate.java
@@ -22,17 +22,17 @@ import org.mozilla.gecko.sync.repositori
 
 public class DefaultFetchDelegate extends DefaultDelegate implements RepositorySessionFetchRecordsDelegate {
 
   private static final String LOG_TAG = "DefaultFetchDelegate";
   public ArrayList<Record> records = new ArrayList<Record>();
   public Set<String> ignore = new HashSet<String>();
 
   @Override
-  public void onFetchFailed(Exception ex, Record record) {
+  public void onFetchFailed(Exception ex) {
     performNotify("Fetch failed.", ex);
   }
 
   protected void onDone(ArrayList<Record> records, HashMap<String, Record> expected, long end) {
     Logger.debug(LOG_TAG, "onDone.");
     Logger.debug(LOG_TAG, "End timestamp is " + end);
     Logger.debug(LOG_TAG, "Expected is " + expected);
     Logger.debug(LOG_TAG, "Records is " + records);
@@ -95,12 +95,17 @@ public class DefaultFetchDelegate extend
   }
 
   @Override
   public void onFetchCompleted(final long fetchEnd) {
     Logger.debug(LOG_TAG, "onFetchCompleted. Doing nothing.");
   }
 
   @Override
+  public void onBatchCompleted() {
+    Logger.debug(LOG_TAG, "onBatchCompleted. Doing nothing.");
+  }
+
+  @Override
   public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(final ExecutorService executor) {
     return new DeferredRepositorySessionFetchRecordsDelegate(this, executor);
   }
 }
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/DefaultStoreDelegate.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/DefaultStoreDelegate.java
@@ -6,30 +6,35 @@ package org.mozilla.gecko.background.syn
 import java.util.concurrent.ExecutorService;
 
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
 
 public class DefaultStoreDelegate extends DefaultDelegate implements RepositorySessionStoreDelegate {
 
   @Override
   public void onRecordStoreFailed(Exception ex, String guid) {
-    performNotify("Store failed", ex);
+    performNotify("Record store failed", ex);
   }
 
   @Override
   public void onRecordStoreSucceeded(String guid) {
     performNotify("DefaultStoreDelegate used", null);
   }
 
   @Override
   public void onStoreCompleted(long storeEnd) {
     performNotify("DefaultStoreDelegate used", null);
   }
 
   @Override
+  public void onStoreFailed(Exception ex) {
+    performNotify("Store failed", ex);
+  }
+
+  @Override
   public RepositorySessionStoreDelegate deferredStoreDelegate(final ExecutorService executor) {
     final RepositorySessionStoreDelegate self = this;
     return new RepositorySessionStoreDelegate() {
 
       @Override
       public void onRecordStoreSucceeded(final String guid) {
         executor.execute(new Runnable() {
           @Override
@@ -55,16 +60,21 @@ public class DefaultStoreDelegate extend
           @Override
           public void run() {
             self.onStoreCompleted(storeEnd);
           }
         });
       }
 
       @Override
+      public void onStoreFailed(Exception e) {
+
+      }
+
+      @Override
       public RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService newExecutor) {
         if (newExecutor == executor) {
           return this;
         }
         throw new IllegalArgumentException("Can't re-defer this delegate.");
       }
     };
   }
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/ExpectInvalidRequestFetchDelegate.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/ExpectInvalidRequestFetchDelegate.java
@@ -1,21 +1,20 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.gecko.background.sync.helpers;
 
 import org.mozilla.gecko.sync.repositories.InvalidRequestException;
-import org.mozilla.gecko.sync.repositories.domain.Record;
 
 public class ExpectInvalidRequestFetchDelegate extends DefaultFetchDelegate {
   public static final String LOG_TAG = "ExpInvRequestFetchDel";
 
   @Override
-  public void onFetchFailed(Exception ex, Record rec) {
+  public void onFetchFailed(Exception ex) {
     if (ex instanceof InvalidRequestException) {
       onDone();
     } else {
       performNotify("Expected InvalidRequestException but got ", ex);
     }
   }
 
   private void onDone() {
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/SimpleSuccessFetchDelegate.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/sync/helpers/SimpleSuccessFetchDelegate.java
@@ -1,22 +1,21 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.gecko.background.sync.helpers;
 
 import java.util.concurrent.ExecutorService;
 
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
-import org.mozilla.gecko.sync.repositories.domain.Record;
 
 public abstract class SimpleSuccessFetchDelegate extends DefaultDelegate implements
     RepositorySessionFetchRecordsDelegate {
   @Override
-  public void onFetchFailed(Exception ex, Record record) {
+  public void onFetchFailed(Exception ex) {
     performNotify("Fetch failed", ex);
   }
 
   @Override
   public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor) {
     return this;
   }
 }
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/testhelpers/DefaultGlobalSessionCallback.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/testhelpers/DefaultGlobalSessionCallback.java
@@ -24,16 +24,22 @@ public class DefaultGlobalSessionCallbac
   public void informUpgradeRequiredResponse(GlobalSession session) {
   }
 
   @Override
   public void informMigrated(GlobalSession globalSession) {
   }
 
   @Override
+  public void handleIncompleteStage(Stage currentState,
+                                    GlobalSession globalSession) {
+
+  }
+
+  @Override
   public void handleAborted(GlobalSession globalSession, String reason) {
   }
 
   @Override
   public void handleError(GlobalSession globalSession, Exception ex) {
   }
 
   @Override
--- a/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/testhelpers/WBORepository.java
+++ b/mobile/android/tests/background/junit3/src/org/mozilla/gecko/background/testhelpers/WBORepository.java
@@ -121,50 +121,50 @@ public class WBORepository extends Repos
       }
       long fetchCompleted  = now();
       stats.fetchCompleted = fetchCompleted;
       delegate.deferredFetchDelegate(delegateExecutor).onFetchCompleted(fetchCompleted);
     }
 
     @Override
     public void store(final Record record) throws NoStoreDelegateException {
-      if (delegate == null) {
+      if (storeDelegate == null) {
         throw new NoStoreDelegateException();
       }
       final long now = now();
       if (stats.storeBegan < 0) {
         stats.storeBegan = now;
       }
       Record existing = wbos.get(record.guid);
       Logger.debug(LOG_TAG, "Existing record is " + (existing == null ? "<null>" : (existing.guid + ", " + existing)));
       if (existing != null &&
           existing.lastModified > record.lastModified) {
         Logger.debug(LOG_TAG, "Local record is newer. Not storing.");
-        delegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
+        storeDelegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
         return;
       }
       if (existing != null) {
         Logger.debug(LOG_TAG, "Replacing local record.");
       }
 
       // Store a copy of the record with an updated modified time.
       Record toStore = record.copyWithIDs(record.guid, record.androidID);
       if (bumpTimestamps) {
         toStore.lastModified = now;
       }
       wbos.put(record.guid, toStore);
 
       trackRecord(toStore);
-      delegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
+      storeDelegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
     }
 
     @Override
     public void wipe(final RepositorySessionWipeDelegate delegate) {
       if (!isActive()) {
-        delegate.onWipeFailed(new InactiveSessionException(null));
+        delegate.onWipeFailed(new InactiveSessionException());
         return;
       }
 
       Logger.info(LOG_TAG, "Wiping WBORepositorySession.");
       this.wbos = new ConcurrentHashMap<String, Record>();
 
       // Wipe immediately for the convenience of test code.
       wboRepository.wbos = new ConcurrentHashMap<String, Record>();
@@ -189,17 +189,17 @@ public class WBORepository extends Repos
     @Override
     public void storeDone(long end) {
       // TODO: this is not guaranteed to be called after all of the record
       // store callbacks have completed!
       if (stats.storeBegan < 0) {
         stats.storeBegan = end;
       }
       stats.storeCompleted = end;
-      delegate.deferredStoreDelegate(delegateExecutor).onStoreCompleted(end);
+      storeDelegate.deferredStoreDelegate(delegateExecutor).onStoreCompleted(end);
     }
   }
 
   public ConcurrentHashMap<String, Record> wbos;
 
   public WBORepository(boolean bumpTimestamps) {
     super();
     this.bumpTimestamps = bumpTimestamps;
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestGlobalSession.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestGlobalSession.java
@@ -1,13 +1,15 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.net.test;
 
+import android.os.SystemClock;
+
 import ch.boye.httpclientandroidlib.HttpResponse;
 import ch.boye.httpclientandroidlib.ProtocolVersion;
 import ch.boye.httpclientandroidlib.message.BasicHttpResponse;
 import ch.boye.httpclientandroidlib.message.BasicStatusLine;
 import junit.framework.AssertionFailedError;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -45,31 +47,33 @@ import org.simpleframework.http.Response
 
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 @RunWith(TestRunner.class)
 public class TestGlobalSession {
   private int          TEST_PORT                = HTTPServerTestHelper.getTestPort();
   private final String TEST_CLUSTER_URL         = "http://localhost:" + TEST_PORT;
   private final String TEST_USERNAME            = "johndoe";
   private final String TEST_PASSWORD            = "password";
   private final String TEST_SYNC_KEY            = "abcdeabcdeabcdeabcdeabcdea";
   private final long   TEST_BACKOFF_IN_SECONDS  = 2401;
+  private final long   SYNC_DEADLINE            = SystemClock.elapsedRealtime() + TimeUnit.MINUTES.toMillis(30);
 
   public static WaitHelper getTestWaiter() {
     return WaitHelper.getTestWaiter();
   }
 
   @Test
   public void testGetSyncStagesBy() throws SyncConfigurationException, IllegalArgumentException, NonObjectJSONException, IOException, CryptoException, NoSuchStageException {
 
@@ -142,17 +146,17 @@ public class TestGlobalSession {
       final MockGlobalSessionCallback callback = new MockGlobalSessionCallback();
       SyncConfiguration config = new SyncConfiguration(TEST_USERNAME, new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD), new MockSharedPreferences(), new KeyBundle(TEST_USERNAME, TEST_SYNC_KEY));
       final GlobalSession session = new MockGlobalSession(config, callback);
 
       getTestWaiter().performWait(WaitHelper.onThreadRunnable(new Runnable() {
         @Override
         public void run() {
           try {
-            session.start();
+            session.start(SYNC_DEADLINE);
           } catch (Exception e) {
             final AssertionFailedError error = new AssertionFailedError();
             error.initCause(e);
             getTestWaiter().performNotify(error);
           }
         }
       }));
 
@@ -190,17 +194,17 @@ public class TestGlobalSession {
       SyncConfiguration config = new SyncConfiguration(TEST_USERNAME, new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD), new MockSharedPreferences(), new KeyBundle(TEST_USERNAME, TEST_SYNC_KEY));
       final GlobalSession session = new MockGlobalSession(config, callback)
                                         .withStage(Stage.fetchInfoCollections, stage);
 
       getTestWaiter().performWait(WaitHelper.onThreadRunnable(new Runnable() {
         @Override
         public void run() {
           try {
-            session.start();
+            session.start(SYNC_DEADLINE);
           } catch (Exception e) {
             final AssertionFailedError error = new AssertionFailedError();
             error.initCause(e);
             getTestWaiter().performNotify(error);
           }
         }
       }));
 
@@ -270,17 +274,17 @@ public class TestGlobalSession {
     final GlobalSession session = new MockGlobalSession(config, callback)
                                       .withStage(Stage.syncBookmarks, stage);
 
     data.startHTTPServer(server);
     WaitHelper.getTestWaiter().performWait(WaitHelper.onThreadRunnable(new Runnable() {
       @Override
       public void run() {
         try {
-          session.start();
+          session.start(SYNC_DEADLINE);
         } catch (Exception e) {
           final AssertionFailedError error = new AssertionFailedError();
           error.initCause(e);
           WaitHelper.getTestWaiter().performNotify(error);
         }
       }
     }));
     data.stopHTTPServer();
@@ -344,17 +348,17 @@ public class TestGlobalSession {
     MetaGlobal mg = session.generateNewMetaGlobal();
     assertEquals(Long.valueOf(GlobalSession.STORAGE_VERSION), mg.getStorageVersion());
     assertEquals(VersionConstants.BOOKMARKS_ENGINE_VERSION, mg.getEngines().getObject("bookmarks").getIntegerSafely("version").intValue());
     assertEquals(VersionConstants.CLIENTS_ENGINE_VERSION, mg.getEngines().getObject("clients").getIntegerSafely("version").intValue());
 
     List<String> namesList = new ArrayList<String>(mg.getEnabledEngineNames());
     Collections.sort(namesList);
     String[] names = namesList.toArray(new String[namesList.size()]);
-    String[] expected = new String[] { "bookmarks", "clients", "forms", "history", "passwords", "tabs" };
+    String[] expected = new String[] { "bookmarks", "clients", "forms", "history", "passwords", "recentHistory", "tabs" };
     assertArrayEquals(expected, names);
   }
 
   @Test
   public void testGenerateNewMetaGlobalSomePersisted() throws Exception {
     final MockGlobalSessionCallback callback = new MockGlobalSessionCallback();
     final GlobalSession session = MockPrefsGlobalSession.getSession(TEST_USERNAME, TEST_PASSWORD,
         new KeyBundle(TEST_USERNAME, TEST_SYNC_KEY), callback, null, null);
rename from mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer11Repository.java
rename to mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer15Repository.java
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer11Repository.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer15Repository.java
@@ -1,48 +1,44 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.net.test;
 
+import android.os.SystemClock;
+
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.sync.InfoCollections;
 import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.repositories.Server11Repository;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.RepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.Server15Repository;
 
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.concurrent.TimeUnit;
 
 @RunWith(TestRunner.class)
-public class TestServer11Repository {
+public class TestServer15Repository {
 
   private static final String COLLECTION = "bookmarks";
-  private static final String COLLECTION_URL = "http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage";
+  private static final String COLLECTION_URL = "http://foo.com/1.5/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage";
+  private static final long SYNC_DEADLINE = SystemClock.elapsedRealtime() + TimeUnit.MINUTES.toMillis(30);
 
   protected final InfoCollections infoCollections = new InfoCollections();
   protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
+  protected final RepositoryStateProvider stateProvider = new NonPersistentRepositoryStateProvider();
 
   public static void assertQueryEquals(String expected, URI u) {
     Assert.assertEquals(expected, u.getRawQuery());
   }
 
-  @SuppressWarnings("static-method")
-  @Test
-  public void testCollectionURIFull() throws URISyntaxException {
-    Server11Repository r = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
-    assertQueryEquals("full=1&newer=5000.000",              r.collectionURI(true,  5000000L, -1,    null, null, null));
-    assertQueryEquals("newer=1230.000",                     r.collectionURI(false, 1230000L, -1,    null, null, null));
-    assertQueryEquals("newer=5000.000&limit=10",            r.collectionURI(false, 5000000L, 10,    null, null, null));
-    assertQueryEquals("full=1&newer=5000.000&sort=index",   r.collectionURI(true,  5000000L,  0, "index", null, null));
-    assertQueryEquals("full=1&ids=123,abc",                 r.collectionURI(true,       -1L, -1,    null, "123,abc", null));
-  }
-
   @Test
   public void testCollectionURI() throws URISyntaxException {
-    Server11Repository noTrailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
-    Server11Repository trailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL + "/", null, infoCollections, infoConfiguration);
-    Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", noTrailingSlash.collectionURI().toASCIIString());
-    Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", trailingSlash.collectionURI().toASCIIString());
+    Server15Repository noTrailingSlash = new Server15Repository(COLLECTION, SYNC_DEADLINE, COLLECTION_URL, null, infoCollections, infoConfiguration, stateProvider);
+    Server15Repository trailingSlash = new Server15Repository(COLLECTION, SYNC_DEADLINE, COLLECTION_URL + "/", null, infoCollections, infoConfiguration, stateProvider);
+    Assert.assertEquals("http://foo.com/1.5/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", noTrailingSlash.collectionURI().toASCIIString());
+    Assert.assertEquals("http://foo.com/1.5/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", trailingSlash.collectionURI().toASCIIString());
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/SynchronizerHelpers.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/SynchronizerHelpers.java
@@ -1,16 +1,18 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.test;
 
 import android.content.Context;
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.background.testhelpers.WBORepository;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
+import org.mozilla.gecko.sync.SyncDeadlineReachedException;
 import org.mozilla.gecko.sync.repositories.FetchFailedException;
 import org.mozilla.gecko.sync.repositories.InactiveSessionException;
 import org.mozilla.gecko.sync.repositories.InvalidSessionTransitionException;
 import org.mozilla.gecko.sync.repositories.NoStoreDelegateException;
 import org.mozilla.gecko.sync.repositories.StoreFailedException;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionBeginDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
@@ -18,72 +20,116 @@ import org.mozilla.gecko.sync.repositori
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 import java.util.ArrayList;
 import java.util.concurrent.ExecutorService;
 
 public class SynchronizerHelpers {
   public static final String FAIL_SENTINEL = "Fail";
 
+  enum FailMode {
+    COLLECTION_MODIFIED,
+    DEADLINE_REACHED,
+    FETCH,
+    STORE
+  }
+
+  private static Exception getFailException(FailMode failMode) {
+    switch (failMode) {
+      case COLLECTION_MODIFIED:
+        return new CollectionConcurrentModificationException();
+      case DEADLINE_REACHED:
+        return new SyncDeadlineReachedException();
+      case FETCH:
+        return new FetchFailedException();
+      case STORE:
+        return new StoreFailedException();
+      default:
+        throw new IllegalStateException();
+    }
+  }
+
   /**
    * Store one at a time, failing if the guid contains FAIL_SENTINEL.
    */
   public static class FailFetchWBORepository extends WBORepository {
+    private final FailMode failMode;
+
+    public FailFetchWBORepository(FailMode failMode) {
+      this.failMode = failMode;
+    }
+
     @Override
     public void createSession(RepositorySessionCreationDelegate delegate,
                               Context context) {
       delegate.deferredCreationDelegate().onSessionCreated(new WBORepositorySession(this) {
         @Override
         public void fetchSince(long timestamp,
                                final RepositorySessionFetchRecordsDelegate delegate) {
           super.fetchSince(timestamp, new RepositorySessionFetchRecordsDelegate() {
             @Override
             public void onFetchedRecord(Record record) {
               if (record.guid.contains(FAIL_SENTINEL)) {
-                delegate.onFetchFailed(new FetchFailedException(), record);
+                delegate.onFetchFailed(getFailException(failMode));
               } else {
                 delegate.onFetchedRecord(record);
               }
             }
 
             @Override
-            public void onFetchFailed(Exception ex, Record record) {
-              delegate.onFetchFailed(ex, record);
+            public void onFetchFailed(Exception ex) {
+              delegate.onFetchFailed(ex);
             }
 
             @Override
             public void onFetchCompleted(long fetchEnd) {
               delegate.onFetchCompleted(fetchEnd);
             }
 
             @Override
+            public void onBatchCompleted() {
+              // No-op: these tests do not assert batch boundaries.
+            }
+
+            @Override
             public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor) {
               return this;
             }
           });
         }
       });
     }
   }
 
   /**
    * Store one at a time, failing if the guid contains FAIL_SENTINEL.
    */
   public static class SerialFailStoreWBORepository extends WBORepository {
+    private final FailMode failMode;
+
+    public SerialFailStoreWBORepository(FailMode failMode) {
+      this.failMode = failMode;
+    }
+
     @Override
     public void createSession(RepositorySessionCreationDelegate delegate,
                               Context context) {
       delegate.deferredCreationDelegate().onSessionCreated(new WBORepositorySession(this) {
         @Override
         public void store(final Record record) throws NoStoreDelegateException {
-          if (delegate == null) {
+          if (storeDelegate == null) {
             throw new NoStoreDelegateException();
           }
           if (record.guid.contains(FAIL_SENTINEL)) {
-            delegate.onRecordStoreFailed(new StoreFailedException(), record.guid);
+            Exception ex = getFailException(failMode);
+            if (ex instanceof CollectionConcurrentModificationException) {
+              storeDelegate.onStoreFailed(ex);
+            } else {
+              storeDelegate.onRecordStoreFailed(ex, record.guid);
+            }
           } else {
             super.store(record);
           }
         }
       });
     }
   }
 
@@ -103,17 +149,17 @@ public class SynchronizerHelpers {
       }
 
       public void superStore(final Record record) throws NoStoreDelegateException {
         super.store(record);
       }
 
       @Override
       public void store(final Record record) throws NoStoreDelegateException {
-        if (delegate == null) {
+        if (storeDelegate == null) {
           throw new NoStoreDelegateException();
         }
         synchronized (batch) {
           batch.add(record);
           if (record.guid.contains("Fail")) {
             batchShouldFail = true;
           }
 
@@ -129,22 +175,22 @@ public class SynchronizerHelpers {
         batchShouldFail = false;
         batch.clear();
         storeWorkQueue.execute(new Runnable() {
           @Override
           public void run() {
             Logger.trace("XXX", "Notifying about batch.  Failure? " + thisBatchShouldFail);
             for (Record batchRecord : thisBatch) {
               if (thisBatchShouldFail) {
-                delegate.onRecordStoreFailed(new StoreFailedException(), batchRecord.guid);
+                storeDelegate.onRecordStoreFailed(new StoreFailedException(), batchRecord.guid);
               } else {
                 try {
                   superStore(batchRecord);
                 } catch (NoStoreDelegateException e) {
-                  delegate.onRecordStoreFailed(e, batchRecord.guid);
+                  storeDelegate.onRecordStoreFailed(e, batchRecord.guid);
                 }
               }
             }
           }
         });
       }
 
       @Override
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestRecordsChannel.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestRecordsChannel.java
@@ -1,108 +1,101 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.test;
 
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.android.sync.test.SynchronizerHelpers.FailFetchWBORepository;
 import org.mozilla.android.sync.test.helpers.ExpectSuccessRepositorySessionCreationDelegate;
 import org.mozilla.android.sync.test.helpers.ExpectSuccessRepositorySessionFinishDelegate;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.background.testhelpers.WBORepository;
 import org.mozilla.gecko.background.testhelpers.WaitHelper;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
+import org.mozilla.gecko.sync.SyncDeadlineReachedException;
 import org.mozilla.gecko.sync.repositories.InactiveSessionException;
 import org.mozilla.gecko.sync.repositories.InvalidSessionTransitionException;
-import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.RepositorySessionBundle;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
 import org.mozilla.gecko.sync.synchronizer.RecordsChannel;
 import org.mozilla.gecko.sync.synchronizer.RecordsChannelDelegate;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 @RunWith(TestRunner.class)
 public class TestRecordsChannel {
 
-  protected WBORepository remote;
-  protected WBORepository local;
+  private WBORepository sourceRepository;
+  private RepositorySession sourceSession;
+  private WBORepository sinkRepository;
+  private RepositorySession sinkSession;
 
-  protected RepositorySession source;
-  protected RepositorySession sink;
-  protected RecordsChannelDelegate rcDelegate;
-
-  protected AtomicInteger numFlowFetchFailed;
-  protected AtomicInteger numFlowStoreFailed;
-  protected AtomicInteger numFlowCompleted;
-  protected AtomicBoolean flowBeginFailed;
-  protected AtomicBoolean flowFinishFailed;
+  private RecordsChannelDelegate rcDelegate;
 
-  public void doFlow(final Repository remote, final Repository local) throws Exception {
-    WaitHelper.getTestWaiter().performWait(new Runnable() {
-      @Override
-      public void run() {
-        remote.createSession(new ExpectSuccessRepositorySessionCreationDelegate(WaitHelper.getTestWaiter()) {
-          @Override
-          public void onSessionCreated(RepositorySession session) {
-            source = session;
-            local.createSession(new ExpectSuccessRepositorySessionCreationDelegate(WaitHelper.getTestWaiter()) {
-              @Override
-              public void onSessionCreated(RepositorySession session) {
-                sink = session;
-                WaitHelper.getTestWaiter().performNotify();
-              }
-            }, null);
-          }
-        }, null);
-      }
-    });
+  private AtomicInteger numFlowFetchFailed;
+  private AtomicInteger numFlowStoreFailed;
+  private AtomicInteger numFlowCompleted;
+  private AtomicBoolean flowBeginFailed;
+  private AtomicBoolean flowFinishFailed;
 
-    assertNotNull(source);
-    assertNotNull(sink);
+  private volatile RecordsChannel recordsChannel;
+  private volatile Exception fetchException;
+  private volatile Exception storeException;
 
+  @Before
+  public void setUp() throws Exception {
     numFlowFetchFailed = new AtomicInteger(0);
     numFlowStoreFailed = new AtomicInteger(0);
     numFlowCompleted = new AtomicInteger(0);
     flowBeginFailed = new AtomicBoolean(false);
     flowFinishFailed = new AtomicBoolean(false);
 
+    // Repositories and sessions will be set/created by tests.
+    sourceRepository = null;
+    sourceSession = null;
+    sinkRepository = null;
+    sinkSession = null;
+
     rcDelegate = new RecordsChannelDelegate() {
       @Override
       public void onFlowFetchFailed(RecordsChannel recordsChannel, Exception ex) {
         numFlowFetchFailed.incrementAndGet();
+        fetchException = ex;
       }
 
       @Override
       public void onFlowStoreFailed(RecordsChannel recordsChannel, Exception ex, String recordGuid) {
         numFlowStoreFailed.incrementAndGet();
+        storeException = ex;
       }
 
       @Override
       public void onFlowFinishFailed(RecordsChannel recordsChannel, Exception ex) {
         flowFinishFailed.set(true);
         WaitHelper.getTestWaiter().performNotify();
       }
 
       @Override
       public void onFlowCompleted(RecordsChannel recordsChannel, long fetchEnd, long storeEnd) {
         numFlowCompleted.incrementAndGet();
         try {
-          sink.finish(new ExpectSuccessRepositorySessionFinishDelegate(WaitHelper.getTestWaiter()) {
+          sinkSession.finish(new ExpectSuccessRepositorySessionFinishDelegate(WaitHelper.getTestWaiter()) {
             @Override
             public void onFinishSucceeded(RepositorySession session, RepositorySessionBundle bundle) {
               try {
-                source.finish(new ExpectSuccessRepositorySessionFinishDelegate(WaitHelper.getTestWaiter()) {
+                sourceSession.finish(new ExpectSuccessRepositorySessionFinishDelegate(WaitHelper.getTestWaiter()) {
                   @Override
                   public void onFinishSucceeded(RepositorySession session, RepositorySessionBundle bundle) {
                     performNotify();
                   }
                 });
               } catch (InactiveSessionException e) {
                 WaitHelper.getTestWaiter().performNotify(e);
               }
@@ -114,116 +107,230 @@ public class TestRecordsChannel {
       }
 
       @Override
       public void onFlowBeginFailed(RecordsChannel recordsChannel, Exception ex) {
         flowBeginFailed.set(true);
         WaitHelper.getTestWaiter().performNotify();
       }
     };
+  }
 
-    final RecordsChannel rc = new RecordsChannel(source,  sink, rcDelegate);
+  private void createSessions() {
+    WaitHelper.getTestWaiter().performWait(new Runnable() {
+      @Override
+      public void run() {
+        sourceRepository.createSession(new ExpectSuccessRepositorySessionCreationDelegate(WaitHelper.getTestWaiter()) {
+          @Override
+          public void onSessionCreated(RepositorySession session) {
+            sourceSession = session;
+            sinkRepository.createSession(new ExpectSuccessRepositorySessionCreationDelegate(WaitHelper.getTestWaiter()) {
+              @Override
+              public void onSessionCreated(RepositorySession session) {
+                sinkSession = session;
+                WaitHelper.getTestWaiter().performNotify();
+              }
+            }, null);
+          }
+        }, null);
+      }
+    });
+  }
+
+  public void doFlow() throws Exception {
+    createSessions();
+    assertNotNull(sourceSession);
+    assertNotNull(sinkSession);
+    recordsChannel = new RecordsChannel(sourceSession,  sinkSession, rcDelegate);
     WaitHelper.getTestWaiter().performWait(new Runnable() {
       @Override
       public void run() {
         try {
-          rc.beginAndFlow();
+          recordsChannel.beginAndFlow();
         } catch (InvalidSessionTransitionException e) {
           WaitHelper.getTestWaiter().performNotify(e);
         }
       }
     });
   }
 
+  // NB: records in WBORepository are stored in a HashMap, so don't assume an order.
   public static final BookmarkRecord[] inbounds = new BookmarkRecord[] {
     new BookmarkRecord("inboundSucc1", "bookmarks", 1, false),
     new BookmarkRecord("inboundSucc2", "bookmarks", 1, false),
     new BookmarkRecord("inboundFail1", "bookmarks", 1, false),
     new BookmarkRecord("inboundSucc3", "bookmarks", 1, false),
     new BookmarkRecord("inboundSucc4", "bookmarks", 1, false),
     new BookmarkRecord("inboundFail2", "bookmarks", 1, false),
   };
   public static final BookmarkRecord[] outbounds = new BookmarkRecord[] {
       new BookmarkRecord("outboundSucc1", "bookmarks", 1, false),
       new BookmarkRecord("outboundSucc2", "bookmarks", 1, false),
       new BookmarkRecord("outboundSucc3", "bookmarks", 1, false),
+      new BookmarkRecord("outboundFail6", "bookmarks", 1, false),
       new BookmarkRecord("outboundSucc4", "bookmarks", 1, false),
       new BookmarkRecord("outboundSucc5", "bookmarks", 1, false),
-      new BookmarkRecord("outboundFail6", "bookmarks", 1, false),
   };
 
   protected WBORepository empty() {
     WBORepository repo = new SynchronizerHelpers.TrackingWBORepository();
     return repo;
   }
 
   protected WBORepository full() {
     WBORepository repo = new SynchronizerHelpers.TrackingWBORepository();
     for (BookmarkRecord outbound : outbounds) {
       repo.wbos.put(outbound.guid, outbound);
     }
     return repo;
   }
 
-  protected WBORepository failingFetch() {
-    WBORepository repo = new FailFetchWBORepository();
+  protected WBORepository failingFetch(SynchronizerHelpers.FailMode failMode) {
+    WBORepository repo = new FailFetchWBORepository(failMode);
+
     for (BookmarkRecord outbound : outbounds) {
       repo.wbos.put(outbound.guid, outbound);
     }
     return repo;
   }
 
   @Test
   public void testSuccess() throws Exception {
-    WBORepository source = full();
-    WBORepository sink = empty();
-    doFlow(source, sink);
+    sourceRepository = full();
+    sinkRepository = empty();
+    doFlow();
     assertEquals(1, numFlowCompleted.get());
     assertEquals(0, numFlowFetchFailed.get());
     assertEquals(0, numFlowStoreFailed.get());
-    assertEquals(source.wbos, sink.wbos);
+    assertEquals(sourceRepository.wbos, sinkRepository.wbos);
+    assertEquals(0, recordsChannel.getFetchFailureCount());
+    assertEquals(0, recordsChannel.getStoreFailureCount());
+    assertEquals(6, recordsChannel.getStoreCount());
   }
 
   @Test
   public void testFetchFail() throws Exception {
-    WBORepository source = failingFetch();
-    WBORepository sink = empty();
-    doFlow(source, sink);
+    sourceRepository = failingFetch(SynchronizerHelpers.FailMode.FETCH);
+    sinkRepository = empty();
+    doFlow();
+    assertEquals(1, numFlowCompleted.get());
+    assertTrue(numFlowFetchFailed.get() > 0);
+    assertEquals(0, numFlowStoreFailed.get());
+    assertTrue(sinkRepository.wbos.size() < 6);
+    assertTrue(recordsChannel.getFetchFailureCount() > 0);
+    assertEquals(0, recordsChannel.getStoreFailureCount());
+    assertTrue(recordsChannel.getStoreCount() < 6);
+  }
+
+  @Test
+  public void testStoreFetchFailedCollectionModified() throws Exception {
+    sourceRepository = failingFetch(SynchronizerHelpers.FailMode.COLLECTION_MODIFIED);
+    sinkRepository = empty();
+    doFlow();
     assertEquals(1, numFlowCompleted.get());
     assertTrue(numFlowFetchFailed.get() > 0);
     assertEquals(0, numFlowStoreFailed.get());
-    assertTrue(sink.wbos.size() < 6);
+    assertTrue(sinkRepository.wbos.size() < 6);
+
+    assertTrue(recordsChannel.getFetchFailureCount() > 0);
+    assertEquals(0, recordsChannel.getStoreFailureCount());
+    assertTrue(recordsChannel.getStoreCount() < sourceRepository.wbos.size());
+
+    assertEquals(CollectionConcurrentModificationException.class, fetchException.getClass());
+    final Exception ex = recordsChannel.getReflowException();
+    assertNotNull(ex);
+    assertEquals(CollectionConcurrentModificationException.class, ex.getClass());
+  }
+
+  @Test
+  public void testStoreFetchFailedDeadline() throws Exception {
+    sourceRepository = failingFetch(SynchronizerHelpers.FailMode.DEADLINE_REACHED);
+    sinkRepository = empty();
+    doFlow();
+    assertEquals(1, numFlowCompleted.get());
+    assertTrue(numFlowFetchFailed.get() > 0);
+    assertEquals(0, numFlowStoreFailed.get());
+    assertTrue(sinkRepository.wbos.size() < 6);
+
+    assertTrue(recordsChannel.getFetchFailureCount() > 0);
+    assertEquals(0, recordsChannel.getStoreFailureCount());
+    assertTrue(recordsChannel.getStoreCount() < sourceRepository.wbos.size());
+
+    assertEquals(SyncDeadlineReachedException.class, fetchException.getClass());
+    final Exception ex = recordsChannel.getReflowException();
+    assertNotNull(ex);
+    assertEquals(SyncDeadlineReachedException.class, ex.getClass());
   }
 
   @Test
   public void testStoreSerialFail() throws Exception {
-    WBORepository source = full();
-    WBORepository sink = new SynchronizerHelpers.SerialFailStoreWBORepository();
-    doFlow(source, sink);
+    sourceRepository = full();
+    sinkRepository = new SynchronizerHelpers.SerialFailStoreWBORepository(
+            SynchronizerHelpers.FailMode.STORE);
+    doFlow();
     assertEquals(1, numFlowCompleted.get());
     assertEquals(0, numFlowFetchFailed.get());
     assertEquals(1, numFlowStoreFailed.get());
-    assertEquals(5, sink.wbos.size());
+    // We will fail to store one of the records but expect flow to continue.
+    assertEquals(5, sinkRepository.wbos.size());
+
+    assertEquals(0, recordsChannel.getFetchFailureCount());
+    assertEquals(1, recordsChannel.getStoreFailureCount());
+    // Number of store attempts.
+    assertEquals(sourceRepository.wbos.size(), recordsChannel.getStoreCount());
+  }
+
+  @Test
+  public void testStoreSerialFailCollectionModified() throws Exception {
+    sourceRepository = full();
+    sinkRepository = new SynchronizerHelpers.SerialFailStoreWBORepository(
+            SynchronizerHelpers.FailMode.COLLECTION_MODIFIED);
+    doFlow();
+    assertEquals(1, numFlowCompleted.get());
+    assertEquals(0, numFlowFetchFailed.get());
+    assertEquals(1, numFlowStoreFailed.get());
+    // One of the records will fail, at which point we'll stop flowing them.
+    final int sunkenRecords = sinkRepository.wbos.size();
+    assertTrue(sunkenRecords > 0 && sunkenRecords < 6);
+
+    assertEquals(0, recordsChannel.getFetchFailureCount());
+    // RecordsChannel's storeFail count is only incremented for failures of individual records.
+    assertEquals(0, recordsChannel.getStoreFailureCount());
+
+    assertEquals(CollectionConcurrentModificationException.class, storeException.getClass());
+    final Exception ex = recordsChannel.getReflowException();
+    assertNotNull(ex);
+    assertEquals(CollectionConcurrentModificationException.class, ex.getClass());
   }
 
   @Test
   public void testStoreBatchesFail() throws Exception {
-    WBORepository source = full();
-    WBORepository sink = new SynchronizerHelpers.BatchFailStoreWBORepository(3);
-    doFlow(source, sink);
+    sourceRepository = full();
+    sinkRepository = new SynchronizerHelpers.BatchFailStoreWBORepository(3);
+    doFlow();
     assertEquals(1, numFlowCompleted.get());
     assertEquals(0, numFlowFetchFailed.get());
     assertEquals(3, numFlowStoreFailed.get()); // One batch fails.
-    assertEquals(3, sink.wbos.size()); // One batch succeeds.
+    assertEquals(3, sinkRepository.wbos.size()); // One batch succeeds.
+
+    assertEquals(0, recordsChannel.getFetchFailureCount());
+    assertEquals(3, recordsChannel.getStoreFailureCount());
+    // Number of store attempts.
+    assertEquals(sourceRepository.wbos.size(), recordsChannel.getStoreCount());
   }
 
 
   @Test
   public void testStoreOneBigBatchFail() throws Exception {
-    WBORepository source = full();
-    WBORepository sink = new SynchronizerHelpers.BatchFailStoreWBORepository(50);
-    doFlow(source, sink);
+    sourceRepository = full();
+    sinkRepository = new SynchronizerHelpers.BatchFailStoreWBORepository(50);
+    doFlow();
     assertEquals(1, numFlowCompleted.get());
     assertEquals(0, numFlowFetchFailed.get());
     assertEquals(6, numFlowStoreFailed.get()); // One (big) batch fails.
-    assertEquals(0, sink.wbos.size()); // No batches succeed.
+    assertEquals(0, sinkRepository.wbos.size()); // No batches succeed.
+
+    assertEquals(0, recordsChannel.getFetchFailureCount());
+    assertEquals(6, recordsChannel.getStoreFailureCount());
+    // Number of store attempts.
+    assertEquals(sourceRepository.wbos.size(), recordsChannel.getStoreCount());
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestResetCommands.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestResetCommands.java
@@ -112,17 +112,17 @@ public class TestResetCommands {
     final MockServerSyncStage stageNotReset = new MockServerSyncStage() {
       @Override
       public void resetLocal() {
         no.called = true;
       }
     };
 
     stagesToRun.put(Stage.syncBookmarks, stageGetsReset);
-    stagesToRun.put(Stage.syncHistory,   stageNotReset);
+    stagesToRun.put(Stage.syncFullHistory, stageNotReset);
 
     final String resetBookmarks = "{\"args\":[\"bookmarks\"],\"command\":\"resetEngine\"}";
     ExtendedJSONObject unparsedCommand = new ExtendedJSONObject(resetBookmarks);
     CommandProcessor processor = CommandProcessor.getProcessor();
     processor.processCommand(session, unparsedCommand);
 
     assertTrue(yes.called);
     assertFalse(no.called);
rename from mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer11RepositorySession.java
rename to mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer15RepositorySession.java
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer11RepositorySession.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer15RepositorySession.java
@@ -1,54 +1,53 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.test;
 
+import android.os.SystemClock;
+
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.android.sync.test.SynchronizerHelpers.TrackingWBORepository;
 import org.mozilla.android.sync.test.helpers.BaseTestStorageRequestDelegate;
 import org.mozilla.android.sync.test.helpers.HTTPServerTestHelper;
 import org.mozilla.android.sync.test.helpers.MockServer;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
-import org.mozilla.gecko.background.testhelpers.WaitHelper;
+import org.mozilla.gecko.sync.CollectionConcurrentModificationException;
 import org.mozilla.gecko.sync.InfoCollections;
 import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.JSONRecordFetcher;
 import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.crypto.KeyBundle;
 import org.mozilla.gecko.sync.middleware.Crypto5MiddlewareRepository;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.net.BaseResource;
 import org.mozilla.gecko.sync.net.BasicAuthHeaderProvider;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.repositories.FetchFailedException;
-import org.mozilla.gecko.sync.repositories.RepositorySession;
-import org.mozilla.gecko.sync.repositories.Server11Repository;
+import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
+import org.mozilla.gecko.sync.repositories.Server15Repository;
 import org.mozilla.gecko.sync.repositories.StoreFailedException;
-import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecordFactory;
-import org.mozilla.gecko.sync.stage.SafeConstrainedServer11Repository;
 import org.mozilla.gecko.sync.synchronizer.ServerLocalSynchronizer;
 import org.mozilla.gecko.sync.synchronizer.Synchronizer;
 import org.simpleframework.http.ContentType;
 import org.simpleframework.http.Request;
 import org.simpleframework.http.Response;
 
 import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.TimeUnit;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 @RunWith(TestRunner.class)
-public class TestServer11RepositorySession {
+public class TestServer15RepositorySession {
 
   public class POSTMockServer extends MockServer {
     @Override
     public void handle(Request request, Response response) {
       try {
         String content = request.getContent();
         System.out.println("Content:" + content);
       } catch (IOException e) {
@@ -58,36 +57,31 @@ public class TestServer11RepositorySessi
       System.out.println("Content-Type:" + contentType);
       super.handle(request, response, 200, "{success:[]}");
     }
   }
 
   private static final int    TEST_PORT   = HTTPServerTestHelper.getTestPort();
   private static final String TEST_SERVER = "http://localhost:" + TEST_PORT + "/";
   static final String LOCAL_BASE_URL      = TEST_SERVER + "1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/";
-  static final String LOCAL_INFO_BASE_URL = LOCAL_BASE_URL + "info/";
-  static final String LOCAL_COUNTS_URL    = LOCAL_INFO_BASE_URL + "collection_counts";
 
   // Corresponds to rnewman+atest1@mozilla.com, local.
   static final String TEST_USERNAME          = "n6ec3u5bee3tixzp2asys7bs6fve4jfw";
   static final String TEST_PASSWORD          = "passowrd";
   static final String SYNC_KEY          = "eh7ppnb82iwr5kt3z3uyi5vr44";
 
   public final AuthHeaderProvider authHeaderProvider = new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD);
   protected final InfoCollections infoCollections = new InfoCollections() {
     @Override
     public Long getTimestamp(String collection) {
       return 0L;
     }
   };
   protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
 
-  // Few-second timeout so that our longer operations don't time out and cause spurious error-handling results.
-  private static final int SHORT_TIMEOUT = 10000;
-
   public AuthHeaderProvider getAuthHeaderProvider() {
     return new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD);
   }
 
   private HTTPServerTestHelper data     = new HTTPServerTestHelper();
 
   public class TestSyncStorageRequestDelegate extends
   BaseTestStorageRequestDelegate {
@@ -113,17 +107,20 @@ public class TestServer11RepositorySessi
     }
     return local;
   }
 
   protected Exception doSynchronize(MockServer server) throws Exception {
     final String COLLECTION = "test";
 
     final TrackingWBORepository local = getLocal(100);
-    final Server11Repository remote = new Server11Repository(COLLECTION, getCollectionURL(COLLECTION), authHeaderProvider, infoCollections, infoConfiguration);
+    final Server15Repository remote = new Server15Repository(
+            COLLECTION, SystemClock.elapsedRealtime() + TimeUnit.MINUTES.toMillis(30),
+            getCollectionURL(COLLECTION), authHeaderProvider, infoCollections, infoConfiguration,
+            new NonPersistentRepositoryStateProvider());
     KeyBundle collectionKey = new KeyBundle(TEST_USERNAME, SYNC_KEY);
     Crypto5MiddlewareRepository cryptoRepo = new Crypto5MiddlewareRepository(remote, collectionKey);
     cryptoRepo.recordFactory = new BookmarkRecordFactory();
 
     final Synchronizer synchronizer = new ServerLocalSynchronizer();
     synchronizer.repositoryA = cryptoRepo;
     synchronizer.repositoryB = local;
 
@@ -144,16 +141,24 @@ public class TestServer11RepositorySessi
   public void testFetchFailure() throws Exception {
     MockServer server = new MockServer(404, "error");
     Exception e = doSynchronize(server);
     assertNotNull(e);
     assertEquals(FetchFailedException.class, e.getClass());
   }
 
   @Test
+  public void testFetch412Failure() throws Exception {
+    MockServer server = new MockServer(412, "error");
+    Exception e = doSynchronize(server);
+    assertNotNull(e);
+    assertEquals(CollectionConcurrentModificationException.class, e.getClass());
+  }
+
+  @Test
   public void testStorePostSuccessWithFailingRecords() throws Exception {
     MockServer server = new MockServer(200, "{ modified: \" + " + Utils.millisecondsToDecimalSeconds(System.currentTimeMillis()) + ", " +
         "success: []," +
         "failed: { outboundFail2: [] } }");
     Exception e = doSynchronize(server);
     assertNotNull(e);
     assertEquals(StoreFailedException.class, e.getClass());
   }
@@ -170,67 +175,9 @@ public class TestServer11RepositorySessi
         return;
       }
     };
 
     Exception e = doSynchronize(server);
     assertNotNull(e);
     assertEquals(StoreFailedException.class, e.getClass());
   }
-
-  @Test
-  public void testConstraints() throws Exception {
-    MockServer server = new MockServer() {
-      @Override
-      public void handle(Request request, Response response) {
-        if (request.getMethod().equals("GET")) {
-          if (request.getPath().getPath().endsWith("/info/collection_counts")) {
-            this.handle(request, response, 200, "{\"bookmarks\": 5001}");
-          }
-        }
-        this.handle(request, response, 400, "NOOOO");
-      }
-    };
-    final JSONRecordFetcher countsFetcher = new JSONRecordFetcher(LOCAL_COUNTS_URL, getAuthHeaderProvider());
-    String collection = "bookmarks";
-    final SafeConstrainedServer11Repository remote = new SafeConstrainedServer11Repository(collection,
-        getCollectionURL(collection),
-        getAuthHeaderProvider(),
-        infoCollections,
-        infoConfiguration,
-        5000, 5000, "sortindex", countsFetcher);
-
-    data.startHTTPServer(server);
-    final AtomicBoolean out = new AtomicBoolean(false);
-
-    // Verify that shouldSkip returns true due to a fetch of too large counts,
-    // rather than due to a timeout failure waiting to fetch counts.
-    try {
-      WaitHelper.getTestWaiter().performWait(
-          SHORT_TIMEOUT,
-          new Runnable() {
-            @Override
-            public void run() {
-              remote.createSession(new RepositorySessionCreationDelegate() {
-                @Override
-                public void onSessionCreated(RepositorySession session) {
-                  out.set(session.shouldSkip());
-                  WaitHelper.getTestWaiter().performNotify();
-                }
-
-                @Override
-                public void onSessionCreateFailed(Exception ex) {
-                  WaitHelper.getTestWaiter().performNotify(ex);
-                }
-
-                @Override
-                public RepositorySessionCreationDelegate deferredCreationDelegate() {
-                  return this;
-                }
-              }, null);
-            }
-          });
-      assertTrue(out.get());
-    } finally {
-      data.stopHTTPServer();
-    }
-  }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServerLocalSynchronizer.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServerLocalSynchronizer.java
@@ -102,47 +102,47 @@ public class TestServerLocalSynchronizer
 
     assertEquals(12, local.wbos.size());
     assertEquals(12, remote.wbos.size());
   }
 
   @Test
   public void testLocalFetchErrors() {
     WBORepository remote = new TrackingWBORepository();
-    WBORepository local  = new FailFetchWBORepository();
+    WBORepository local  = new FailFetchWBORepository(SynchronizerHelpers.FailMode.FETCH);
 
     Synchronizer synchronizer = getSynchronizer(remote, local);
     Exception e = doSynchronize(synchronizer);
     assertNotNull(e);
     assertEquals(FetchFailedException.class, e.getClass());
 
     // Neither session gets finished successfully, so all records are dropped.
     assertEquals(6, local.wbos.size());
     assertEquals(6, remote.wbos.size());
   }
 
   @Test
   public void testRemoteFetchErrors() {
-    WBORepository remote = new FailFetchWBORepository();
+    WBORepository remote = new FailFetchWBORepository(SynchronizerHelpers.FailMode.FETCH);
     WBORepository local  = new TrackingWBORepository();
 
     Synchronizer synchronizer = getSynchronizer(remote, local);
     Exception e = doSynchronize(synchronizer);
     assertNotNull(e);
     assertEquals(FetchFailedException.class, e.getClass());
 
     // Neither session gets finished successfully, so all records are dropped.
     assertEquals(6, local.wbos.size());
     assertEquals(6, remote.wbos.size());
   }
 
   @Test
   public void testLocalSerialStoreErrorsAreIgnored() {
     WBORepository remote = new TrackingWBORepository();
-    WBORepository local  = new SerialFailStoreWBORepository();
+    WBORepository local  = new SerialFailStoreWBORepository(SynchronizerHelpers.FailMode.FETCH);
 
     Synchronizer synchronizer = getSynchronizer(remote, local);
     assertNull(doSynchronize(synchronizer));
 
     assertEquals(9,  local.wbos.size());
     assertEquals(12, remote.wbos.size());
   }
 
@@ -153,17 +153,17 @@ public class TestServerLocalSynchronizer
     Synchronizer synchronizer = getSynchronizer(new TrackingWBORepository(), new BatchFailStoreWBORepository(BATCH_SIZE));
 
     Exception e = doSynchronize(synchronizer);
     assertNull(e);
   }
 
   @Test
   public void testRemoteSerialStoreErrorsAreNotIgnored() throws Exception {
-    Synchronizer synchronizer = getSynchronizer(new SerialFailStoreWBORepository(), new TrackingWBORepository()); // Tracking so we don't send incoming records back.
+    Synchronizer synchronizer = getSynchronizer(new SerialFailStoreWBORepository(SynchronizerHelpers.FailMode.STORE), new TrackingWBORepository()); // Tracking so we don't send incoming records back.
 
     Exception e = doSynchronize(synchronizer);
     assertNotNull(e);
     assertEquals(StoreFailedException.class, e.getClass());
   }
 
   @Test
   public void testRemoteBatchStoreErrorsAreNotIgnoredManyBatches() throws Exception {
deleted file mode 100644
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestSyncConfiguration.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Any copyright is dedicated to the Public Domain.
-   http://creativecommons.org/publicdomain/zero/1.0/ */
-
-package org.mozilla.android.sync.test;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mozilla.gecko.background.testhelpers.MockSharedPreferences;
-import org.mozilla.gecko.background.testhelpers.TestRunner;
-import org.mozilla.gecko.sync.Sync11Configuration;
-import org.mozilla.gecko.sync.SyncConfiguration;
-
-import java.net.URI;
-
-@RunWith(TestRunner.class)
-public class TestSyncConfiguration {
-  @Test
-  public void testURLs() throws Exception {
-    final MockSharedPreferences prefs = new MockSharedPreferences();
-
-    // N.B., the username isn't used in the cluster path.
-    SyncConfiguration fxaConfig = new SyncConfiguration("username", null, prefs);
-    fxaConfig.clusterURL = new URI("http://db1.oldsync.dev.lcip.org/1.1/174");
-    Assert.assertEquals("http://db1.oldsync.dev.lcip.org/1.1/174/info/collections", fxaConfig.infoCollectionsURL());
-    Assert.assertEquals("http://db1.oldsync.dev.lcip.org/1.1/174/info/collection_counts", fxaConfig.infoCollectionCountsURL());
-    Assert.assertEquals("http://db1.oldsync.dev.lcip.org/1.1/174/storage/meta/global", fxaConfig.metaURL());
-    Assert.assertEquals("http://db1.oldsync.dev.lcip.org/1.1/174/storage", fxaConfig.storageURL());
-    Assert.assertEquals("http://db1.oldsync.dev.lcip.org/1.1/174/storage/collection", fxaConfig.collectionURI("collection").toASCIIString());
-
-    SyncConfiguration oldConfig = new Sync11Configuration("username", null, prefs);
-    oldConfig.clusterURL = new URI("https://db.com/internal/");
-    Assert.assertEquals("https://db.com/internal/1.1/username/info/collections", oldConfig.infoCollectionsURL());
-    Assert.assertEquals("https://db.com/internal/1.1/username/info/collection_counts", oldConfig.infoCollectionCountsURL());
-    Assert.assertEquals("https://db.com/internal/1.1/username/storage/meta/global", oldConfig.metaURL());
-    Assert.assertEquals("https://db.com/internal/1.1/username/storage", oldConfig.storageURL());
-    Assert.assertEquals("https://db.com/internal/1.1/username/storage/collection", oldConfig.collectionURI("collection").toASCIIString());
-  }
-}
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/ExpectSuccessRepositorySessionFetchRecordsDelegate.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/ExpectSuccessRepositorySessionFetchRecordsDelegate.java
@@ -15,17 +15,17 @@ public class ExpectSuccessRepositorySess
     ExpectSuccessDelegate implements RepositorySessionFetchRecordsDelegate {
   public ArrayList<Record> fetchedRecords = new ArrayList<Record>();
 
   public ExpectSuccessRepositorySessionFetchRecordsDelegate(WaitHelper waitHelper) {
     super(waitHelper);
   }
 
   @Override
-  public void onFetchFailed(Exception ex, Record record) {
+  public void onFetchFailed(Exception ex) {
     log("Fetch failed.", ex);
     performNotify(new AssertionFailedError("onFetchFailed: fetch should not have failed."));
   }
 
   @Override
   public void onFetchedRecord(Record record) {
     fetchedRecords.add(record);
     log("Fetched record with guid '" + record.guid + "'.");
@@ -33,12 +33,17 @@ public class ExpectSuccessRepositorySess
 
   @Override
   public void onFetchCompleted(long end) {
     log("Fetch completed.");
     performNotify();
   }
 
   @Override
+  public void onBatchCompleted() {
+    log("Batch completed.");
+  }
+
+  @Override
   public RepositorySessionFetchRecordsDelegate deferredFetchDelegate(ExecutorService executor) {
     return this;
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/ExpectSuccessRepositorySessionStoreDelegate.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/ExpectSuccessRepositorySessionStoreDelegate.java
@@ -28,12 +28,18 @@ public class ExpectSuccessRepositorySess
   }
 
   @Override
   public void onStoreCompleted(long storeEnd) {
     log("Record store completed at " + storeEnd);
   }
 
   @Override
+  public void onStoreFailed(Exception e) {
+    log("Store failed.", e);
+    performNotify(new AssertionFailedError("onStoreFailed: store should not have failed."));
+  }
+
+  @Override
   public RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService executor) {
     return this;
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/MockGlobalSessionCallback.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/helpers/MockGlobalSessionCallback.java
@@ -52,16 +52,22 @@ public class MockGlobalSessionCallback i
   @Override
   public void handleError(GlobalSession globalSession, Exception ex) {
     this.calledError = true;
     this.calledErrorException = ex;
     this.testWaiter().performNotify();
   }
 
   @Override
+  public void handleIncompleteStage(Stage currentState,
+                                    GlobalSession globalSession) {
+
+  }
+
+  @Override
   public void handleStageCompleted(Stage currentState,
            GlobalSession globalSession) {
     stageCounter--;
   }
 
   @Override
   public void requestBackoff(long backoff) {
     this.calledRequestBackoff = true;
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/db/DelegatingTestContentProvider.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/db/DelegatingTestContentProvider.java
@@ -6,16 +6,18 @@ package org.mozilla.gecko.background.db;
 
 import android.content.ContentProvider;
 import android.content.ContentProviderOperation;
 import android.content.ContentProviderResult;
 import android.content.ContentValues;
 import android.content.OperationApplicationException;
 import android.database.Cursor;
 import android.net.Uri;
+import android.os.Bundle;
+import android.support.annotation.Nullable;
 
 import org.mozilla.gecko.db.BrowserContract;
 
 import java.util.ArrayList;
 
 /**
  * Wrap a ContentProvider, appending &test=1 to all queries.
  */
@@ -75,12 +77,18 @@ public class DelegatingTestContentProvid
         return mTargetProvider.applyBatch(operations);
     }
 
     @Override
     public int bulkInsert(Uri uri, ContentValues[] values) {
         return mTargetProvider.bulkInsert(appendTestParam(uri), values);
     }
 
+    @Nullable
+    @Override
+    public Bundle call(String method, String arg, Bundle extras) {
+        return mTargetProvider.call(method, arg, extras);
+    }
+
     public ContentProvider getTargetProvider() {
         return mTargetProvider;
     }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/DefaultGlobalSessionCallback.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/DefaultGlobalSessionCallback.java
@@ -40,12 +40,18 @@ public class DefaultGlobalSessionCallbac
   }
 
   @Override
   public void handleStageCompleted(Stage currentState,
                                    GlobalSession globalSession) {
   }
 
   @Override
+  public void handleIncompleteStage(Stage currentState,
+                                    GlobalSession globalSession) {
+
+  }
+
+  @Override
   public boolean shouldBackOffStorage() {
     return false;
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/WBORepository.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/WBORepository.java
@@ -120,50 +120,50 @@ public class WBORepository extends Repos
       }
       long fetchCompleted  = now();
       stats.fetchCompleted = fetchCompleted;
       delegate.deferredFetchDelegate(delegateExecutor).onFetchCompleted(fetchCompleted);
     }
 
     @Override
     public void store(final Record record) throws NoStoreDelegateException {
-      if (delegate == null) {
+      if (storeDelegate == null) {
         throw new NoStoreDelegateException();
       }
       final long now = now();
       if (stats.storeBegan < 0) {
         stats.storeBegan = now;
       }
       Record existing = wbos.get(record.guid);
       Logger.debug(LOG_TAG, "Existing record is " + (existing == null ? "<null>" : (existing.guid + ", " + existing)));
       if (existing != null &&
           existing.lastModified > record.lastModified) {
         Logger.debug(LOG_TAG, "Local record is newer. Not storing.");
-        delegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
+        storeDelegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
         return;
       }
       if (existing != null) {
         Logger.debug(LOG_TAG, "Replacing local record.");
       }
 
       // Store a copy of the record with an updated modified time.
       Record toStore = record.copyWithIDs(record.guid, record.androidID);
       if (bumpTimestamps) {
         toStore.lastModified = now;
       }
       wbos.put(record.guid, toStore);
 
       trackRecord(toStore);
-      delegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
+      storeDelegate.deferredStoreDelegate(delegateExecutor).onRecordStoreSucceeded(record.guid);
     }
 
     @Override
     public void wipe(final RepositorySessionWipeDelegate delegate) {
       if (!isActive()) {
-        delegate.onWipeFailed(new InactiveSessionException(null));
+        delegate.onWipeFailed(new InactiveSessionException());
         return;
       }
 
       Logger.info(LOG_TAG, "Wiping WBORepositorySession.");
       this.wbos = new ConcurrentHashMap<String, Record>();
 
       // Wipe immediately for the convenience of test code.
       wboRepository.wbos = new ConcurrentHashMap<String, Record>();
@@ -188,17 +188,17 @@ public class WBORepository extends Repos
     @Override
     public void storeDone(long end) {
       // TODO: this is not guaranteed to be called after all of the record
       // store callbacks have completed!
       if (stats.storeBegan < 0) {
         stats.storeBegan = end;
       }
       stats.storeCompleted = end;
-      delegate.deferredStoreDelegate(delegateExecutor).onStoreCompleted(end);
+      storeDelegate.deferredStoreDelegate(delegateExecutor).onStoreCompleted(end);
     }
   }
 
   public ConcurrentHashMap<String, Record> wbos;
 
   public WBORepository(boolean bumpTimestamps) {
     super();
     this.bumpTimestamps = bumpTimestamps;
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/db/BrowserProviderHistoryTest.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/db/BrowserProviderHistoryTest.java
@@ -3,16 +3,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.db;
 
 import android.content.ContentProviderClient;
 import android.content.ContentValues;
 import android.database.Cursor;
 import android.net.Uri;
+import android.os.Bundle;
 import android.os.RemoteException;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.robolectric.shadows.ShadowContentResolver;
@@ -254,16 +255,108 @@ public class BrowserProviderHistoryTest 
             assertTrue(true);
 
             // NB: same values as above, to ensure throwing update didn't actually change anything.
             assertHistoryAggregates(BrowserContract.History.URL + " = ?", new String[] {url},
                     2, 19, lastVisited3, 8, lastVisited3);
         }
     }
 
+    @Test
+    public void testBulkHistoryInsert() throws Exception {
+        // Test basic error conditions.
+        String historyTestUriArg = historyTestUri.toString();
+        Bundle result = historyClient.call(BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC, historyTestUriArg, new Bundle());
+        assertNotNull(result);
+        assertNotNull(result.getSerializable(BrowserContract.METHOD_RESULT));
+
+        final Bundle data = new Bundle();
+
+        Bundle[] recordBundles = new Bundle[0];
+        data.putSerializable(BrowserContract.METHOD_PARAM_DATA, recordBundles);
+        result = historyClient.call(BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC, historyTestUriArg, data);
+        assertNotNull(result);
+        assertNull(result.getSerializable(BrowserContract.METHOD_RESULT));
+        assertRowCount(historyClient, historyTestUri, 0);
+
+        // Test insert three history records with 10 visits each.
+        recordBundles = new Bundle[3];
+        for (int i = 0; i < 3; i++) {
+            final Bundle bundle = new Bundle();
+            bundle.putParcelable(BrowserContract.METHOD_PARAM_OBJECT, buildHistoryCV("guid" + i, "Test", "https://www.mozilla.org/" + i, 10L, 10L, 10));
+            bundle.putSerializable(BrowserContract.History.VISITS, buildHistoryVisitsCVs(10, "guid" + i, 1L, 3, false));
+            recordBundles[i] = bundle;
+        }
+        data.putSerializable(BrowserContract.METHOD_PARAM_DATA, recordBundles);
+
+        result = historyClient.call(BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC, historyTestUriArg, data);
+        assertNotNull(result);
+        assertNull(result.getSerializable(BrowserContract.METHOD_RESULT));
+        assertRowCount(historyClient, historyTestUri, 3);
+        assertRowCount(visitsClient, visitsTestUri, 30);
+
+        // Test insert mixed data.
+        recordBundles = new Bundle[3];
+        final Bundle bundle = new Bundle();
+        bundle.putParcelable(BrowserContract.METHOD_PARAM_OBJECT, buildHistoryCV("guid4", null, "https://www.mozilla.org/1", null, null, null));
+        bundle.putSerializable(BrowserContract.History.VISITS, new ContentValues[0]);
+        recordBundles[0] = bundle;
+        final Bundle bundle2 = new Bundle();
+        bundle2.putParcelable(BrowserContract.METHOD_PARAM_OBJECT, buildHistoryCV("guid5", "Test", "https://www.mozilla.org/2", null, null, null));
+        bundle2.putSerializable(BrowserContract.History.VISITS, new ContentValues[0]);
+        recordBundles[1] = bundle2;
+        final Bundle bundle3 = new Bundle();
+        bundle3.putParcelable(BrowserContract.METHOD_PARAM_OBJECT, buildHistoryCV("guid6", "Test", "https://www.mozilla.org/3", 5L, 5L, 5));
+        bundle3.putSerializable(BrowserContract.History.VISITS, buildHistoryVisitsCVs(5, "guid6", 1L, 2, false));
+        recordBundles[2] = bundle3;
+        data.putSerializable(BrowserContract.METHOD_PARAM_DATA, recordBundles);
+
+        result = historyClient.call(BrowserContract.METHOD_INSERT_HISTORY_WITH_VISITS_FROM_SYNC, historyTestUriArg, data);
+        assertNotNull(result);
+        assertNull(result.getSerializable(BrowserContract.METHOD_RESULT));
+        assertRowCount(historyClient, historyTestUri, 6);
+        assertRowCount(visitsClient, visitsTestUri, 35);
+
+        assertHistoryAggregates(BrowserContract.History.URL + " = ?", new String[] {"https://www.mozilla.org/3"},
+                5, 0, 0, 5, 5);
+    }
+
+    private ContentValues[] buildHistoryVisitsCVs(int numberOfVisits, String guid, long baseDate, int visitType, boolean isLocal) {
+        final ContentValues[] visits = new ContentValues[numberOfVisits];
+        for (int i = 0; i < numberOfVisits; i++) {
+            final ContentValues visit = new ContentValues();
+            visit.put(BrowserContract.Visits.HISTORY_GUID, guid);
+            visit.put(BrowserContract.Visits.DATE_VISITED, baseDate + i);
+            visit.put(BrowserContract.Visits.VISIT_TYPE, visitType);
+            visit.put(BrowserContract.Visits.IS_LOCAL, isLocal ? BrowserContract.Visits.VISIT_IS_LOCAL : BrowserContract.Visits.VISIT_IS_REMOTE);
+            visits[i] = visit;
+        }
+        return visits;
+    }
+
+    private ContentValues buildHistoryCV(String guid, String title, String url, Long lastVisited, Long remoteLastVisited, Integer visits) {
+        ContentValues cv = new ContentValues();
+        cv.put(BrowserContract.History.GUID, guid);
+        if (title != null) {
+            cv.put(BrowserContract.History.TITLE, title);
+        }
+        cv.put(BrowserContract.History.URL, url);
+        if (lastVisited != null) {
+            cv.put(BrowserContract.History.DATE_LAST_VISITED, lastVisited);
+        }
+        if (remoteLastVisited != null) {
+            cv.put(BrowserContract.History.REMOTE_DATE_LAST_VISITED, remoteLastVisited);
+        }
+        if (visits != null) {
+            cv.put(BrowserContract.History.VISITS, visits);
+            cv.put(BrowserContract.History.REMOTE_VISITS, visits);
+        }
+        return cv;
+    }
+
     private void assertHistoryAggregates(String selection, String[] selectionArg, int visits, int localVisits, long localLastVisited, int remoteVisits, long remoteLastVisited) throws Exception {
         final Cursor c = historyClient.query(historyTestUri, new String[] {
                 BrowserContract.History.VISITS,
                 BrowserContract.History.LOCAL_VISITS,
                 BrowserContract.History.REMOTE_VISITS,
                 BrowserContract.History.LOCAL_DATE_LAST_VISITED,
                 BrowserContract.History.REMOTE_DATE_LAST_VISITED
         }, selection, selectionArg, null);
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/db/BrowserProviderHistoryVisitsTestBase.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/db/BrowserProviderHistoryVisitsTestBase.java
@@ -17,18 +17,17 @@ import org.robolectric.shadows.ShadowCon
 import java.util.UUID;
 
 public class BrowserProviderHistoryVisitsTestBase {
     /* package-private */ ShadowContentResolver contentResolver;
     /* package-private */ ContentProviderClient historyClient;
     /* package-private */ ContentProviderClient visitsClient;
     /* package-private */ Uri historyTestUri;
     /* package-private */ Uri visitsTestUri;
-
-    private BrowserProvider provider;
+    /* package-private */ BrowserProvider provider;
 
     @Before
     public void setUp() throws Exception {
         provider = new BrowserProvider();
         provider.onCreate();
         ShadowContentResolver.registerProvider(BrowserContract.AUTHORITY, new DelegatingTestContentProvider(provider));
 
         contentResolver = new ShadowContentResolver();
@@ -46,32 +45,39 @@ public class BrowserProviderHistoryVisit
         provider.shutdown();
     }
 
     /* package-private */  Uri testUri(Uri baseUri) {
         return baseUri.buildUpon().appendQueryParameter(BrowserContract.PARAM_IS_TEST, "1").build();
     }
 
     /* package-private */  Uri insertHistoryItem(String url, String guid) throws RemoteException {
-        return insertHistoryItem(url, guid, System.currentTimeMillis(), null, null);
+        return insertHistoryItem(url, guid, System.currentTimeMillis(), null, null, null);
     }
 
     /* package-private */  Uri insertHistoryItem(String url, String guid, Long lastVisited, Integer visitCount) throws RemoteException {
-        return insertHistoryItem(url, guid, lastVisited, visitCount, null);
+        return insertHistoryItem(url, guid, lastVisited, visitCount, null, null);
     }
 
     /* package-private */  Uri insertHistoryItem(String url, String guid, Long lastVisited, Integer visitCount, String title) throws RemoteException {
+        return insertHistoryItem(url, guid, lastVisited, visitCount, null, title);
+    }
+
+    /* package-private */  Uri insertHistoryItem(String url, String guid, Long lastVisited, Integer visitCount, Integer remoteVisits, String title) throws RemoteException {
         ContentValues historyItem = new ContentValues();
         historyItem.put(BrowserContract.History.URL, url);
         if (guid != null) {
             historyItem.put(BrowserContract.History.GUID, guid);
         }
         if (visitCount != null) {
             historyItem.put(BrowserContract.History.VISITS, visitCount);
         }
+        if (remoteVisits != null) {
+            historyItem.put(BrowserContract.History.REMOTE_VISITS, remoteVisits);
+        }
         historyItem.put(BrowserContract.History.DATE_LAST_VISITED, lastVisited);
         if (title != null) {
             historyItem.put(BrowserContract.History.TITLE, title);
         }
 
         return historyClient.insert(historyTestUri, historyItem);
     }
 }
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/middleware/BufferingMiddlewareRepositorySessionTest.java
@@ -0,0 +1,178 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.middleware;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mozilla.gecko.background.testhelpers.MockRecord;
+import org.mozilla.gecko.sync.middleware.storage.BufferStorage;
+import org.mozilla.gecko.sync.middleware.storage.MemoryBufferStorage;
+import org.mozilla.gecko.sync.repositories.Repository;
+import org.mozilla.gecko.sync.repositories.RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
+import org.mozilla.gecko.sync.repositories.domain.Record;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class BufferingMiddlewareRepositorySessionTest {
+    private RepositorySession innerRepositorySession;
+    private BufferingMiddlewareRepositorySession bufferingSession;
+    private BufferingMiddlewareRepositorySession bufferingSessionMocked;
+    private BufferStorage bufferStorage;
+    private BufferStorage bufferStorageMocked;
+
+    @Before
+    public void setUp() throws Exception {
+        BufferingMiddlewareRepository bufferingRepository;
+        Repository innerRepositoy;
+
+        innerRepositoy = mock(Repository.class);
+        innerRepositorySession = mock(RepositorySession.class);
+        bufferingRepository = new BufferingMiddlewareRepository(
+                0L,
+                new MemoryBufferStorage(),
+                innerRepositoy
+        );
+
+        bufferStorage = new MemoryBufferStorage();
+        bufferStorageMocked = mock(MemoryBufferStorage.class);
+
+        bufferingSession = new BufferingMiddlewareRepositorySession(
+                innerRepositorySession, bufferingRepository, 0L,
+                bufferStorage);
+
+        bufferingSessionMocked = new BufferingMiddlewareRepositorySession(
+                innerRep