Merge m-c to inbound. a=merge
author: Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 31 Aug 2016 09:54:24 -0400
changeset 353363 1a4c20fec8312d7bc6972973e01456fd8cfdb887
parent 353362 9e20e6e53428e544810f60b8222b60d3c2f52459 (current diff)
parent 353301 b38f935eb81130e8a1bb3b559efd8e30af3895a3 (diff)
child 353364 43cb9ff43f567ee8f132d42ee3d57a64110eb38a
push id: 6570
push user: raliiev@mozilla.com
push date: Mon, 14 Nov 2016 12:26:13 +0000
treeherder: mozilla-beta@f455459b2ae5 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 51.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge m-c to inbound. a=merge
mobile/android/base/resources/color-large-v11/tab_new_tab_strip_colors.xml
mobile/android/base/resources/color-large-v11/tab_strip_item_bg.xml
mobile/android/base/resources/color-large-v11/tab_strip_item_title.xml
mobile/android/base/resources/color-large-v11/tabs_counter_text_color.xml
mobile/android/base/resources/drawable-large-v11/tab_strip_button.xml
mobile/android/base/resources/drawable-large-v11/tab_strip_divider.xml
mobile/android/base/resources/layout-large-v11/tab_strip.xml
mobile/android/base/resources/layout-large-v11/tab_strip_inner.xml
mobile/android/base/resources/layout-large-v11/tab_strip_item.xml
mobile/android/base/resources/layout-large-v11/tab_strip_item_view.xml
mobile/android/base/resources/values/layout.xml
taskcluster/ci/legacy/tasks/tests/eslint-gecko.yml
tools/lint/eslint-formatter.js
tools/lint/eslint/eslint-plugin-mozilla/docs/balanced-listeners.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/import-browserjs-globals.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/import-globals.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/import-headjs-globals.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/index.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/mark-test-function-used.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/no-aArgs.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/no-cpows-in-tests.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/no-single-arg-cu-import.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/reject-importGlobalProperties.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/reject-some-requires.rst
tools/lint/eslint/eslint-plugin-mozilla/docs/var-only-at-top-level.rst
tools/lint/eslint/eslint-plugin-mozilla/moz.build
--- a/browser/base/content/tabbrowser.xml
+++ b/browser/base/content/tabbrowser.xml
@@ -1607,23 +1607,30 @@
             listener.destroy();
 
             let oldUserTypedValue = aBrowser.userTypedValue;
             let hadStartedLoad = aBrowser.didStartLoadSinceLastUserTyping();
 
             // Make sure the browser is destroyed so it unregisters from observer notifications
             aBrowser.destroy();
 
-            // Make sure to restore the original droppedLinkHandler.
+            // Make sure to restore the original droppedLinkHandler and
+            // relatedBrowser.
             let droppedLinkHandler = aBrowser.droppedLinkHandler;
+            let relatedBrowser = aBrowser.relatedBrowser;
 
             // Change the "remote" attribute.
             let parent = aBrowser.parentNode;
             parent.removeChild(aBrowser);
             aBrowser.setAttribute("remote", aShouldBeRemote ? "true" : "false");
+
+            // NB: This works with the hack in the browser constructor that
+            // turns this normal property into a field.
+            aBrowser.relatedBrowser = relatedBrowser;
+
             parent.appendChild(aBrowser);
 
             aBrowser.userTypedValue = oldUserTypedValue;
             if (hadStartedLoad) {
               aBrowser.urlbarChangeTracker.startedLoad();
             }
 
             aBrowser.droppedLinkHandler = droppedLinkHandler;
--- a/build/moz.configure/toolchain.configure
+++ b/build/moz.configure/toolchain.configure
@@ -773,17 +773,17 @@ def check_have_64_bit(have_64_bit, compi
     if have_64_bit != compiler_have_64_bit:
         configure_error('The target compiler does not agree with configure '
                         'about the target bitness.')
 
 
 @depends(c_compiler)
 def default_debug_flags(compiler_info):
     # Debug info is ON by default.
-    if compiler_info.type == 'msvc':
+    if compiler_info.type in ('msvc', 'clang-cl'):
         return '-Zi'
     return '-g'
 
 option(env='MOZ_DEBUG_FLAGS',
        nargs=1,
        help='Debug compiler flags')
 
 imply_option('--enable-debug-symbols',
--- a/devtools/client/shared/test/unit/test_parseDeclarations.js
+++ b/devtools/client/shared/test/unit/test_parseDeclarations.js
@@ -348,16 +348,23 @@ const TEST_DATA = [
                 offsets: [4, 18], commentOffsets: [0, 21]}]
   },
 
   // Regression test for bug 1287620.
   {
     input: "color: blue \\9 no\\_need",
     expected: [{name: "color", value: "blue \\9 no_need", priority: "", offsets: [0, 23]}]
   },
+
+  // Regression test for bug 1297890 - don't paste tokens.
+  {
+    parseComments: true,
+    input: "stroke-dasharray: 1/*ThisIsAComment*/2;",
+    expected: [{name: "stroke-dasharray", value: "1 2", priority: "", offsets: [0, 39]}]
+  },
 ];
 
 function run_test() {
   run_basic_tests();
   run_comment_tests();
 }
 
 // Test parseDeclarations.
--- a/devtools/client/webconsole/new-console-output/components/message-container.js
+++ b/devtools/client/webconsole/new-console-output/components/message-container.js
@@ -73,17 +73,17 @@ function getMessageComponent(message) {
           return componentMap.get("EvaluationResult");
         // @TODO this is probably not the right behavior, but works for now.
         // Chrome doesn't distinguish between page errors and log messages. We
         // may want to remove the PageError component and just handle errors
         // with ConsoleApiCall.
         case MESSAGE_TYPE.LOG:
           return componentMap.get("PageError");
         default:
-          componentMap.get("DefaultRenderer");
+          return componentMap.get("DefaultRenderer");
       }
   }
 
   return componentMap.get("DefaultRenderer");
 }
 
 module.exports.MessageContainer = MessageContainer;
 
--- a/devtools/client/webconsole/new-console-output/components/message-types/console-api-call.js
+++ b/devtools/client/webconsole/new-console-output/components/message-types/console-api-call.js
@@ -40,16 +40,25 @@ function ConsoleApiCall(props) {
   } else if (message.parameters) {
     messageBody = message.parameters.map((grip, key) => GripMessageBody({grip, key}));
   } else {
     messageBody = message.messageText;
   }
 
   const icon = MessageIcon({level});
   const repeat = MessageRepeat({repeat: message.repeat});
+  const shouldRenderFrame = frame && frame.source !== "debugger eval code";
+  const location = dom.span({ className: "message-location devtools-monospace" },
+    shouldRenderFrame ? FrameView({
+      frame,
+      onClick: onViewSourceInDebugger,
+      showEmptyPathAsHost: true,
+      sourceMapService
+    }) : null
+  );
 
   let collapse = "";
   let attachment = "";
   if (stacktrace) {
     attachment = dom.div({className: "stacktrace devtools-monospace"},
       StackTrace({
         stacktrace: stacktrace,
         onViewSourceInDebugger: onViewSourceInDebugger
@@ -78,39 +87,29 @@ function ConsoleApiCall(props) {
   if (level) {
     classes.push(level);
   }
 
   if (open === true) {
     classes.push("open");
   }
 
-  const shouldRenderFrame = frame && frame.source !== "debugger eval code";
   return dom.div({
     className: classes.join(" ")
   },
     // @TODO add timestamp
     // @TODO add indent if necessary
     icon,
     collapse,
     dom.span({className: "message-body-wrapper"},
-      dom.span({},
-        dom.span({className: "message-flex-body"},
-          dom.span({className: "message-body devtools-monospace"},
-            messageBody
-          ),
-          repeat,
-          dom.span({ className: "message-location devtools-monospace" },
-            shouldRenderFrame ? FrameView({
-              frame,
-              onClick: onViewSourceInDebugger,
-              showEmptyPathAsHost: true,
-              sourceMapService
-            }) : null
-          )
+      dom.span({className: "message-flex-body"},
+        dom.span({className: "message-body devtools-monospace"},
+          messageBody
         ),
-        attachment
-      )
+        repeat,
+        location
+      ),
+      attachment
     )
   );
 }
 
 module.exports.ConsoleApiCall = ConsoleApiCall;
--- a/devtools/client/webconsole/new-console-output/components/message-types/console-command.js
+++ b/devtools/client/webconsole/new-console-output/components/message-types/console-command.js
@@ -42,15 +42,19 @@ function ConsoleCommand(props) {
 
   return dom.div({
     className: classes.join(" "),
     ariaLive: "off",
   },
     // @TODO add timestamp
     // @TODO add indent if necessary
     icon,
-    dom.span({className: "message-body-wrapper message-body devtools-monospace"},
-      dom.span({}, message.messageText)
+    dom.span({ className: "message-body-wrapper" },
+      dom.span({ className: "message-flex-body" },
+        dom.span({ className: "message-body devtools-monospace" },
+          message.messageText
+        )
+      )
     )
   );
 }
 
 module.exports.ConsoleCommand = ConsoleCommand;
--- a/devtools/client/webconsole/new-console-output/components/message-types/evaluation-result.js
+++ b/devtools/client/webconsole/new-console-output/components/message-types/evaluation-result.js
@@ -37,18 +37,19 @@ function EvaluationResult(props) {
   }
 
   return dom.div({
     className: classes.join(" ")
   },
     // @TODO add timestamp
     // @TODO add indent if needed with console.group
     icon,
-    dom.span(
-      {className: "message-body-wrapper message-body devtools-monospace"},
-      dom.span({},
-        GripMessageBody({grip: message.parameters})
+    dom.span({ className: "message-body-wrapper" },
+      dom.span({ className: "message-flex-body" },
+        dom.span({ className: "message-body devtools-monospace" },
+          GripMessageBody({grip: message.parameters})
+        )
       )
     )
   );
 }
 
 module.exports.EvaluationResult = EvaluationResult;
--- a/devtools/client/webconsole/new-console-output/components/message-types/page-error.js
+++ b/devtools/client/webconsole/new-console-output/components/message-types/page-error.js
@@ -7,49 +7,60 @@
 "use strict";
 
 // React & Redux
 const {
   createFactory,
   DOM: dom,
   PropTypes
 } = require("devtools/client/shared/vendor/react");
+const FrameView = createFactory(require("devtools/client/shared/components/frame"));
 const MessageRepeat = createFactory(require("devtools/client/webconsole/new-console-output/components/message-repeat").MessageRepeat);
 const MessageIcon = createFactory(require("devtools/client/webconsole/new-console-output/components/message-icon").MessageIcon);
 
 PageError.displayName = "PageError";
 
 PageError.propTypes = {
   message: PropTypes.object.isRequired,
 };
 
 function PageError(props) {
-  const { message } = props;
-  const {source, level} = message;
+  const { message, sourceMapService, onViewSourceInDebugger } = props;
+  const { source, level, frame } = message;
 
   const repeat = MessageRepeat({repeat: message.repeat});
   const icon = MessageIcon({level});
+  const shouldRenderFrame = frame && frame.source !== "debugger eval code";
+  const location = dom.span({ className: "message-location devtools-monospace" },
+    shouldRenderFrame ? FrameView({
+      frame,
+      onClick: onViewSourceInDebugger,
+      showEmptyPathAsHost: true,
+      sourceMapService
+    }) : null
+  );
 
   const classes = ["message"];
 
   if (source) {
     classes.push(source);
   }
 
   if (level) {
     classes.push(level);
   }
 
   return dom.div({
     className: classes.join(" ")
   },
     icon,
-    dom.span(
-      {className: "message-body-wrapper message-body devtools-monospace"},
-      dom.span({},
-        message.messageText
+    dom.span({ className: "message-body-wrapper" },
+      dom.span({ className: "message-flex-body" },
+        dom.span({ className: "message-body devtools-monospace" },
+          message.messageText
+        ),
+        repeat
       )
-    ),
-    repeat
+    )
   );
 }
 
 module.exports.PageError = PageError;
--- a/devtools/client/webconsole/new-console-output/test/components/evaluation-result.test.js
+++ b/devtools/client/webconsole/new-console-output/test/components/evaluation-result.test.js
@@ -20,11 +20,11 @@ describe("EvaluationResult component:", 
     const rendered = renderComponent(EvaluationResult, props);
 
     const messageBody = getMessageBody(rendered);
     expect(messageBody.textContent).toBe("Date1970-01-01T00:00:00.000Z");
   });
 });
 
 function getMessageBody(rendered) {
-  const queryPath = "div.message.cm-s-mozilla span.message-body-wrapper.message-body.devtools-monospace";
+  const queryPath = "div.message span.message-body-wrapper span.message-body";
   return rendered.querySelector(queryPath);
 }
--- a/devtools/client/webconsole/new-console-output/test/components/page-error.test.js
+++ b/devtools/client/webconsole/new-console-output/test/components/page-error.test.js
@@ -18,11 +18,11 @@ describe("PageError component:", () => {
     const rendered = renderComponent(PageError, {message});
 
     const messageBody = getMessageBody(rendered);
     expect(messageBody.textContent).toBe("ReferenceError: asdf is not defined");
   });
 });
 
 function getMessageBody(rendered) {
-  const queryPath = "div.message span.message-body-wrapper.message-body.devtools-monospace";
+  const queryPath = "div.message span.message-body-wrapper span.message-body";
   return rendered.querySelector(queryPath);
 }
--- a/devtools/client/webconsole/new-console-output/utils/messages.js
+++ b/devtools/client/webconsole/new-console-output/utils/messages.js
@@ -111,21 +111,28 @@ function transformPacket(packet) {
       let { pageError } = packet;
       let level = MESSAGE_LEVEL.ERROR;
       if (pageError.warning || pageError.strict) {
         level = MESSAGE_LEVEL.WARN;
       } else if (pageError.info) {
         level = MESSAGE_LEVEL.INFO;
       }
 
+      const frame = {
+        source: pageError.sourceName,
+        line: pageError.lineNumber,
+        column: pageError.columnNumber
+      };
+
       return new ConsoleMessage({
         source: MESSAGE_SOURCE.JAVASCRIPT,
         type: MESSAGE_TYPE.LOG,
         level,
         messageText: pageError.errorMessage,
+        frame,
       });
     }
 
     case "evaluationResult":
     default: {
       let { result } = packet;
 
       return new ConsoleMessage({
--- a/devtools/shared/css-parsing-utils.js
+++ b/devtools/shared/css-parsing-utils.js
@@ -362,16 +362,18 @@ function parseDeclarationsInternal(isCss
                                                 token.endOffset - 2);
         let newDecls = parseCommentDeclarations(isCssPropertyKnown, commentText,
                                                 token.startOffset,
                                                 token.endOffset);
 
         // Insert the new declarations just before the final element.
         let lastDecl = declarations.pop();
         declarations = [...declarations, ...newDecls, lastDecl];
+      } else {
+        current += " ";
       }
     } else {
       current += inputString.substring(token.startOffset, token.endOffset);
     }
   }
 
   // Handle whatever trailing properties or values might still be there
   if (current) {
--- a/docshell/test/test_bug590573.html
+++ b/docshell/test/test_bug590573.html
@@ -118,109 +118,115 @@ function dumpSHistory(theWindow)
     }
   }
 
   return sh;
 }
 
 var popup = window.open('file_bug590573_1.html');
 
+var gTestContinuation = null;
 var loads = 0;
 function pageLoad()
 {
   loads++;
   dump('pageLoad(loads=' + loads + ', page location=' + popup.location + ')\n');
 
   dumpSHistory(window);
 
-  if (loads == 1) {
-    is(popup.scrollY, 0, "test 1");
-    popup.scroll(0, 100);
-
-    popup.history.pushState('', '', '?pushed');
-    is(popup.scrollY, 100, "test 2");
-    popup.scroll(0, 200); // set state-2's position to 200
-
-    popup.history.back();
-    is(popup.scrollY, 100, "test 3");
-    popup.scroll(0, 150); // set original page's position to 150
-
-    popup.history.forward();
-    is(popup.scrollY, 200, "test 4");
-
-    popup.history.back();
-    is(popup.scrollY, 150, "test 5");
-
-    popup.history.forward();
-    is(popup.scrollY, 200, "test 6");
-
-    // At this point, the history looks like:
-    //   PATH                         POSITION
-    //   file_bug590573_1.html        150       <-- oldest
-    //   file_bug590573_1.html?pushed 200       <-- newest, current
-
-    // Now test that the scroll position is persisted when we have real
-    // navigations involved.  First, we need to spin the event loop so that the
-    // navigation doesn't replace our current history entry.
-
-    setTimeout(pageLoad, 0);
-  }
-  else if (loads == 2) {
-    page2LoadCallbackEnabled = true;
-    popup.location = 'file_bug590573_2.html';
+  if (!gTestContinuation) {
+    gTestContinuation = testBody();
   }
-  else if (loads == 3) {
-    ok(popup.location.href.match('file_bug590573_2.html$'),
-       "Location was " + popup.location +
-       " but should end with file_bug590573_2.html");
-
-    is(popup.scrollY, 0, "test 7");
-    popup.scroll(0, 300);
-
-    // We need to spin the event loop again before we go back, otherwise the
-    // scroll positions don't get updated properly.
-    setTimeout(pageLoad, 0);
-  }
-  else if (loads == 4) {
-    page1PageShowCallbackEnabled = true;
-    popup.history.back();
-  }
-  else if (loads == 5) {
-    // Spin the event loop again so that we get the right scroll positions.
-    setTimeout(pageLoad, 0);
-  }
-  else if (loads == 6) {
-    is(popup.location.search, "?pushed");
-    ok(popup.document.getElementById('div1'), 'page should have div1.');
-
-    is(popup.scrollY, 200, "test 8");
-
-    popup.history.back();
-    is(popup.scrollY, 150, "test 9");
-    popup.history.forward();
-
-    is(popup.scrollY, 200, "test 10");
-
-    // Spin one last time...
-    setTimeout(pageLoad, 0);
-  }
-  else if (loads == 7) {
-    page2PageShowCallbackEnabled = true;
-    popup.history.forward();
-  }
-  else if (loads == 8) {
-    // Bug 821821, on Android tegras we get 299 instead of 300 sometimes
-    if (popup.scrollY >= 299 && popup.scrollY <= 300) {
-      is(1, 1, "test 11");
-    } else {
-      is(1, 0, "test 11, got " + popup.scrollY + " for popup.scrollY instead of 299|300");
-    }
-    popup.close();
+  var ret = gTestContinuation.next();
+  if (ret.done) {
     SimpleTest.finish();
   }
-  else {
-    ok(false, "Got extra load!");
+}
+
+function* testBody()
+{
+  is(popup.scrollY, 0, "test 1");
+  popup.scroll(0, 100);
+
+  popup.history.pushState('', '', '?pushed');
+  is(popup.scrollY, 100, "test 2");
+  popup.scroll(0, 200); // set state-2's position to 200
+
+  popup.history.back();
+  is(popup.scrollY, 100, "test 3");
+  popup.scroll(0, 150); // set original page's position to 150
+
+  popup.history.forward();
+  is(popup.scrollY, 200, "test 4");
+
+  popup.history.back();
+  is(popup.scrollY, 150, "test 5");
+
+  popup.history.forward();
+  is(popup.scrollY, 200, "test 6");
+
+  // At this point, the history looks like:
+  //   PATH                         POSITION
+  //   file_bug590573_1.html        150       <-- oldest
+  //   file_bug590573_1.html?pushed 200       <-- newest, current
+
+  // Now test that the scroll position is persisted when we have real
+  // navigations involved.  First, we need to spin the event loop so that the
+  // navigation doesn't replace our current history entry.
+
+  setTimeout(pageLoad, 0);
+  yield;
+
+  page2LoadCallbackEnabled = true;
+  popup.location = 'file_bug590573_2.html';
+  yield;
+
+  ok(popup.location.href.match('file_bug590573_2.html$'),
+     "Location was " + popup.location +
+     " but should end with file_bug590573_2.html");
+
+  is(popup.scrollY, 0, "test 7");
+  popup.scroll(0, 300);
+
+  // We need to spin the event loop again before we go back, otherwise the
+  // scroll positions don't get updated properly.
+  setTimeout(pageLoad, 0);
+  yield;
+
+  page1PageShowCallbackEnabled = true;
+  popup.history.back();
+  yield;
+
+  // Spin the event loop again so that we get the right scroll positions.
+  setTimeout(pageLoad, 0);
+  yield;
+
+  is(popup.location.search, "?pushed");
+  ok(popup.document.getElementById('div1'), 'page should have div1.');
+
+  is(popup.scrollY, 200, "test 8");
+
+  popup.history.back();
+  is(popup.scrollY, 150, "test 9");
+  popup.history.forward();
+
+  is(popup.scrollY, 200, "test 10");
+
+  // Spin one last time...
+  setTimeout(pageLoad, 0);
+  yield;
+
+  page2PageShowCallbackEnabled = true;
+  popup.history.forward();
+  yield;
+
+  // Bug 821821, on Android tegras we get 299 instead of 300 sometimes
+  if (popup.scrollY >= 299 && popup.scrollY <= 300) {
+    is(1, 1, "test 11");
+  } else {
+    is(1, 0, "test 11, got " + popup.scrollY + " for popup.scrollY instead of 299|300");
   }
+  popup.close();
 }
 </script>
 
 </body>
 </html>
--- a/dom/events/EventStateManager.cpp
+++ b/dom/events/EventStateManager.cpp
@@ -521,18 +521,27 @@ EventStateManager::PreHandleEvent(nsPres
   if (!aEvent) {
     NS_ERROR("aEvent is null.  This should never happen.");
     return NS_ERROR_NULL_POINTER;
   }
 
   NS_WARN_IF_FALSE(!aTargetFrame ||
                    !aTargetFrame->GetContent() ||
                    aTargetFrame->GetContent() == aTargetContent ||
-                   aTargetFrame->GetContent()->GetFlattenedTreeParent() == aTargetContent,
+                   aTargetFrame->GetContent()->GetFlattenedTreeParent() == aTargetContent ||
+                   aTargetFrame->IsGeneratedContentFrame(),
                    "aTargetFrame should be related with aTargetContent");
+#if DEBUG
+  if (aTargetFrame && aTargetFrame->IsGeneratedContentFrame()) {
+    nsCOMPtr<nsIContent> targetContent;
+    aTargetFrame->GetContentForEvent(aEvent, getter_AddRefs(targetContent));
+    MOZ_ASSERT(aTargetContent == targetContent,
+               "Unexpected target for generated content frame!");
+  }
+#endif
 
   mCurrentTarget = aTargetFrame;
   mCurrentTargetContent = nullptr;
 
   // Do not take account eMouseEnterIntoWidget/ExitFromWidget so that loading
   // a page when user is not active doesn't change the state to active.
   WidgetMouseEvent* mouseEvent = aEvent->AsMouseEvent();
   if (aEvent->IsTrusted() &&
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -1700,26 +1700,27 @@ ContentParent::AllocateLayerTreeId(TabPa
 bool
 ContentParent::AllocateLayerTreeId(ContentParent* aContent,
                                    TabParent* aTopLevel, const TabId& aTabId,
                                    uint64_t* aId)
 {
   GPUProcessManager* gpu = GPUProcessManager::Get();
 
   *aId = gpu->AllocateLayerTreeId();
+
+  if (!aContent || !aTopLevel) {
+    return false;
+  }
+
   gpu->MapLayerTreeId(*aId, aContent->OtherPid());
 
   if (!gfxPlatform::AsyncPanZoomEnabled()) {
     return true;
   }
 
-  if (!aContent || !aTopLevel) {
-    return false;
-  }
-
   return aContent->SendNotifyLayerAllocated(aTabId, *aId);
 }
 
 bool
 ContentParent::RecvAllocateLayerTreeId(const ContentParentId& aCpId,
                                        const TabId& aTabId, uint64_t* aId)
 {
   // Protect against spoofing by a compromised child. aCpId must either
--- a/dom/media/android/AndroidMediaPluginHost.cpp
+++ b/dom/media/android/AndroidMediaPluginHost.cpp
@@ -171,20 +171,16 @@ static const char* GetOmxLibraryName()
 
   if (!IsOmxSupported())
     return nullptr;
 
 #if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
   if (version >= 17) {
     return "libomxpluginkk.so";
   }
-  else if (version < 14) {
-    // Below Honeycomb not supported
-    return nullptr;
-  }
 
   // Ice Cream Sandwich and Jellybean
   return "libomxplugin.so";
 
 #elif defined(ANDROID) && defined(MOZ_WIDGET_GONK)
   return "libomxplugin.so";
 #else
   return nullptr;
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -177,19 +177,24 @@ VideoSink::Start(int64_t aStartTime, con
     // test_streams_element_capture.html to time out.
     RefPtr<GenericPromise> p = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
     if (p) {
       RefPtr<VideoSink> self = this;
       mVideoSinkEndRequest.Begin(p->Then(mOwnerThread, __func__,
         [self] () {
           self->mVideoSinkEndRequest.Complete();
           self->TryUpdateRenderedVideoFrames();
+          // It is possible the video queue size is 0 and we have no frames to
+          // render. However, we need to call MaybeResolveEndPromise() to ensure
+          // mEndPromiseHolder is resolved.
+          self->MaybeResolveEndPromise();
         }, [self] () {
           self->mVideoSinkEndRequest.Complete();
           self->TryUpdateRenderedVideoFrames();
+          self->MaybeResolveEndPromise();
         }));
     }
 
     ConnectListener();
     // Run the render loop at least once so we can resolve the end promise
     // when video duration is 0.
     UpdateRenderedVideoFrames();
   }
@@ -413,22 +418,17 @@ VideoSink::UpdateRenderedVideoFrames()
 
   // The presentation end time of the last video frame displayed is either
   // the end time of the current frame, or if we dropped all frames in the
   // queue, the end time of the last frame we removed from the queue.
   RefPtr<MediaData> currentFrame = VideoQueue().PeekFront();
   mVideoFrameEndTime = std::max(mVideoFrameEndTime,
     currentFrame ? currentFrame->GetEndTime() : lastDisplayedFrameEndTime);
 
-  // All frames are rendered, Let's resolve the promise.
-  if (VideoQueue().IsFinished() &&
-      VideoQueue().GetSize() <= 1 &&
-      !mVideoSinkEndRequest.Exists()) {
-    mEndPromiseHolder.ResolveIfExists(true, __func__);
-  }
+  MaybeResolveEndPromise();
 
   RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
 
   // Get the timestamp of the next frame. Schedule the next update at
   // the start time of the next frame. If we don't have a next frame,
   // we will run render loops again upon incoming frames.
   nsTArray<RefPtr<MediaData>> frames;
   VideoQueue().GetFirstElements(2, &frames);
@@ -444,16 +444,28 @@ VideoSink::UpdateRenderedVideoFrames()
   mUpdateScheduler.Ensure(target, [self] () {
     self->UpdateRenderedVideoFramesByTimer();
   }, [self] () {
     self->UpdateRenderedVideoFramesByTimer();
   });
 }
 
 void
+VideoSink::MaybeResolveEndPromise()
+{
+  AssertOwnerThread();
+  // All frames are rendered, Let's resolve the promise.
+  if (VideoQueue().IsFinished() &&
+      VideoQueue().GetSize() <= 1 &&
+      !mVideoSinkEndRequest.Exists()) {
+    mEndPromiseHolder.ResolveIfExists(true, __func__);
+  }
+}
+
+void
 VideoSink::DumpDebugInfo()
 {
   AssertOwnerThread();
   DUMP_LOG(
     "IsStarted=%d IsPlaying=%d, VideoQueue: finished=%d size=%d, "
     "mVideoFrameEndTime=%lld mHasVideo=%d mVideoSinkEndRequest.Exists()=%d "
     "mEndPromiseHolder.IsEmpty()=%d",
     IsStarted(), IsPlaying(), VideoQueue().IsFinished(), VideoQueue().GetSize(),
--- a/dom/media/mediasink/VideoSink.h
+++ b/dom/media/mediasink/VideoSink.h
@@ -94,16 +94,18 @@ private:
 
   // If we have video, display a video frame if it's time for display has
   // arrived, otherwise sleep until it's time for the next frame. Update the
   // current frame time as appropriate, and trigger ready state update.
   // Called on the shared state machine thread.
   void UpdateRenderedVideoFrames();
   void UpdateRenderedVideoFramesByTimer();
 
+  void MaybeResolveEndPromise();
+
   void AssertOwnerThread() const
   {
     MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
   }
 
   MediaQueue<MediaData>& VideoQueue() const {
     return mVideoQueue;
   }
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -366,22 +366,28 @@ AudioBuffer::StealJSArrayDataIntoSharedC
 
   // "2. Detach all ArrayBuffers for arrays previously returned by
   // getChannelData on this AudioBuffer."
   // "3. Retain the underlying data buffers from those ArrayBuffers and return
   // references to them to the invoker."
   RefPtr<ThreadSharedFloatArrayBufferList> result =
     new ThreadSharedFloatArrayBufferList(mJSChannels.Length());
   for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
+    if (mJSChannels[i]) {
+      JS::ExposeObjectToActiveJS(mJSChannels[i]);
+    }
     JS::Rooted<JSObject*> arrayBufferView(aJSContext, mJSChannels[i]);
     bool isSharedMemory;
     JS::Rooted<JSObject*> arrayBuffer(aJSContext,
                                       JS_GetArrayBufferViewBuffer(aJSContext,
                                                                   arrayBufferView,
                                                                   &isSharedMemory));
+    if (arrayBuffer) {
+      JS::ExposeObjectToActiveJS(arrayBuffer);
+    }
     // The channel data arrays should all have originated in
     // RestoreJSChannelData, where they are created unshared.
     MOZ_ASSERT(!isSharedMemory);
     auto stolenData = arrayBuffer
       ? static_cast<float*>(JS_StealArrayBufferContents(aJSContext,
                                                         arrayBuffer))
       : nullptr;
     if (stolenData) {
--- a/dom/plugins/base/nsPluginInstanceOwner.cpp
+++ b/dom/plugins/base/nsPluginInstanceOwner.cpp
@@ -207,21 +207,16 @@ already_AddRefed<ImageContainer>
 nsPluginInstanceOwner::GetImageContainer()
 {
   if (!mInstance)
     return nullptr;
 
   RefPtr<ImageContainer> container;
 
 #if MOZ_WIDGET_ANDROID
-  // Right now we only draw with Gecko layers on Honeycomb and higher. See Paint()
-  // for what we do on other versions.
-  if (AndroidBridge::Bridge()->GetAPIVersion() < 11)
-    return nullptr;
-
   LayoutDeviceRect r = GetPluginRect();
 
   // NotifySize() causes Flash to do a bunch of stuff like ask for surfaces to render
   // into, set y-flip flags, etc, so we do this at the beginning.
   float resolution = mPluginFrame->PresContext()->PresShell()->GetCumulativeResolution();
   ScreenSize screenSize = (r * LayoutDeviceToScreenScale(resolution)).Size();
   mInstance->NotifySize(nsIntSize::Truncate(screenSize.width, screenSize.height));
 
--- a/dom/tests/mochitest/general/historyframes.html
+++ b/dom/tests/mochitest/general/historyframes.html
@@ -24,27 +24,27 @@ function is() { testWin.is.apply(testWin
 var gFrame = null;
 
 var gState = null;
 
 window.addEventListener("popstate", function(aEvent) {
   gState = aEvent.state;
 }, false);
 
-function waitForLoad(aCallback) {
+function waitForLoad() {
   function listener() {
     gFrame.removeEventListener("load", listener, false);
-    SimpleTest.executeSoon(aCallback);
+    SimpleTest.executeSoon(continue_test);
   }
 
   gFrame.addEventListener("load", listener, false);
 }
 
-function loadContent(aURL, aCallback) {
-  waitForLoad(aCallback);
+function loadContent(aURL) {
+  waitForLoad();
 
   gFrame.src = aURL;
 }
 
 function getURL() {
   return gFrame.contentDocument.documentURI;
 }
 
@@ -56,52 +56,61 @@ var START = "data:text/html,<p%20id='tex
 var URL1 = "data:text/html,<p%20id='text'>Test1</p>";
 var URL2 = "data:text/html,<p%20id='text'>Test2</p>";
 
 function run_test() {
   window.history.pushState("START", window.location);
 
   gFrame = document.getElementById("iframe");
 
-  test_basic_inner_navigation();
+  continue_test();
+}
+
+function* test_body()
+{
+  yield* test_basic_inner_navigation();
+  yield* test_state_navigation();
 }
 
-function end_test() {
-  testWin.done();
+var gTestContinuation = null;
+
+function continue_test() {
+  if (!gTestContinuation) {
+    gTestContinuation = test_body();
+  }
+  var ret = gTestContinuation.next();
+  if (ret.done) {
+    testWin.done();
+  }
 }
 
-function test_basic_inner_navigation() {
+function* test_basic_inner_navigation() {
   // Navigate the inner frame a few times
-  loadContent(URL1, function() {
-    is(getURL(), URL1, "URL should be correct");
-    is(getContent(), "Test1", "Page should be correct");
+  yield loadContent(URL1);
+  is(getURL(), URL1, "URL should be correct");
+  is(getContent(), "Test1", "Page should be correct");
 
-    loadContent(URL2, function() {
-      is(getURL(), URL2, "URL should be correct");
-      is(getContent(), "Test2", "Page should be correct");
+  yield loadContent(URL2);
+
+  is(getURL(), URL2, "URL should be correct");
+  is(getContent(), "Test2", "Page should be correct");
 
-      // Test that history is working
-      waitForLoad(function() {
-        is(getURL(), URL1, "URL should be correct");
-        is(getContent(), "Test1", "Page should be correct");
+  // Test that history is working
+  window.history.back();
+  yield waitForLoad();
+  is(getURL(), URL1, "URL should be correct");
+  is(getContent(), "Test1", "Page should be correct");
 
-        waitForLoad(function() {
-          is(getURL(), URL2, "URL should be correct");
-          is(getContent(), "Test2", "Page should be correct");
-
-          test_state_navigation();
-        });
-        window.history.forward();
-      });
-      window.history.back();
-    });
-  });
+  window.history.forward();
+  yield waitForLoad();
+  is(getURL(), URL2, "URL should be correct");
+  is(getContent(), "Test2", "Page should be correct");
 }
 
-function test_state_navigation() {
+function* test_state_navigation() {
   window.history.pushState("STATE1", window.location);
 
   is(getURL(), URL2, "URL should be correct");
   is(getContent(), "Test2", "Page should be correct");
 
   window.history.pushState("STATE2", window.location);
 
   is(getURL(), URL2, "URL should be correct");
@@ -121,32 +130,27 @@ function test_state_navigation() {
 
   window.history.back();
   window.history.back();
 
   is(gState, "START", "State should be correct");
   is(getURL(), URL2, "URL should be correct");
   is(getContent(), "Test2", "Page should be correct");
 
-  waitForLoad(function() {
-    is(getURL(), URL1, "URL should be correct");
-    is(getContent(), "Test1", "Page should be correct");
+  window.history.back();
+  is(gState, "START", "State should be correct");
+  yield waitForLoad();
 
-    waitForLoad(function() {
-      is(gState, "START", "State should be correct");
-      is(getURL(), START, "URL should be correct");
-      is(getContent(), "Start", "Page should be correct");
-
-      end_test();
-    });
-
-    window.history.back();
-
-    is(gState, "START", "State should be correct");
-  });
+  is(getURL(), URL1, "URL should be correct");
+  is(getContent(), "Test1", "Page should be correct");
 
   window.history.back();
+  is(gState, "START", "State should be correct after going back twice");
+  yield waitForLoad();
+
   is(gState, "START", "State should be correct");
+  is(getURL(), START, "URL should be correct");
+  is(getContent(), "Start", "Page should be correct");
 }
 </script>
 </pre>
 </body>
 </html>
--- a/gfx/gl/AndroidSurfaceTexture.cpp
+++ b/gfx/gl/AndroidSurfaceTexture.cpp
@@ -41,35 +41,25 @@ public:
     if (NS_IsMainThread()) {
       mCallback->Run();
       return;
     }
     NS_DispatchToMainThread(mCallback);
   }
 };
 
-static bool
-IsSTSupported()
-{
-  return AndroidBridge::Bridge()->GetAPIVersion() >= 14; /* ICS */
-}
-
 already_AddRefed<AndroidSurfaceTexture>
 AndroidSurfaceTexture::Create()
 {
   return Create(nullptr, 0);
 }
 
 already_AddRefed<AndroidSurfaceTexture>
 AndroidSurfaceTexture::Create(GLContext* aContext, GLuint aTexture)
 {
-  if (!IsSTSupported()) {
-    return nullptr;
-  }
-
   RefPtr<AndroidSurfaceTexture> st = new AndroidSurfaceTexture();
   if (!st->Init(aContext, aTexture)) {
     printf_stderr("Failed to initialize AndroidSurfaceTexture");
     st = nullptr;
   }
 
   return st.forget();
 }
--- a/ipc/chromium/src/chrome/common/ipc_channel_posix.cc
+++ b/ipc/chromium/src/chrome/common/ipc_channel_posix.cc
@@ -574,17 +574,22 @@ bool Channel::ChannelImpl::ProcessOutgoi
     Message* msg = output_queue_.front();
 
     struct msghdr msgh = {0};
 
     static const int tmp = CMSG_SPACE(sizeof(
         int[FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE]));
     char buf[tmp];
 
-    if (partial_write_iter_.isNothing() &&
+    if (partial_write_iter_.isNothing()) {
+      Pickle::BufferList::IterImpl iter(msg->Buffers());
+      partial_write_iter_.emplace(iter);
+    }
+
+    if (partial_write_iter_.value().Data() == msg->Buffers().Start() &&
         !msg->file_descriptor_set()->empty()) {
       // This is the first chunk of a message which has descriptors to send
       struct cmsghdr *cmsg;
       const unsigned num_fds = msg->file_descriptor_set()->size();
 
       if (num_fds > FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE) {
         CHROMIUM_LOG(FATAL) << "Too many file descriptors!";
         // This should not be reached.
@@ -606,21 +611,16 @@ bool Channel::ChannelImpl::ProcessOutgoi
       msg->set_fd_cookie(++last_pending_fd_id_);
 #endif
     }
 
     struct iovec iov[kMaxIOVecSize];
     size_t iov_count = 0;
     size_t amt_to_write = 0;
 
-    if (partial_write_iter_.isNothing()) {
-      Pickle::BufferList::IterImpl iter(msg->Buffers());
-      partial_write_iter_.emplace(iter);
-    }
-
     // How much of this message have we written so far?
     Pickle::BufferList::IterImpl iter = partial_write_iter_.value();
 
     // Store the unwritten part of the first segment to write into the iovec.
     iov[0].iov_base = const_cast<char*>(iter.Data());
     iov[0].iov_len = iter.RemainingInSegment();
     amt_to_write += iov[0].iov_len;
     iter.Advance(msg->Buffers(), iov[0].iov_len);
--- a/ipc/mscom/DispatchForwarder.h
+++ b/ipc/mscom/DispatchForwarder.h
@@ -10,17 +10,17 @@
 #include <oaidl.h>
 
 #include "mozilla/mscom/Interceptor.h"
 #include "mozilla/mscom/Ptr.h"
 
 namespace mozilla {
 namespace mscom {
 
-class DispatchForwarder : public IDispatch
+class DispatchForwarder final : public IDispatch
 {
 public:
   static HRESULT Create(IInterceptor* aInterceptor,
                         STAUniquePtr<IDispatch>& aTarget, IUnknown** aOutput);
 
   // IUnknown
   STDMETHODIMP QueryInterface(REFIID riid, void** ppv) override;
   STDMETHODIMP_(ULONG) AddRef() override;
--- a/ipc/mscom/InterceptorLog.cpp
+++ b/ipc/mscom/InterceptorLog.cpp
@@ -38,21 +38,24 @@ using mozilla::MutexAutoLock;
 using mozilla::NewNonOwningRunnableMethod;
 using mozilla::services::GetObserverService;
 using mozilla::StaticAutoPtr;
 using mozilla::TimeDuration;
 using mozilla::TimeStamp;
 
 namespace {
 
-class ShutdownEvent : public nsIObserver
+class ShutdownEvent final : public nsIObserver
 {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSIOBSERVER
+
+private:
+  ~ShutdownEvent() {}
 };
 
 NS_IMPL_ISUPPORTS(ShutdownEvent, nsIObserver)
 
 class Logger
 {
 public:
   explicit Logger(const nsACString& aLeafBaseName);
--- a/ipc/mscom/MainThreadHandoff.h
+++ b/ipc/mscom/MainThreadHandoff.h
@@ -15,18 +15,18 @@
 #include "mozilla/Mutex.h"
 #include "nsTArray.h"
 
 namespace mozilla {
 namespace mscom {
 
 struct ArrayData;
 
-class MainThreadHandoff : public IInterceptorSink
-                        , public ICallFrameWalker
+class MainThreadHandoff final : public IInterceptorSink
+                              , public ICallFrameWalker
 {
 public:
   static HRESULT Create(IInterceptorSink** aOutput);
 
   template <typename Interface>
   static HRESULT WrapInterface(STAUniquePtr<Interface> aTargetInterface,
                                Interface** aOutInterface)
   {
--- a/layout/generic/nsColumnSetFrame.cpp
+++ b/layout/generic/nsColumnSetFrame.cpp
@@ -78,17 +78,17 @@ nsColumnSetFrame::PaintColumnRule(nsRend
     ruleStyle = colStyle->mColumnRuleStyle;
 
   nsPresContext* presContext = PresContext();
   nscoord ruleWidth = colStyle->GetComputedColumnRuleWidth();
   if (!ruleWidth)
     return;
 
   nscolor ruleColor =
-    GetVisitedDependentColor(eCSSProperty__moz_column_rule_color);
+    GetVisitedDependentColor(eCSSProperty_column_rule_color);
 
   // In order to re-use a large amount of code, we treat the column rule as a border.
   // We create a new border style object and fill in all the details of the column rule as
   // the left border. PaintBorder() does all the rendering for us, so we not
   // only save an enormous amount of code but we'll support all the line styles that
   // we support on borders!
   nsStyleBorder border(presContext);
   Sides skipSides;
--- a/layout/generic/nsPluginFrame.cpp
+++ b/layout/generic/nsPluginFrame.cpp
@@ -1383,29 +1383,28 @@ LayerState
 nsPluginFrame::GetLayerState(nsDisplayListBuilder* aBuilder,
                              LayerManager* aManager)
 {
   if (!mInstanceOwner)
     return LAYER_NONE;
 
 #ifdef MOZ_WIDGET_ANDROID
   // We always want a layer on Honeycomb and later
-  if (AndroidBridge::Bridge()->GetAPIVersion() >= 11)
-    return LAYER_ACTIVE;
-#endif
-
+  return LAYER_ACTIVE;
+#else
   if (mInstanceOwner->NeedsScrollImageLayer()) {
     return LAYER_ACTIVE;
   }
 
   if (!mInstanceOwner->UseAsyncRendering()) {
     return LAYER_NONE;
   }
 
   return LAYER_ACTIVE_FORCE;
+#endif
 }
 
 class PluginFrameDidCompositeObserver final : public ClientLayerManager::
   DidCompositeObserver
 {
 public:
   PluginFrameDidCompositeObserver(nsPluginInstanceOwner* aOwner, ClientLayerManager* aLayerManager)
     : mInstanceOwner(aOwner),
--- a/layout/style/Declaration.cpp
+++ b/layout/style/Declaration.cpp
@@ -737,17 +737,17 @@ Declaration::GetValue(nsCSSPropertyID aP
     case eCSSProperty_border_top:
     case eCSSProperty_border_right:
     case eCSSProperty_border_bottom:
     case eCSSProperty_border_left:
     case eCSSProperty_border_inline_start:
     case eCSSProperty_border_inline_end:
     case eCSSProperty_border_block_start:
     case eCSSProperty_border_block_end:
-    case eCSSProperty__moz_column_rule:
+    case eCSSProperty_column_rule:
     case eCSSProperty_outline: {
       const nsCSSPropertyID* subprops =
         nsCSSProps::SubpropertyEntryFor(aProperty);
       MOZ_ASSERT(StringEndsWith(nsCSSProps::GetStringValue(subprops[2]),
                                 NS_LITERAL_CSTRING("-color")),
                  "third subprop must be the color property");
       const nsCSSValue *colorValue = data->ValueFor(subprops[2]);
       bool isMozUseTextColor =
@@ -1143,17 +1143,17 @@ Declaration::GetValue(nsCSSPropertyID aP
       const nsCSSValue &midValue =
         *data->ValueFor(eCSSProperty_marker_mid);
       const nsCSSValue &startValue =
         *data->ValueFor(eCSSProperty_marker_start);
       if (endValue == midValue && midValue == startValue)
         AppendValueToString(eCSSProperty_marker_end, aValue, aSerialization);
       break;
     }
-    case eCSSProperty__moz_columns: {
+    case eCSSProperty_columns: {
       // Two values, column-count and column-width, separated by a space.
       const nsCSSPropertyID* subprops =
         nsCSSProps::SubpropertyEntryFor(aProperty);
       AppendValueToString(subprops[0], aValue, aSerialization);
       aValue.Append(char16_t(' '));
       AppendValueToString(subprops[1], aValue, aSerialization);
       break;
     }
--- a/layout/style/StyleAnimationValue.cpp
+++ b/layout/style/StyleAnimationValue.cpp
@@ -3660,17 +3660,17 @@ StyleAnimationValue::ExtractComputedValu
               GetComputedBorder().side_);                                     \
           break;
         BORDER_WIDTH_CASE(eCSSProperty_border_bottom_width, bottom)
         BORDER_WIDTH_CASE(eCSSProperty_border_left_width, left)
         BORDER_WIDTH_CASE(eCSSProperty_border_right_width, right)
         BORDER_WIDTH_CASE(eCSSProperty_border_top_width, top)
         #undef BORDER_WIDTH_CASE
 
-        case eCSSProperty__moz_column_rule_width:
+        case eCSSProperty_column_rule_width:
           aComputedValue.SetCoordValue(
             static_cast<const nsStyleColumn*>(styleStruct)->
               GetComputedColumnRuleWidth());
           break;
 
         case eCSSProperty_border_bottom_color:
           ExtractBorderColor(aStyleContext, styleStruct, NS_SIDE_BOTTOM,
                              aComputedValue);
@@ -3693,30 +3693,30 @@ StyleAnimationValue::ExtractComputedValu
             static_cast<const nsStyleOutline*>(styleStruct);
           nscolor color;
           if (!styleOutline->GetOutlineColor(color))
             color = aStyleContext->StyleColor()->mColor;
           aComputedValue.SetColorValue(color);
           break;
         }
 
-        case eCSSProperty__moz_column_rule_color: {
+        case eCSSProperty_column_rule_color: {
           const nsStyleColumn *styleColumn =
             static_cast<const nsStyleColumn*>(styleStruct);
           nscolor color;
           if (styleColumn->mColumnRuleColorIsForeground) {
             color = aStyleContext->StyleColor()->mColor;
           } else {
             color = styleColumn->mColumnRuleColor;
           }
           aComputedValue.SetColorValue(color);
           break;
         }
 
-        case eCSSProperty__moz_column_count: {
+        case eCSSProperty_column_count: {
           const nsStyleColumn *styleColumn =
             static_cast<const nsStyleColumn*>(styleStruct);
           if (styleColumn->mColumnCount == NS_STYLE_COLUMN_COUNT_AUTO) {
             aComputedValue.SetAutoValue();
           } else {
             aComputedValue.SetIntValue(styleColumn->mColumnCount,
                                        eUnit_Integer);
           }
--- a/layout/style/nsCSSParser.cpp
+++ b/layout/style/nsCSSParser.cpp
@@ -7453,19 +7453,19 @@ static const nsCSSPropertyID kBorderBloc
   eCSSProperty_border_block_start_color
 };
 static const nsCSSPropertyID kBorderBlockEndIDs[] = {
   eCSSProperty_border_block_end_width,
   eCSSProperty_border_block_end_style,
   eCSSProperty_border_block_end_color
 };
 static const nsCSSPropertyID kColumnRuleIDs[] = {
-  eCSSProperty__moz_column_rule_width,
-  eCSSProperty__moz_column_rule_style,
-  eCSSProperty__moz_column_rule_color
+  eCSSProperty_column_rule_width,
+  eCSSProperty_column_rule_style,
+  eCSSProperty_column_rule_color
 };
 
 bool
 CSSParserImpl::ParseEnum(nsCSSValue& aValue,
                          const KTableEntry aKeywordTable[])
 {
   nsSubstring* ident = NextIdent();
   if (nullptr == ident) {
@@ -11635,19 +11635,19 @@ CSSParserImpl::ParsePropertyByFunction(n
     return ParseBoxCornerRadius(aPropID);
 
   case eCSSProperty_box_shadow:
   case eCSSProperty_text_shadow:
     return ParseShadowList(aPropID);
 
   case eCSSProperty_clip:
     return ParseRect(eCSSProperty_clip);
-  case eCSSProperty__moz_columns:
+  case eCSSProperty_columns:
     return ParseColumns();
-  case eCSSProperty__moz_column_rule:
+  case eCSSProperty_column_rule:
     return ParseBorderSide(kColumnRuleIDs, false);
   case eCSSProperty_content:
     return ParseContent();
   case eCSSProperty_counter_increment:
   case eCSSProperty_counter_reset:
     return ParseCounterData(aPropID);
   case eCSSProperty_cursor:
     return ParseCursor();
@@ -13797,18 +13797,18 @@ CSSParserImpl::ParseColumns()
 {
   // We use a similar "fake value" hack to ParseListStyle, because
   // "auto" is acceptable for both column-count and column-width.
   // If the fake "auto" value is found, and one of the real values isn't,
   // that means the fake auto value is meant for the real value we didn't
   // find.
   static const nsCSSPropertyID columnIDs[] = {
     eCSSPropertyExtra_x_auto_value,
-    eCSSProperty__moz_column_count,
-    eCSSProperty__moz_column_width
+    eCSSProperty_column_count,
+    eCSSProperty_column_width
   };
   const int32_t numProps = MOZ_ARRAY_LENGTH(columnIDs);
 
   nsCSSValue values[numProps];
   int32_t found = ParseChoice(values, columnIDs, numProps);
   if (found < 1) {
     return false;
   }
--- a/layout/style/nsCSSPropList.h
+++ b/layout/style/nsCSSPropList.h
@@ -1478,100 +1478,100 @@ CSS_PROP_SVG(
     CSS_PROPERTY_PARSE_VALUE,
     "",
     VARIANT_HK,
     kColorInterpolationKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_COLUMN(
     -moz-column-count,
-    _moz_column_count,
+    column_count,
     CSS_PROP_DOMPROP_PREFIXED(ColumnCount),
     CSS_PROPERTY_PARSE_VALUE |
         // Need to reject 0 in addition to negatives.  If we accept 0, we
         // need to change NS_STYLE_COLUMN_COUNT_AUTO to something else.
         CSS_PROPERTY_VALUE_AT_LEAST_ONE,
     "",
     VARIANT_AHI,
     nullptr,
     offsetof(nsStyleColumn, mColumnCount),
     eStyleAnimType_Custom)
 CSS_PROP_COLUMN(
     -moz-column-fill,
-    _moz_column_fill,
+    column_fill,
     CSS_PROP_DOMPROP_PREFIXED(ColumnFill),
     CSS_PROPERTY_PARSE_VALUE,
     "",
     VARIANT_HK,
     kColumnFillKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_None)
 CSS_PROP_COLUMN(
     -moz-column-gap,
-    _moz_column_gap,
+    column_gap,
     CSS_PROP_DOMPROP_PREFIXED(ColumnGap),
     CSS_PROPERTY_PARSE_VALUE |
         CSS_PROPERTY_VALUE_NONNEGATIVE,
     "",
     VARIANT_HL | VARIANT_NORMAL | VARIANT_CALC,
     nullptr,
     offsetof(nsStyleColumn, mColumnGap),
     eStyleAnimType_Coord)
 CSS_PROP_SHORTHAND(
     -moz-column-rule,
-    _moz_column_rule,
+    column_rule,
     CSS_PROP_DOMPROP_PREFIXED(ColumnRule),
     CSS_PROPERTY_PARSE_FUNCTION,
     "")
 CSS_PROP_COLUMN(
     -moz-column-rule-color,
-    _moz_column_rule_color,
+    column_rule_color,
     CSS_PROP_DOMPROP_PREFIXED(ColumnRuleColor),
     CSS_PROPERTY_PARSE_VALUE |
         CSS_PROPERTY_IGNORED_WHEN_COLORS_DISABLED,
     "",
     VARIANT_HCK,
     kBorderColorKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Custom)
 CSS_PROP_COLUMN(
     -moz-column-rule-style,
-    _moz_column_rule_style,
+    column_rule_style,
     CSS_PROP_DOMPROP_PREFIXED(ColumnRuleStyle),
     CSS_PROPERTY_PARSE_VALUE,
     "",
     VARIANT_HK,
     kBorderStyleKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_None)
 CSS_PROP_COLUMN(
     -moz-column-rule-width,
-    _moz_column_rule_width,
+    column_rule_width,
     CSS_PROP_DOMPROP_PREFIXED(ColumnRuleWidth),
     CSS_PROPERTY_PARSE_VALUE |
         CSS_PROPERTY_VALUE_NONNEGATIVE,
     "",
     VARIANT_HKL | VARIANT_CALC,
     kBorderWidthKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Custom)
 CSS_PROP_COLUMN(
     -moz-column-width,
-    _moz_column_width,
+    column_width,
     CSS_PROP_DOMPROP_PREFIXED(ColumnWidth),
     CSS_PROPERTY_PARSE_VALUE |
         CSS_PROPERTY_VALUE_NONNEGATIVE,
     "",
     VARIANT_AHL | VARIANT_CALC,
     nullptr,
     offsetof(nsStyleColumn, mColumnWidth),
     eStyleAnimType_Coord)
 CSS_PROP_SHORTHAND(
     -moz-columns,
-    _moz_columns,
+    columns,
     CSS_PROP_DOMPROP_PREFIXED(Columns),
     CSS_PROPERTY_PARSE_FUNCTION,
     "")
 CSS_PROP_DISPLAY(
     contain,
     contain,
     Contain,
     CSS_PROPERTY_PARSE_VALUE |
--- a/layout/style/nsCSSProps.cpp
+++ b/layout/style/nsCSSProps.cpp
@@ -2837,27 +2837,27 @@ static const nsCSSPropertyID gOutlineSub
   // It also depends on the color being third.
   eCSSProperty_outline_width,
   eCSSProperty_outline_style,
   eCSSProperty_outline_color,
   eCSSProperty_UNKNOWN
 };
 
 static const nsCSSPropertyID gColumnsSubpropTable[] = {
-  eCSSProperty__moz_column_count,
-  eCSSProperty__moz_column_width,
+  eCSSProperty_column_count,
+  eCSSProperty_column_width,
   eCSSProperty_UNKNOWN
 };
 
 static const nsCSSPropertyID gColumnRuleSubpropTable[] = {
   // nsCSSDeclaration.cpp outputs the subproperties in this order.
   // It also depends on the color being third.
-  eCSSProperty__moz_column_rule_width,
-  eCSSProperty__moz_column_rule_style,
-  eCSSProperty__moz_column_rule_color,
+  eCSSProperty_column_rule_width,
+  eCSSProperty_column_rule_style,
+  eCSSProperty_column_rule_color,
   eCSSProperty_UNKNOWN
 };
 
 static const nsCSSPropertyID gFlexSubpropTable[] = {
   eCSSProperty_flex_grow,
   eCSSProperty_flex_shrink,
   eCSSProperty_flex_basis,
   eCSSProperty_UNKNOWN
--- a/layout/style/nsComputedDOMStylePropertyList.h
+++ b/layout/style/nsComputedDOMStylePropertyList.h
@@ -270,24 +270,24 @@ COMPUTED_STYLE_PROP(border_left_colors, 
 COMPUTED_STYLE_PROP(border_right_colors,           BorderRightColors)
 COMPUTED_STYLE_PROP(border_top_colors,             BorderTopColors)
 COMPUTED_STYLE_PROP(box_align,                     BoxAlign)
 COMPUTED_STYLE_PROP(box_direction,                 BoxDirection)
 COMPUTED_STYLE_PROP(box_flex,                      BoxFlex)
 COMPUTED_STYLE_PROP(box_ordinal_group,             BoxOrdinalGroup)
 COMPUTED_STYLE_PROP(box_orient,                    BoxOrient)
 COMPUTED_STYLE_PROP(box_pack,                      BoxPack)
-COMPUTED_STYLE_PROP(_moz_column_count,             ColumnCount)
-COMPUTED_STYLE_PROP(_moz_column_fill,              ColumnFill)
-COMPUTED_STYLE_PROP(_moz_column_gap,               ColumnGap)
-//// COMPUTED_STYLE_PROP(_moz_column_rule,         ColumnRule)
-COMPUTED_STYLE_PROP(_moz_column_rule_color,        ColumnRuleColor)
-COMPUTED_STYLE_PROP(_moz_column_rule_style,        ColumnRuleStyle)
-COMPUTED_STYLE_PROP(_moz_column_rule_width,        ColumnRuleWidth)
-COMPUTED_STYLE_PROP(_moz_column_width,             ColumnWidth)
+COMPUTED_STYLE_PROP(column_count,                  ColumnCount)
+COMPUTED_STYLE_PROP(column_fill,                   ColumnFill)
+COMPUTED_STYLE_PROP(column_gap,                    ColumnGap)
+//// COMPUTED_STYLE_PROP(column_rule,              ColumnRule)
+COMPUTED_STYLE_PROP(column_rule_color,             ColumnRuleColor)
+COMPUTED_STYLE_PROP(column_rule_style,             ColumnRuleStyle)
+COMPUTED_STYLE_PROP(column_rule_width,             ColumnRuleWidth)
+COMPUTED_STYLE_PROP(column_width,                  ColumnWidth)
 COMPUTED_STYLE_PROP(float_edge,                    FloatEdge)
 COMPUTED_STYLE_PROP(force_broken_image_icon,       ForceBrokenImageIcon)
 COMPUTED_STYLE_PROP(image_region,                  ImageRegion)
 COMPUTED_STYLE_PROP(orient,                        Orient)
 COMPUTED_STYLE_PROP(osx_font_smoothing,            OsxFontSmoothing)
 COMPUTED_STYLE_PROP(_moz_outline_radius_bottomLeft, OutlineRadiusBottomLeft)
 COMPUTED_STYLE_PROP(_moz_outline_radius_bottomRight,OutlineRadiusBottomRight)
 COMPUTED_STYLE_PROP(_moz_outline_radius_topLeft,    OutlineRadiusTopLeft)
--- a/layout/style/nsStyleContext.cpp
+++ b/layout/style/nsStyleContext.cpp
@@ -1512,17 +1512,17 @@ nsStyleContext::GetVisitedDependentColor
 {
   NS_ASSERTION(aProperty == eCSSProperty_color ||
                aProperty == eCSSProperty_background_color ||
                aProperty == eCSSProperty_border_top_color ||
                aProperty == eCSSProperty_border_right_color ||
                aProperty == eCSSProperty_border_bottom_color ||
                aProperty == eCSSProperty_border_left_color ||
                aProperty == eCSSProperty_outline_color ||
-               aProperty == eCSSProperty__moz_column_rule_color ||
+               aProperty == eCSSProperty_column_rule_color ||
                aProperty == eCSSProperty_text_decoration_color ||
                aProperty == eCSSProperty_text_emphasis_color ||
                aProperty == eCSSProperty__webkit_text_fill_color ||
                aProperty == eCSSProperty__webkit_text_stroke_color ||
                aProperty == eCSSProperty_fill ||
                aProperty == eCSSProperty_stroke,
                "we need to add to nsStyleContext::CalcStyleDifference");
 
--- a/mfbt/BufferList.h
+++ b/mfbt/BufferList.h
@@ -223,16 +223,17 @@ class BufferList : private AllocPolicy
     bool Done() const
     {
       return mData == mDataEnd;
     }
   };
 
   // Special convenience method that returns Iter().Data().
   char* Start() { return mSegments[0].mData; }
+  const char* Start() const { return mSegments[0].mData; }
 
   IterImpl Iter() const { return IterImpl(*this); }
 
   // Copies aSize bytes from aData into the BufferList. The storage for these
   // bytes may be split across multiple buffers. Size() is increased by aSize.
   inline bool WriteBytes(const char* aData, size_t aSize);
 
   // Copies possibly non-contiguous byte range starting at aIter into
--- a/mobile/android/base/android-services.mozbuild
+++ b/mobile/android/base/android-services.mozbuild
@@ -888,16 +888,17 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/delegates/KeyUploadDelegate.java',
     'sync/delegates/MetaGlobalDelegate.java',
     'sync/delegates/WipeServerDelegate.java',
     'sync/EngineSettings.java',
     'sync/ExtendedJSONObject.java',
     'sync/GlobalSession.java',
     'sync/HTTPFailureException.java',
     'sync/InfoCollections.java',
+    'sync/InfoConfiguration.java',
     'sync/InfoCounts.java',
     'sync/JSONRecordFetcher.java',
     'sync/KeyBundleProvider.java',
     'sync/MetaGlobal.java',
     'sync/MetaGlobalException.java',
     'sync/MetaGlobalMissingEnginesException.java',
     'sync/MetaGlobalNotSetException.java',
     'sync/middleware/Crypto5MiddlewareRepository.java',
@@ -1009,32 +1010,40 @@ sync_java_files = [TOPSRCDIR + '/mobile/
     'sync/repositories/Repository.java',
     'sync/repositories/RepositorySession.java',
     'sync/repositories/RepositorySessionBundle.java',
     'sync/repositories/Server11Repository.java',
     'sync/repositories/Server11RepositorySession.java',
     'sync/repositories/StoreFailedException.java',
     'sync/repositories/StoreTracker.java',
     'sync/repositories/StoreTrackingRepositorySession.java',
+    'sync/repositories/uploaders/BatchingUploader.java',
+    'sync/repositories/uploaders/BatchMeta.java',
+    'sync/repositories/uploaders/BufferSizeTracker.java',
+    'sync/repositories/uploaders/MayUploadProvider.java',
+    'sync/repositories/uploaders/Payload.java',
+    'sync/repositories/uploaders/PayloadUploadDelegate.java',
+    'sync/repositories/uploaders/RecordUploadRunnable.java',
     'sync/Server11PreviousPostFailedException.java',
     'sync/Server11RecordPostFailedException.java',
     'sync/setup/activities/ActivityUtils.java',
     'sync/setup/activities/WebURLFinder.java',
     'sync/setup/Constants.java',
     'sync/setup/InvalidSyncKeyException.java',
     'sync/SharedPreferencesClientsDataDelegate.java',
     'sync/stage/AbstractNonRepositorySyncStage.java',
     'sync/stage/AbstractSessionManagingSyncStage.java',
     'sync/stage/AndroidBrowserBookmarksServerSyncStage.java',
     'sync/stage/AndroidBrowserHistoryServerSyncStage.java',
     'sync/stage/CheckPreconditionsStage.java',
     'sync/stage/CompletedStage.java',
     'sync/stage/EnsureCrypto5KeysStage.java',
     'sync/stage/FennecTabsServerSyncStage.java',
     'sync/stage/FetchInfoCollectionsStage.java',
+    'sync/stage/FetchInfoConfigurationStage.java',
     'sync/stage/FetchMetaGlobalStage.java',
     'sync/stage/FormHistoryServerSyncStage.java',
     'sync/stage/GlobalSyncStage.java',
     'sync/stage/NoSuchStageException.java',
     'sync/stage/PasswordsServerSyncStage.java',
     'sync/stage/SafeConstrainedServer11Repository.java',
     'sync/stage/ServerSyncStage.java',
     'sync/stage/SyncClientsEngineStage.java',
--- a/mobile/android/base/java/org/mozilla/gecko/BrowserApp.java
+++ b/mobile/android/base/java/org/mozilla/gecko/BrowserApp.java
@@ -678,16 +678,18 @@ public class BrowserApp extends GeckoApp
             mBrowserSearch.setUserVisibleHint(false);
         }
 
         setBrowserToolbarListeners();
 
         mFindInPageBar = (FindInPageBar) findViewById(R.id.find_in_page);
         mMediaCastingBar = (MediaCastingBar) findViewById(R.id.media_casting);
 
+        mDoorhangerOverlay = findViewById(R.id.doorhanger_overlay);
+
         EventDispatcher.getInstance().registerGeckoThreadListener((GeckoEventListener)this,
             "Gecko:DelayedStartup",
             "Menu:Open",
             "Menu:Update",
             "LightweightTheme:Update",
             "Search:Keyword",
             "Prompt:ShowTop",
             "Video:Play");
@@ -983,19 +985,16 @@ public class BrowserApp extends GeckoApp
             return;
         }
 
         super.onBackPressed();
     }
 
     @Override
     public void onAttachedToWindow() {
-        mDoorhangerOverlay = findViewById(R.id.doorhanger_overlay);
-        mDoorhangerOverlay.setVisibility(View.VISIBLE);
-
         // We can't show the first run experience until Gecko has finished initialization (bug 1077583).
         checkFirstrun(this, new SafeIntent(getIntent()));
     }
 
     @Override
     protected void processTabQueue() {
         if (TabQueueHelper.TAB_QUEUE_ENABLED && mInitialized) {
             ThreadUtils.postToBackgroundThread(new Runnable() {
--- a/mobile/android/base/java/org/mozilla/gecko/media/RemoteManager.java
+++ b/mobile/android/base/java/org/mozilla/gecko/media/RemoteManager.java
@@ -23,17 +23,17 @@ import java.util.LinkedList;
 import java.util.List;
 
 public final class RemoteManager implements IBinder.DeathRecipient {
     private static final String LOGTAG = "GeckoRemoteManager";
     private static final boolean DEBUG = false;
     private static RemoteManager sRemoteManager = null;
 
     public synchronized static RemoteManager getInstance() {
-        if (sRemoteManager == null){
+        if (sRemoteManager == null) {
             sRemoteManager = new RemoteManager();
         }
 
         sRemoteManager.init();
         return sRemoteManager;
     }
 
     private List<CodecProxy> mProxies = new LinkedList<CodecProxy>();
rename from mobile/android/base/resources/color-large-v11/tab_new_tab_strip_colors.xml
rename to mobile/android/base/resources/color/tab_new_tab_strip_colors.xml
rename from mobile/android/base/resources/color-large-v11/tab_strip_item_bg.xml
rename to mobile/android/base/resources/color/tab_strip_item_bg.xml
rename from mobile/android/base/resources/color-large-v11/tab_strip_item_title.xml
rename to mobile/android/base/resources/color/tab_strip_item_title.xml
rename from mobile/android/base/resources/color-large-v11/tabs_counter_text_color.xml
rename to mobile/android/base/resources/color/tabs_counter_text_color.xml
rename from mobile/android/base/resources/drawable-large-v11/tab_strip_button.xml
rename to mobile/android/base/resources/drawable/tab_strip_button.xml
rename from mobile/android/base/resources/drawable-large-v11/tab_strip_divider.xml
rename to mobile/android/base/resources/drawable/tab_strip_divider.xml
--- a/mobile/android/base/resources/layout/gecko_app.xml
+++ b/mobile/android/base/resources/layout/gecko_app.xml
@@ -88,17 +88,16 @@
                           android:layout_height="match_parent"/>
 
             </FrameLayout>
 
             <View android:id="@+id/doorhanger_overlay"
                   android:layout_width="match_parent"
                   android:layout_height="match_parent"
                   android:background="@color/dark_transparent_overlay"
-                  android:visibility="gone"
                   android:alpha="0"
                   android:layerType="hardware"/>
 
         </RelativeLayout>
 
         <org.mozilla.gecko.FindInPageBar android:id="@+id/find_in_page"
                                          android:layout_width="match_parent"
                                          android:layout_height="wrap_content"
rename from mobile/android/base/resources/layout-large-v11/tab_strip.xml
rename to mobile/android/base/resources/layout/tab_strip.xml
rename from mobile/android/base/resources/layout-large-v11/tab_strip_inner.xml
rename to mobile/android/base/resources/layout/tab_strip_inner.xml
rename from mobile/android/base/resources/layout-large-v11/tab_strip_item.xml
rename to mobile/android/base/resources/layout/tab_strip_item.xml
rename from mobile/android/base/resources/layout-large-v11/tab_strip_item_view.xml
rename to mobile/android/base/resources/layout/tab_strip_item_view.xml
deleted file mode 100644
--- a/mobile/android/base/resources/values/layout.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- This Source Code Form is subject to the terms of the Mozilla Public
-   - License, v. 2.0. If a copy of the MPL was not distributed with this
-   - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
-
-<resources>
-    <!-- These items are v11+ resources but are referenced in code shipped with
-         API 9 builds. Since v11+ resources don't ship on API 9 builds, in order
-         for the resource ID to be found (and thus compilation to succeed), we
-         provide dummy values below. -->
-    <item type="layout" name="tab_strip">@null</item>
-</resources>
\ No newline at end of file
--- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/notifications/NotificationHelper.java
+++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/notifications/NotificationHelper.java
@@ -213,17 +213,17 @@ public final class NotificationHelper im
                 builder.appendQueryParameter(ACTION_ID_ATTR, action.getString(ACTION_ID_ATTR));
             } else {
                 Log.i(LOGTAG, "button event with no name");
             }
         } catch (JSONException ex) {
             Log.i(LOGTAG, "buildNotificationPendingIntent, error parsing", ex);
         }
         final Intent notificationIntent = buildNotificationIntent(message, builder);
-        PendingIntent res = PendingIntent.getActivity(mContext, 0, notificationIntent, PendingIntent.FLAG_UPDATE_CURRENT);
+        PendingIntent res = PendingIntent.getBroadcast(mContext, 0, notificationIntent, PendingIntent.FLAG_UPDATE_CURRENT);
         return res;
     }
 
     private void showNotification(JSONObject message) {
         NotificationCompat.Builder builder = new NotificationCompat.Builder(mContext);
 
         // These attributes are required
         final String id;
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/GlobalSession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/GlobalSession.java
@@ -27,16 +27,17 @@ import org.mozilla.gecko.sync.net.SyncSt
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.stage.AndroidBrowserBookmarksServerSyncStage;
 import org.mozilla.gecko.sync.stage.AndroidBrowserHistoryServerSyncStage;
 import org.mozilla.gecko.sync.stage.CheckPreconditionsStage;
 import org.mozilla.gecko.sync.stage.CompletedStage;
 import org.mozilla.gecko.sync.stage.EnsureCrypto5KeysStage;
 import org.mozilla.gecko.sync.stage.FennecTabsServerSyncStage;
 import org.mozilla.gecko.sync.stage.FetchInfoCollectionsStage;
+import org.mozilla.gecko.sync.stage.FetchInfoConfigurationStage;
 import org.mozilla.gecko.sync.stage.FetchMetaGlobalStage;
 import org.mozilla.gecko.sync.stage.FormHistoryServerSyncStage;
 import org.mozilla.gecko.sync.stage.GlobalSyncStage;
 import org.mozilla.gecko.sync.stage.GlobalSyncStage.Stage;
 import org.mozilla.gecko.sync.stage.NoSuchStageException;
 import org.mozilla.gecko.sync.stage.PasswordsServerSyncStage;
 import org.mozilla.gecko.sync.stage.SyncClientsEngineStage;
 import org.mozilla.gecko.sync.stage.UploadMetaGlobalStage;
@@ -172,16 +173,18 @@ public class GlobalSession implements Ht
   }
 
   protected void prepareStages() {
     Map<Stage, GlobalSyncStage> stages = new EnumMap<Stage, GlobalSyncStage>(Stage.class);
 
     stages.put(Stage.checkPreconditions,      new CheckPreconditionsStage());
     stages.put(Stage.fetchInfoCollections,    new FetchInfoCollectionsStage());
     stages.put(Stage.fetchMetaGlobal,         new FetchMetaGlobalStage());
+    stages.put(Stage.fetchInfoConfiguration,  new FetchInfoConfigurationStage(
+            config.infoConfigurationURL(), getAuthHeaderProvider()));
     stages.put(Stage.ensureKeysStage,         new EnsureCrypto5KeysStage());
 
     stages.put(Stage.syncClientsEngine,       new SyncClientsEngineStage());
 
     stages.put(Stage.syncTabs,                new FennecTabsServerSyncStage());
     stages.put(Stage.syncPasswords,           new PasswordsServerSyncStage());
     stages.put(Stage.syncBookmarks,           new AndroidBrowserBookmarksServerSyncStage());
     stages.put(Stage.syncHistory,             new AndroidBrowserHistoryServerSyncStage());
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/InfoConfiguration.java
@@ -0,0 +1,93 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync;
+
+import android.util.Log;
+
+import org.mozilla.gecko.background.common.log.Logger;
+
+/**
+ * Wraps and provides access to configuration data returned from info/configuration.
+ * Docs: https://docs.services.mozilla.com/storage/apis-1.5.html#general-info
+ *
+ * - <bold>max_request_bytes</bold>: the maximum size in bytes of the overall
+ *   HTTP request body that will be accepted by the server.
+ *
+ * - <bold>max_post_records</bold>: the maximum number of records that can be
+ *   uploaded to a collection in a single POST request.
+ *
+ * - <bold>max_post_bytes</bold>: the maximum combined size in bytes of the
+ *   record payloads that can be uploaded to a collection in a single
+ *   POST request.
+ *
+ * - <bold>max_total_records</bold>: the maximum number of records that can be
+ *   uploaded to a collection as part of a batched upload.
+ *
+ * - <bold>max_total_bytes</bold>: the maximum combined size in bytes of the
+ *   record payloads that can be uploaded to a collection as part of
+ *   a batched upload.
+ */
+public class InfoConfiguration {
+    private static final String LOG_TAG = "InfoConfiguration";
+
+    public static final String MAX_REQUEST_BYTES = "max_request_bytes";
+    public static final String MAX_POST_RECORDS = "max_post_records";
+    public static final String MAX_POST_BYTES = "max_post_bytes";
+    public static final String MAX_TOTAL_RECORDS = "max_total_records";
+    public static final String MAX_TOTAL_BYTES = "max_total_bytes";
+
+    private static final long DEFAULT_MAX_REQUEST_BYTES = 1048576;
+    private static final long DEFAULT_MAX_POST_RECORDS = 100;
+    private static final long DEFAULT_MAX_POST_BYTES = 1048576;
+    private static final long DEFAULT_MAX_TOTAL_RECORDS = 10000;
+    private static final long DEFAULT_MAX_TOTAL_BYTES = 104857600;
+
+    // While int's upper range is (2^31-1), which in bytes is equivalent to 2.147 GB, let's be optimistic
+    // about the future and use long here, so that this code works if the server decides its clients are
+    // all on fiber and have congress-library sized bookmark collections.
+    // Record counts are long for the sake of simplicity.
+    public final long maxRequestBytes;
+    public final long maxPostRecords;
+    public final long maxPostBytes;
+    public final long maxTotalRecords;
+    public final long maxTotalBytes;
+
+    public InfoConfiguration() {
+        Logger.debug(LOG_TAG, "info/configuration is unavailable, using defaults");
+
+        maxRequestBytes = DEFAULT_MAX_REQUEST_BYTES;
+        maxPostRecords = DEFAULT_MAX_POST_RECORDS;
+        maxPostBytes = DEFAULT_MAX_POST_BYTES;
+        maxTotalRecords = DEFAULT_MAX_TOTAL_RECORDS;
+        maxTotalBytes = DEFAULT_MAX_TOTAL_BYTES;
+    }
+
+    public InfoConfiguration(final ExtendedJSONObject record) {
+        Logger.debug(LOG_TAG, "info/configuration is " + record.toJSONString());
+
+        maxRequestBytes = getValueFromRecord(record, MAX_REQUEST_BYTES, DEFAULT_MAX_REQUEST_BYTES);
+        maxPostRecords = getValueFromRecord(record, MAX_POST_RECORDS, DEFAULT_MAX_POST_RECORDS);
+        maxPostBytes = getValueFromRecord(record, MAX_POST_BYTES, DEFAULT_MAX_POST_BYTES);
+        maxTotalRecords = getValueFromRecord(record, MAX_TOTAL_RECORDS, DEFAULT_MAX_TOTAL_RECORDS);
+        maxTotalBytes = getValueFromRecord(record, MAX_TOTAL_BYTES, DEFAULT_MAX_TOTAL_BYTES);
+    }
+
+    private static Long getValueFromRecord(ExtendedJSONObject record, String key, long defaultValue) {
+        if (!record.containsKey(key)) {
+            return defaultValue;
+        }
+
+        try {
+            Long val = record.getLong(key);
+            if (val == null) {
+                return defaultValue;
+            }
+            return val;
+        } catch (NumberFormatException e) {
+            Log.w(LOG_TAG, "Could not parse key " + key + " from record: " + record, e);
+            return defaultValue;
+        }
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/SyncConfiguration.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/SyncConfiguration.java
@@ -25,16 +25,18 @@ import android.content.SharedPreferences
 
 public class SyncConfiguration {
   private static final String LOG_TAG = "SyncConfiguration";
 
   // These must be set in GlobalSession's constructor.
   public URI             clusterURL;
   public KeyBundle       syncKeyBundle;
 
+  public InfoConfiguration infoConfiguration;
+
   public CollectionKeys  collectionKeys;
   public InfoCollections infoCollections;
   public MetaGlobal      metaGlobal;
   public String          syncID;
 
   protected final String username;
 
   /**
@@ -361,16 +363,20 @@ public class SyncConfiguration {
   protected String infoBaseURL() {
     return clusterURL + "/info/";
   }
 
   public String infoCollectionsURL() {
     return infoBaseURL() + "collections";
   }
 
+  public String infoConfigurationURL() {
+    return infoBaseURL() + "configuration";
+  }
+
   public String infoCollectionCountsURL() {
     return infoBaseURL() + "collection_counts";
   }
 
   public String metaURL() {
     return storageURL() + "/meta/global";
   }
 
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/delegates/JSONRecordFetchDelegate.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/delegates/JSONRecordFetchDelegate.java
@@ -4,15 +4,16 @@
 
 package org.mozilla.gecko.sync.delegates;
 
 import org.mozilla.gecko.sync.ExtendedJSONObject;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 
 /**
  * A fairly generic delegate to handle fetches of single JSON object blobs, as
- * provided by <code>info/collections</code> and <code>info/collection_counts</code>.
+ * provided by <code>info/configuration</code>, <code>info/collections</code>
+ * and <code>info/collection_counts</code>.
  */
 public interface JSONRecordFetchDelegate {
   public void handleSuccess(ExtendedJSONObject body);
   public void handleFailure(SyncStorageResponse response);
   public void handleError(Exception e);
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncResponse.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncResponse.java
@@ -4,34 +4,42 @@
 
 package org.mozilla.gecko.sync.net;
 
 import org.mozilla.gecko.sync.Utils;
 
 import ch.boye.httpclientandroidlib.HttpResponse;
 
 public class SyncResponse extends MozResponse {
+  public static final String X_WEAVE_BACKOFF = "x-weave-backoff";
+  public static final String X_BACKOFF = "x-backoff";
+  public static final String X_LAST_MODIFIED = "x-last-modified";
+  public static final String X_WEAVE_TIMESTAMP = "x-weave-timestamp";
+  public static final String X_WEAVE_RECORDS = "x-weave-records";
+  public static final String X_WEAVE_QUOTA_REMAINING = "x-weave-quota-remaining";
+  public static final String X_WEAVE_ALERT = "x-weave-alert";
+
   public SyncResponse(HttpResponse res) {
     super(res);
   }
 
   /**
    * @return A number of seconds, or -1 if the 'X-Weave-Backoff' header was not
    *         present.
    */
   public int weaveBackoffInSeconds() throws NumberFormatException {
-    return this.getIntegerHeader("x-weave-backoff");
+    return this.getIntegerHeader(X_WEAVE_BACKOFF);
   }
 
   /**
    * @return A number of seconds, or -1 if the 'X-Backoff' header was not
    *         present.
    */
   public int xBackoffInSeconds() throws NumberFormatException {
-    return this.getIntegerHeader("x-backoff");
+    return this.getIntegerHeader(X_BACKOFF);
   }
 
   /**
    * Extract a number of seconds, or -1 if none of the specified headers were present.
    *
    * @param includeRetryAfter
    *          if <code>true</code>, the Retry-After header is excluded. This is
    *          useful for processing non-error responses where a Retry-After
@@ -75,41 +83,46 @@ public class SyncResponse extends MozRes
     long totalBackoff = totalBackoffInSeconds(true);
     if (totalBackoff < 0) {
       return -1;
     } else {
       return 1000 * totalBackoff;
     }
   }
 
+  public long normalizedWeaveTimestamp() {
+    return normalizedTimestampForHeader(X_WEAVE_TIMESTAMP);
+  }
+
   /**
-   * The timestamp returned from a Sync server is a decimal number of seconds,
+   * Timestamps returned from a Sync server are decimal numbers of seconds,
    * e.g., 1323393518.04.
    *
    * We want milliseconds since epoch.
    *
    * @return milliseconds since the epoch, as a long, or -1 if the header
    *         was missing or invalid.
    */
-  public long normalizedWeaveTimestamp() {
-    String h = "x-weave-timestamp";
-    if (!this.hasHeader(h)) {
+  public long normalizedTimestampForHeader(String header) {
+    if (!this.hasHeader(header)) {
       return -1;
     }
 
-    return Utils.decimalSecondsToMilliseconds(this.response.getFirstHeader(h).getValue());
+    return Utils.decimalSecondsToMilliseconds(
+            this.response.getFirstHeader(header).getValue()
+    );
   }
 
   public int weaveRecords() throws NumberFormatException {
-    return this.getIntegerHeader("x-weave-records");
+    return this.getIntegerHeader(X_WEAVE_RECORDS);
   }
 
   public int weaveQuotaRemaining() throws NumberFormatException {
-    return this.getIntegerHeader("x-weave-quota-remaining");
+    return this.getIntegerHeader(X_WEAVE_QUOTA_REMAINING);
   }
 
   public String weaveAlert() {
-    if (this.hasHeader("x-weave-alert")) {
-      return this.response.getFirstHeader("x-weave-alert").getValue();
+    if (this.hasHeader(X_WEAVE_ALERT)) {
+      return this.response.getFirstHeader(X_WEAVE_ALERT).getValue();
     }
     return null;
   }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncStorageRequest.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncStorageRequest.java
@@ -115,17 +115,19 @@ public class SyncStorageRequest implemen
     }
 
     @Override
     public void handleHttpResponse(HttpResponse response) {
       Logger.debug(LOG_TAG, "SyncStorageResourceDelegate handling response: " + response.getStatusLine() + ".");
       SyncStorageRequestDelegate d = this.request.delegate;
       SyncStorageResponse res = new SyncStorageResponse(response);
       // It is the responsibility of the delegate handlers to completely consume the response.
-      if (res.wasSuccessful()) {
+      // In context of a Sync storage response, success is either a 200 OK or 202 Accepted.
+      // 202 is returned during uploads of data in a batching mode, indicating that more is expected.
+      if (res.getStatusCode() == 200 || res.getStatusCode() == 202) {
         d.handleRequestSuccess(res);
       } else {
         Logger.warn(LOG_TAG, "HTTP request failed.");
         try {
           Logger.warn(LOG_TAG, "HTTP response body: " + res.getErrorMessage());
         } catch (Exception e) {
           Logger.error(LOG_TAG, "Can't fetch HTTP response body.", e);
         }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncStorageResponse.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/net/SyncStorageResponse.java
@@ -62,11 +62,24 @@ public class SyncStorageResponse extends
   public SyncStorageResponse(HttpResponse res) {
     super(res);
   }
 
   public String getErrorMessage() throws IllegalStateException, IOException {
     return SyncStorageResponse.getServerErrorMessage(this.body().trim());
   }
 
+  /**
+   * This header gives the last-modified time of the target resource as seen during processing of
+   * the request, and will be included in all success responses (200, 201, 204).
+   * When given in response to a write request, this will be equal to the server’s current time and
+   * to the new last-modified time of any BSOs created or changed by the request.
+   */
+  public String getLastModified() {
+    if (!response.containsHeader(X_LAST_MODIFIED)) {
+      return null;
+    }
+    return response.getFirstHeader(X_LAST_MODIFIED).getValue();
+  }
+
   // TODO: Content-Type and Content-Length validation.
 
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConstrainedServer11Repository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/ConstrainedServer11Repository.java
@@ -2,31 +2,32 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories;
 
 import java.net.URISyntaxException;
 
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 
 /**
  * A kind of Server11Repository that supports explicit setting of limit and sort on operations.
  *
  * @author rnewman
  *
  */
 public class ConstrainedServer11Repository extends Server11Repository {
 
   private String sort = null;
   private long limit  = -1;
 
-  public ConstrainedServer11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections, long limit, String sort) throws URISyntaxException {
-    super(collection, storageURL, authHeaderProvider, infoCollections);
+  public ConstrainedServer11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections, InfoConfiguration infoConfiguration, long limit, String sort) throws URISyntaxException {
+    super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration);
     this.limit = limit;
     this.sort  = sort;
   }
 
   @Override
   protected String getDefaultSort() {
     return sort;
   }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11Repository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11Repository.java
@@ -4,57 +4,63 @@
 
 package org.mozilla.gecko.sync.repositories;
 
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 
 import android.content.Context;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
 
 /**
  * A Server11Repository implements fetching and storing against the Sync 1.1 API.
  * It doesn't do crypto: that's the job of the middleware.
  *
  * @author rnewman
  */
 public class Server11Repository extends Repository {
   protected String collection;
   protected URI collectionURI;
   protected final AuthHeaderProvider authHeaderProvider;
   protected final InfoCollections infoCollections;
 
+  private final InfoConfiguration infoConfiguration;
+
   /**
    * Construct a new repository that fetches and stores against the Sync 1.1. API.
    *
    * @param collection name.
    * @param storageURL full URL to storage endpoint.
    * @param authHeaderProvider to use in requests; may be null.
    * @param infoCollections instance; must not be null.
    * @throws URISyntaxException
    */
-  public Server11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections) throws URISyntaxException {
+  public Server11Repository(@NonNull String collection, @NonNull String storageURL, AuthHeaderProvider authHeaderProvider, @NonNull InfoCollections infoCollections, @NonNull InfoConfiguration infoConfiguration) throws URISyntaxException {
     if (collection == null) {
       throw new IllegalArgumentException("collection must not be null");
     }
     if (storageURL == null) {
       throw new IllegalArgumentException("storageURL must not be null");
     }
     if (infoCollections == null) {
       throw new IllegalArgumentException("infoCollections must not be null");
     }
     this.collection = collection;
     this.collectionURI = new URI(storageURL + (storageURL.endsWith("/") ? collection : "/" + collection));
     this.authHeaderProvider = authHeaderProvider;
     this.infoCollections = infoCollections;
+    this.infoConfiguration = infoConfiguration;
   }
 
   @Override
   public void createSession(RepositorySessionCreationDelegate delegate,
                             Context context) {
     delegate.onSessionCreated(new Server11RepositorySession(this));
   }
 
@@ -114,9 +120,18 @@ public class Server11Repository extends 
 
   public AuthHeaderProvider getAuthHeaderProvider() {
     return authHeaderProvider;
   }
 
   public boolean updateNeeded(long lastSyncTimestamp) {
     return infoCollections.updateNeeded(collection, lastSyncTimestamp);
   }
+
+  @Nullable
+  public Long getCollectionLastModified() {
+    return infoCollections.getTimestamp(collection);
+  }
+
+  public InfoConfiguration getInfoConfiguration() {
+    return infoConfiguration;
+  }
 }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11RepositorySession.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/Server11RepositorySession.java
@@ -1,102 +1,40 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.repositories;
 
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.UnsupportedEncodingException;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
 
-import org.json.simple.JSONArray;
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.sync.CryptoRecord;
 import org.mozilla.gecko.sync.DelayedWorkTracker;
-import org.mozilla.gecko.sync.ExtendedJSONObject;
 import org.mozilla.gecko.sync.HTTPFailureException;
-import org.mozilla.gecko.sync.Server11PreviousPostFailedException;
-import org.mozilla.gecko.sync.Server11RecordPostFailedException;
-import org.mozilla.gecko.sync.UnexpectedJSONException;
 import org.mozilla.gecko.sync.crypto.KeyBundle;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.net.SyncResponse;
 import org.mozilla.gecko.sync.net.SyncStorageCollectionRequest;
-import org.mozilla.gecko.sync.net.SyncStorageRequest;
-import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.net.WBOCollectionRequestDelegate;
-import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionBeginDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionGuidsSinceDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionWipeDelegate;
 import org.mozilla.gecko.sync.repositories.domain.Record;
-
-import ch.boye.httpclientandroidlib.entity.ContentProducer;
-import ch.boye.httpclientandroidlib.entity.EntityTemplate;
+import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader;
 
 public class Server11RepositorySession extends RepositorySession {
-  private static byte[] recordsStart;
-  private static byte[] recordSeparator;
-  private static byte[] recordsEnd;
-
-  static {
-    try {
-      recordsStart    = "[\n".getBytes("UTF-8");
-      recordSeparator = ",\n".getBytes("UTF-8");
-      recordsEnd      = "\n]\n".getBytes("UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      // These won't fail.
-    }
-  }
-
   public static final String LOG_TAG = "Server11Session";
 
-  private static final int UPLOAD_BYTE_THRESHOLD = 1024 * 1024;    // 1MB.
-  private static final int UPLOAD_ITEM_THRESHOLD = 50;
-  private static final int PER_RECORD_OVERHEAD   = 2;              // Comma, newline.
-  // {}, newlines, but we get to skip one record overhead.
-  private static final int PER_BATCH_OVERHEAD    = 5 - PER_RECORD_OVERHEAD;
-
-  /**
-   * Return the X-Weave-Timestamp header from <code>response</code>, or the
-   * current time if it is missing.
-   * <p>
-   * <b>Warning:</b> this can cause the timestamp of <code>response</code> to
-   * cross domains (from server clock to local clock), which could cause records
-   * to be skipped on account of clock drift. This should never happen, because
-   * <i>every</i> server response should have a well-formed X-Weave-Timestamp
-   * header.
-   *
-   * @param response
-   *          The <code>SyncStorageResponse</code> to interrogate.
-   * @return Normalized timestamp in milliseconds.
-   */
-  public static long getNormalizedTimestamp(SyncStorageResponse response) {
-    long normalizedTimestamp = -1;
-    try {
-      normalizedTimestamp = response.normalizedWeaveTimestamp();
-    } catch (NumberFormatException e) {
-      Logger.warn(LOG_TAG, "Malformed X-Weave-Timestamp header received.", e);
-    }
-    if (-1 == normalizedTimestamp) {
-      Logger.warn(LOG_TAG, "Computing stand-in timestamp from local clock. Clock drift could cause records to be skipped.");
-      normalizedTimestamp = System.currentTimeMillis();
-    }
-    return normalizedTimestamp;
-  }
-
   /**
    * Used to track outstanding requests, so that we can abort them as needed.
    */
   private final Set<SyncStorageCollectionRequest> pending = Collections.synchronizedSet(new HashSet<SyncStorageCollectionRequest>());
 
   @Override
   public void abort() {
     super.abort();
@@ -145,17 +83,18 @@ public class Server11RepositorySession e
       return null;
     }
 
     @Override
     public void handleRequestSuccess(SyncStorageResponse response) {
       Logger.debug(LOG_TAG, "Fetch done.");
       removeRequestFromPending();
 
-      final long normalizedTimestamp = getNormalizedTimestamp(response);
+      // This will change overall and will use X_LAST_MODIFIED in Bug 730142.
+      final long normalizedTimestamp = response.normalizedTimestampForHeader(SyncResponse.X_WEAVE_TIMESTAMP);
       Logger.debug(LOG_TAG, "Fetch completed. Timestamp is " + normalizedTimestamp);
 
       // When we're done processing other events, finish.
       workTracker.delayWorkItem(new Runnable() {
         @Override
         public void run() {
           Logger.debug(LOG_TAG, "Delayed onFetchCompleted running.");
           // TODO: verify number of returned records.
@@ -200,37 +139,36 @@ public class Server11RepositorySession e
 
     // TODO: this implies that we've screwed up our inheritance chain somehow.
     @Override
     public KeyBundle keyBundle() {
       return null;
     }
   }
 
-
   Server11Repository serverRepository;
-  AtomicLong uploadTimestamp = new AtomicLong(0);
-
-  private void bumpUploadTimestamp(long ts) {
-    while (true) {
-      long existing = uploadTimestamp.get();
-      if (existing > ts) {
-        return;
-      }
-      if (uploadTimestamp.compareAndSet(existing, ts)) {
-        return;
-      }
-    }
-  }
+  private BatchingUploader uploader;
 
   public Server11RepositorySession(Repository repository) {
     super(repository);
     serverRepository = (Server11Repository) repository;
   }
 
+  public Server11Repository getServerRepository() {
+    return serverRepository;
+  }
+
+  @Override
+  public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
+    this.delegate = delegate;
+
+    // Now that we have the delegate, we can initialize our uploader.
+    this.uploader = new BatchingUploader(this, storeWorkQueue, delegate);
+  }
+
   private String flattenIDs(String[] guids) {
     // Consider using Utils.toDelimitedString if and when the signature changes
     // to Collection<String> guids.
     if (guids.length == 0) {
       return "";
     }
     if (guids.length == 1) {
       return guids[0];
@@ -309,309 +247,39 @@ public class Server11RepositorySession e
   public void wipe(RepositorySessionWipeDelegate delegate) {
     if (!isActive()) {
       delegate.onWipeFailed(new InactiveSessionException(null));
       return;
     }
     // TODO: implement wipe.
   }
 
-  protected Object recordsBufferMonitor = new Object();
-
-  /**
-   * Data of outbound records.
-   * <p>
-   * We buffer the data (rather than the <code>Record</code>) so that we can
-   * flush the buffer based on outgoing transmission size.
-   * <p>
-   * Access should be synchronized on <code>recordsBufferMonitor</code>.
-   */
-  protected ArrayList<byte[]> recordsBuffer = new ArrayList<byte[]>();
-
-  /**
-   * GUIDs of outbound records.
-   * <p>
-   * Used to fail entire outgoing uploads.
-   * <p>
-   * Access should be synchronized on <code>recordsBufferMonitor</code>.
-   */
-  protected ArrayList<String> recordGuidsBuffer = new ArrayList<String>();
-  protected int byteCount = PER_BATCH_OVERHEAD;
-
   @Override
   public void store(Record record) throws NoStoreDelegateException {
     if (delegate == null) {
       throw new NoStoreDelegateException();
     }
-    this.enqueue(record);
-  }
-
-  /**
-   * Batch incoming records until some reasonable threshold (e.g., 50),
-   * some size limit is hit (probably way less than 3MB!), or storeDone
-   * is received.
-   * @param record
-   */
-  protected void enqueue(Record record) {
-    // JSONify and store the bytes, rather than the record.
-    byte[] json = record.toJSONBytes();
-    int delta   = json.length;
-    synchronized (recordsBufferMonitor) {
-      if ((delta + byteCount     > UPLOAD_BYTE_THRESHOLD) ||
-          (recordsBuffer.size() >= UPLOAD_ITEM_THRESHOLD)) {
 
-        // POST the existing contents, then enqueue.
-        flush();
-      }
-      recordsBuffer.add(json);
-      recordGuidsBuffer.add(record.guid);
-      byteCount += PER_RECORD_OVERHEAD + delta;
+    // If delegate was set, this shouldn't happen.
+    if (uploader == null) {
+      throw new IllegalStateException("Uploader haven't been initialized");
     }
-  }
 
-  // Asynchronously upload records.
-  // Must be locked!
-  protected void flush() {
-    if (recordsBuffer.size() > 0) {
-      final ArrayList<byte[]> outgoing = recordsBuffer;
-      final ArrayList<String> outgoingGuids = recordGuidsBuffer;
-      RepositorySessionStoreDelegate uploadDelegate = this.delegate;
-      storeWorkQueue.execute(new RecordUploadRunnable(uploadDelegate, outgoing, outgoingGuids, byteCount));
-
-      recordsBuffer = new ArrayList<byte[]>();
-      recordGuidsBuffer = new ArrayList<String>();
-      byteCount = PER_BATCH_OVERHEAD;
-    }
+    uploader.process(record);
   }
 
   @Override
   public void storeDone() {
     Logger.debug(LOG_TAG, "storeDone().");
-    synchronized (recordsBufferMonitor) {
-      flush();
-      // Do this in a Runnable so that the timestamp is grabbed after any upload.
-      final Runnable r = new Runnable() {
-        @Override
-        public void run() {
-          synchronized (recordsBufferMonitor) {
-            final long end = uploadTimestamp.get();
-            Logger.debug(LOG_TAG, "Calling storeDone with " + end);
-            storeDone(end);
-          }
-        }
-      };
-      storeWorkQueue.execute(r);
-    }
-  }
 
-  /**
-   * <code>true</code> if a record upload has failed this session.
-   * <p>
-   * This is only set in begin and possibly by <code>RecordUploadRunnable</code>.
-   * Since those are executed serially, we can use an unsynchronized
-   * volatile boolean here.
-   */
-  protected volatile boolean recordUploadFailed;
-
-  @Override
-  public void begin(RepositorySessionBeginDelegate delegate) throws InvalidSessionTransitionException {
-    recordUploadFailed = false;
-    super.begin(delegate);
-  }
-
-  /**
-   * Make an HTTP request, and convert HTTP request delegate callbacks into
-   * store callbacks within the context of this RepositorySession.
-   *
-   * @author rnewman
-   *
-   */
-  protected class RecordUploadRunnable implements Runnable, SyncStorageRequestDelegate {
-
-    public final String LOG_TAG = "RecordUploadRunnable";
-    private final ArrayList<byte[]> outgoing;
-    private ArrayList<String> outgoingGuids;
-    private final long byteCount;
-
-    public RecordUploadRunnable(RepositorySessionStoreDelegate storeDelegate,
-                                ArrayList<byte[]> outgoing,
-                                ArrayList<String> outgoingGuids,
-                                long byteCount) {
-      Logger.debug(LOG_TAG, "Preparing record upload for " +
-                  outgoing.size() + " records (" +
-                  byteCount + " bytes).");
-      this.outgoing = outgoing;
-      this.outgoingGuids = outgoingGuids;
-      this.byteCount = byteCount;
-    }
-
-    @Override
-    public AuthHeaderProvider getAuthHeaderProvider() {
-      return serverRepository.getAuthHeaderProvider();
-    }
-
-    @Override
-    public String ifUnmodifiedSince() {
-      return null;
+    // If delegate was set, this shouldn't happen.
+    if (uploader == null) {
+      throw new IllegalStateException("Uploader haven't been initialized");
     }
 
-    @Override
-    public void handleRequestSuccess(SyncStorageResponse response) {
-      Logger.trace(LOG_TAG, "POST of " + outgoing.size() + " records done.");
-
-      ExtendedJSONObject body;
-      try {
-        body = response.jsonObjectBody(); // jsonObjectBody() throws or returns non-null.
-      } catch (Exception e) {
-        Logger.error(LOG_TAG, "Got exception parsing POST success body.", e);
-        this.handleRequestError(e);
-        return;
-      }
-
-      // Be defensive when logging timestamp.
-      if (body.containsKey("modified")) {
-        Long modified = body.getTimestamp("modified");
-        if (modified != null) {
-          Logger.trace(LOG_TAG, "POST request success. Modified timestamp: " + modified);
-        } else {
-          Logger.warn(LOG_TAG, "POST success body contains malformed 'modified': " + body.toJSONString());
-        }
-      } else {
-        Logger.warn(LOG_TAG, "POST success body does not contain key 'modified': " + body.toJSONString());
-      }
-
-      try {
-        JSONArray          success = body.getArray("success");
-        if ((success != null) &&
-            (success.size() > 0)) {
-          Logger.trace(LOG_TAG, "Successful records: " + success.toString());
-          for (Object o : success) {
-            try {
-              delegate.onRecordStoreSucceeded((String) o);
-            } catch (ClassCastException e) {
-              Logger.error(LOG_TAG, "Got exception parsing POST success guid.", e);
-              // Not much to be done.
-            }
-          }
-
-          long normalizedTimestamp = getNormalizedTimestamp(response);
-          Logger.trace(LOG_TAG, "Passing back upload X-Weave-Timestamp: " + normalizedTimestamp);
-          bumpUploadTimestamp(normalizedTimestamp);
-        }
-        success = null; // Want to GC this ASAP.
-
-        ExtendedJSONObject failed  = body.getObject("failed");
-        if ((failed != null) &&
-            (failed.object.size() > 0)) {
-          Logger.debug(LOG_TAG, "Failed records: " + failed.object.toString());
-          Exception ex = new Server11RecordPostFailedException();
-          for (String guid : failed.keySet()) {
-            delegate.onRecordStoreFailed(ex, guid);
-          }
-        }
-        failed = null; // Want to GC this ASAP.
-      } catch (UnexpectedJSONException e) {
-        Logger.error(LOG_TAG, "Got exception processing success/failed in POST success body.", e);
-        // TODO
-        return;
-      }
-      Logger.debug(LOG_TAG, "POST of " + outgoing.size() + " records handled.");
-    }
-
-    @Override
-    public void handleRequestFailure(SyncStorageResponse response) {
-      // TODO: call session.interpretHTTPFailure.
-      this.handleRequestError(new HTTPFailureException(response));
-    }
-
-    @Override
-    public void handleRequestError(final Exception ex) {
-      Logger.warn(LOG_TAG, "Got request error.", ex);
-
-      recordUploadFailed = true;
-      ArrayList<String> failedOutgoingGuids = outgoingGuids;
-      outgoingGuids = null; // Want to GC this ASAP.
-      for (String guid : failedOutgoingGuids) {
-        delegate.onRecordStoreFailed(ex, guid);
-      }
-      return;
-    }
-
-    public class ByteArraysContentProducer implements ContentProducer {
-
-      ArrayList<byte[]> outgoing;
-      public ByteArraysContentProducer(ArrayList<byte[]> arrays) {
-        outgoing = arrays;
-      }
-
-      @Override
-      public void writeTo(OutputStream outstream) throws IOException {
-        int count = outgoing.size();
-        outstream.write(recordsStart);
-        outstream.write(outgoing.get(0));
-        for (int i = 1; i < count; ++i) {
-          outstream.write(recordSeparator);
-          outstream.write(outgoing.get(i));
-        }
-        outstream.write(recordsEnd);
-      }
-    }
-
-    public class ByteArraysEntity extends EntityTemplate {
-      private final long count;
-      public ByteArraysEntity(ArrayList<byte[]> arrays, long totalBytes) {
-        super(new ByteArraysContentProducer(arrays));
-        this.count = totalBytes;
-        this.setContentType("application/json");
-        // charset is set in BaseResource.
-      }
-
-      @Override
-      public long getContentLength() {
-        return count;
-      }
-
-      @Override
-      public boolean isRepeatable() {
-        return true;
-      }
-    }
-
-    public ByteArraysEntity getBodyEntity() {
-      ByteArraysEntity body = new ByteArraysEntity(outgoing, byteCount);
-      return body;
-    }
-
-    @Override
-    public void run() {
-      if (recordUploadFailed) {
-        Logger.info(LOG_TAG, "Previous record upload failed.  Failing all records and not retrying.");
-        Exception ex = new Server11PreviousPostFailedException();
-        for (String guid : outgoingGuids) {
-          delegate.onRecordStoreFailed(ex, guid);
-        }
-        return;
-      }
-
-      if (outgoing == null ||
-          outgoing.size() == 0) {
-        Logger.debug(LOG_TAG, "No items: RecordUploadRunnable returning immediately.");
-        return;
-      }
-
-      URI u = serverRepository.collectionURI();
-      SyncStorageRequest request = new SyncStorageRequest(u);
-
-      request.delegate = this;
-
-      // We don't want the task queue to proceed until this request completes.
-      // Fortunately, BaseResource is currently synchronous.
-      // If that ever changes, you'll need to block here.
-      ByteArraysEntity body = getBodyEntity();
-      request.post(body);
-    }
+    uploader.noMoreRecordsToUpload();
   }
 
   @Override
   public boolean dataAvailable() {
     return serverRepository.updateNeeded(getLastSyncTimestamp());
   }
 }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchMeta.java
@@ -0,0 +1,165 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.support.annotation.CheckResult;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+
+import org.mozilla.gecko.background.common.log.Logger;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.TokenModifiedException;
+import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.LastModifiedChangedUnexpectedly;
+import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.LastModifiedDidNotChange;
+
+/**
+ * Keeps track of token, Last-Modified value and GUIDs of succeeded records.
+ */
+/* @ThreadSafe */
+public class BatchMeta extends BufferSizeTracker {
+    private static final String LOG_TAG = "BatchMeta";
+
+    // Will be set once first payload upload succeeds. We don't expect this to change until we
+    // commit the batch, and which point it must change.
+    /* @GuardedBy("this") */ private Long lastModified;
+
+    // Will be set once first payload upload succeeds. We don't expect this to ever change until
+    // a commit succeeds, at which point this gets set to null.
+    /* @GuardedBy("this") */ private String token;
+
+    /* @GuardedBy("accessLock") */ private boolean isUnlimited = false;
+
+    // Accessed by synchronously running threads.
+    /* @GuardedBy("accessLock") */ private final List<String> successRecordGuids = new ArrayList<>();
+
+    /* @GuardedBy("accessLock") */ private boolean needsCommit = false;
+
+    protected final Long collectionLastModified;
+
+    public BatchMeta(@NonNull Object payloadLock, long maxBytes, long maxRecords, @Nullable Long collectionLastModified) {
+        super(payloadLock, maxBytes, maxRecords);
+        this.collectionLastModified = collectionLastModified;
+    }
+
+    protected void setIsUnlimited(boolean isUnlimited) {
+        synchronized (accessLock) {
+            this.isUnlimited = isUnlimited;
+        }
+    }
+
+    @Override
+    protected boolean canFit(long recordDeltaByteCount) {
+        synchronized (accessLock) {
+            return isUnlimited || super.canFit(recordDeltaByteCount);
+        }
+    }
+
+    @Override
+    @CheckResult
+    protected boolean addAndEstimateIfFull(long recordDeltaByteCount) {
+        synchronized (accessLock) {
+            needsCommit = true;
+            boolean isFull = super.addAndEstimateIfFull(recordDeltaByteCount);
+            return !isUnlimited && isFull;
+        }
+    }
+
+    protected boolean needToCommit() {
+        synchronized (accessLock) {
+            return needsCommit;
+        }
+    }
+
+    protected synchronized String getToken() {
+        return token;
+    }
+
+    protected synchronized void setToken(final String newToken, boolean isCommit) throws TokenModifiedException {
+        // Set token once in a batching mode.
+        // In a non-batching mode, this.token and newToken will be null, and this is a no-op.
+        if (token == null) {
+            token = newToken;
+            return;
+        }
+
+        // Sanity checks.
+        if (isCommit) {
+            // We expect token to be null when commit payload succeeds.
+            if (newToken != null) {
+                throw new TokenModifiedException();
+            } else {
+                token = null;
+            }
+            return;
+        }
+
+        // We expect new token to always equal current token for non-commit payloads.
+        if (!token.equals(newToken)) {
+            throw new TokenModifiedException();
+        }
+    }
+
+    protected synchronized Long getLastModified() {
+        if (lastModified == null) {
+            return collectionLastModified;
+        }
+        return lastModified;
+    }
+
+    protected synchronized void setLastModified(final Long newLastModified, final boolean expectedToChange) throws LastModifiedChangedUnexpectedly, LastModifiedDidNotChange {
+        if (lastModified == null) {
+            lastModified = newLastModified;
+            return;
+        }
+
+        if (!expectedToChange && !lastModified.equals(newLastModified)) {
+            Logger.debug(LOG_TAG, "Last-Modified timestamp changed when we didn't expect it");
+            throw new LastModifiedChangedUnexpectedly();
+
+        } else if (expectedToChange && lastModified.equals(newLastModified)) {
+            Logger.debug(LOG_TAG, "Last-Modified timestamp did not change when we expected it to");
+            throw new LastModifiedDidNotChange();
+
+        } else {
+            lastModified = newLastModified;
+        }
+    }
+
+    protected ArrayList<String> getSuccessRecordGuids() {
+        synchronized (accessLock) {
+            return new ArrayList<>(this.successRecordGuids);
+        }
+    }
+
+    protected void recordSucceeded(final String recordGuid) {
+        // Sanity check.
+        if (recordGuid == null) {
+            throw new IllegalStateException();
+        }
+
+        synchronized (accessLock) {
+            successRecordGuids.add(recordGuid);
+        }
+    }
+
+    @Override
+    protected boolean canFitRecordByteDelta(long byteDelta, long recordCount, long byteCount) {
+        return isUnlimited || super.canFitRecordByteDelta(byteDelta, recordCount, byteCount);
+    }
+
+    @Override
+    protected void reset() {
+        synchronized (accessLock) {
+            super.reset();
+            token = null;
+            lastModified = null;
+            successRecordGuids.clear();
+            needsCommit = false;
+        }
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
@@ -0,0 +1,344 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.net.Uri;
+import android.support.annotation.VisibleForTesting;
+
+import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.InfoConfiguration;
+import org.mozilla.gecko.sync.Server11RecordPostFailedException;
+import org.mozilla.gecko.sync.net.SyncResponse;
+import org.mozilla.gecko.sync.net.SyncStorageResponse;
+import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
+import org.mozilla.gecko.sync.repositories.domain.Record;
+
+import java.util.ArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Uploader which implements batching introduced in Sync 1.5.
+ *
+ * Batch vs payload terminology:
+ * - batch is comprised of a series of payloads, which are all committed at the same time.
+ * -- identified via a "batch token", which is returned after first payload for the batch has been uploaded.
+ * - payload is a collection of records which are uploaded together. Associated with a batch.
+ * -- last payload, identified via commit=true, commits the batch.
+ *
+ * Limits for how many records can fit into a payload and into a batch are defined in the passed-in
+ * InfoConfiguration object.
+ *
+ * If we can't fit everything we'd like to upload into one batch (according to max-total-* limits),
+ * then we commit that batch, and start a new one. There are no explicit limits on total number of
+ * batches we might use, although at some point we'll start to run into storage limit errors from the API.
+ *
+ * Once we go past using one batch this uploader is no longer "atomic". Partial state is exposed
+ * to other clients after our first batch is committed and before our last batch is committed.
+ * However, our per-batch limits are high, X-I-U-S mechanics help protect downloading clients
+ * (as long as they implement X-I-U-S) with 412 error codes in case of interleaving upload and download,
+ * and most mobile clients will not be uploading large-enough amounts of data (especially structured
+ * data, such as bookmarks).
+ *
+ * Last-Modified header returned with the first batch payload POST success is maintained for a batch,
+ * to guard against concurrent-modification errors (different uploader commits before we're done).
+ *
+ * Non-batching mode notes:
+ * We also support Sync servers which don't enable batching for uploads. In this case, we respect
+ * payload limits for individual uploads, and every upload is considered a commit. Batching limits
+ * do not apply, and batch token is irrelevant.
+ * We do keep track of Last-Modified and send along X-I-U-S with our uploads, to protect against
+ * concurrent modifications by other clients.
+ */
+public class BatchingUploader {
+    private static final String LOG_TAG = "BatchingUploader";
+
+    private final Uri collectionUri;
+
+    private volatile boolean recordUploadFailed = false;
+
+    private final BatchMeta batchMeta;
+    private final Payload payload;
+
+    // Accessed by synchronously running threads, OK to not synchronize and just make it volatile.
+    private volatile Boolean inBatchingMode;
+
+    // Used to ensure we have thread-safe access to the following:
+    // - byte and record counts in both Payload and BatchMeta objects
+    // - buffers in the Payload object
+    private final Object payloadLock = new Object();
+
+    protected Executor workQueue;
+    protected final RepositorySessionStoreDelegate sessionStoreDelegate;
+    protected final Server11RepositorySession repositorySession;
+
+    protected AtomicLong uploadTimestamp = new AtomicLong(0);
+
+    protected static final int PER_RECORD_OVERHEAD_BYTE_COUNT = RecordUploadRunnable.RECORD_SEPARATOR.length;
+    protected static final int PER_PAYLOAD_OVERHEAD_BYTE_COUNT = RecordUploadRunnable.RECORDS_END.length;
+
+    // Sanity check. RECORD_SEPARATOR and RECORD_START are assumed to be of the same length.
+    static {
+        if (RecordUploadRunnable.RECORD_SEPARATOR.length != RecordUploadRunnable.RECORDS_START.length) {
+            throw new IllegalStateException("Separator and start tokens must be of the same length");
+        }
+    }
+
+    public BatchingUploader(final Server11RepositorySession repositorySession, final Executor workQueue, final RepositorySessionStoreDelegate sessionStoreDelegate) {
+        this.repositorySession = repositorySession;
+        this.workQueue = workQueue;
+        this.sessionStoreDelegate = sessionStoreDelegate;
+        this.collectionUri = Uri.parse(repositorySession.getServerRepository().collectionURI().toString());
+
+        InfoConfiguration config = repositorySession.getServerRepository().getInfoConfiguration();
+        this.batchMeta = new BatchMeta(
+                payloadLock, config.maxTotalBytes, config.maxTotalRecords,
+                repositorySession.getServerRepository().getCollectionLastModified()
+        );
+        this.payload = new Payload(payloadLock, config.maxPostBytes, config.maxPostRecords);
+    }
+
+    public void process(final Record record) {
+        final String guid = record.guid;
+        final byte[] recordBytes = record.toJSONBytes();
+        final long recordDeltaByteCount = recordBytes.length + PER_RECORD_OVERHEAD_BYTE_COUNT;
+
+        Logger.debug(LOG_TAG, "Processing a record with guid: " + guid);
+
+        // We can't upload individual records which exceed our payload byte limit.
+        if ((recordDeltaByteCount + PER_PAYLOAD_OVERHEAD_BYTE_COUNT) > payload.maxBytes) {
+            sessionStoreDelegate.onRecordStoreFailed(new RecordTooLargeToUpload(), guid);
+            return;
+        }
+
+        synchronized (payloadLock) {
+            final boolean canFitRecordIntoBatch = batchMeta.canFit(recordDeltaByteCount);
+            final boolean canFitRecordIntoPayload = payload.canFit(recordDeltaByteCount);
+
+            // Record fits!
+            if (canFitRecordIntoBatch && canFitRecordIntoPayload) {
+                Logger.debug(LOG_TAG, "Record fits into the current batch and payload");
+                addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
+
+            // Payload won't fit the record.
+            } else if (canFitRecordIntoBatch) {
+                Logger.debug(LOG_TAG, "Current payload won't fit incoming record, uploading payload.");
+                flush(false, false);
+
+                Logger.debug(LOG_TAG, "Recording the incoming record into a new payload");
+
+                // Keep track of the overflow record.
+                addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
+
+            // Batch won't fit the record.
+            } else {
+                Logger.debug(LOG_TAG, "Current batch won't fit incoming record, committing batch.");
+                flush(true, false);
+
+                Logger.debug(LOG_TAG, "Recording the incoming record into a new batch");
+                batchMeta.reset();
+
+                // Keep track of the overflow record.
+                addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
+            }
+        }
+    }
+
+    // Convenience function used from the process method; caller must hold a payloadLock.
+    private void addAndFlushIfNecessary(long byteCount, byte[] recordBytes, String guid) {
+        boolean isPayloadFull = payload.addAndEstimateIfFull(byteCount, recordBytes, guid);
+        boolean isBatchFull = batchMeta.addAndEstimateIfFull(byteCount);
+
+        // Preemptive commit batch or upload a payload if they're estimated to be full.
+        if (isBatchFull) {
+            flush(true, false);
+            batchMeta.reset();
+        } else if (isPayloadFull) {
+            flush(false, false);
+        }
+    }
+
+    public void noMoreRecordsToUpload() {
+        Logger.debug(LOG_TAG, "Received 'no more records to upload' signal.");
+
+        // Run this after the last payload succeeds, so that we know for sure if we're in a batching
+        // mode and need to commit with a potentially empty payload.
+        workQueue.execute(new Runnable() {
+            @Override
+            public void run() {
+                commitIfNecessaryAfterLastPayload();
+            }
+        });
+    }
+
+    @VisibleForTesting
+    protected void commitIfNecessaryAfterLastPayload() {
+        // Must be called after last payload upload finishes.
+        synchronized (payload) {
+            // If we have any pending records in the Payload, flush them!
+            if (!payload.isEmpty()) {
+                flush(true, true);
+
+            // If we have an empty payload but need to commit the batch in the batching mode, flush!
+            } else if (batchMeta.needToCommit() && Boolean.TRUE.equals(inBatchingMode)) {
+                flush(true, true);
+
+            // Otherwise, we're done.
+            } else {
+                finished(uploadTimestamp);
+            }
+        }
+    }
+
+    /**
+     * We've been told by our upload delegate that a payload succeeded.
+     * Depending on the type of payload and batch mode status, inform our delegate of progress.
+     *
+     * @param response success response to our commit post
+     * @param isCommit was this a commit upload?
+     * @param isLastPayload was this a very last payload we'll upload?
+     */
+    public void payloadSucceeded(final SyncStorageResponse response, final boolean isCommit, final boolean isLastPayload) {
+        // Sanity check.
+        if (inBatchingMode == null) {
+            throw new IllegalStateException("Can't process payload success until we know if we're in a batching mode");
+        }
+
+        // We consider records to have been committed if we're not in a batching mode or this was a commit.
+        // If records have been committed, notify our store delegate.
+        if (!inBatchingMode || isCommit) {
+            for (String guid : batchMeta.getSuccessRecordGuids()) {
+                sessionStoreDelegate.onRecordStoreSucceeded(guid);
+            }
+        }
+
+        // If this was our very last commit, we're done storing records.
+        // Get Last-Modified timestamp from the response, and pass it upstream.
+        if (isLastPayload) {
+            finished(response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED));
+        }
+    }
+
+    public void lastPayloadFailed() {
+        finished(uploadTimestamp);
+    }
+
+    private void finished(long lastModifiedTimestamp) {
+        bumpTimestampTo(uploadTimestamp, lastModifiedTimestamp);
+        finished(uploadTimestamp);
+    }
+
+    private void finished(AtomicLong lastModifiedTimestamp) {
+        repositorySession.storeDone(lastModifiedTimestamp.get());
+    }
+
+    public BatchMeta getCurrentBatch() {
+        return batchMeta;
+    }
+
+    public void setInBatchingMode(boolean inBatchingMode) {
+        this.inBatchingMode = inBatchingMode;
+
+        // If we know for sure that we're not in a batching mode,
+        // consider our batch to be of unlimited size.
+        this.batchMeta.setIsUnlimited(!inBatchingMode);
+    }
+
+    public Boolean getInBatchingMode() {
+        return inBatchingMode;
+    }
+
+    public void setLastModified(final Long lastModified, final boolean isCommit) throws BatchingUploaderException {
+        // Sanity check.
+        if (inBatchingMode == null) {
+            throw new IllegalStateException("Can't process Last-Modified before we know we're in a batching mode.");
+        }
+
+        // In non-batching mode, every time we receive a Last-Modified timestamp, we expect it to change
+        // since records are "committed" (become visible to other clients) on every payload.
+        // In batching mode, we only expect Last-Modified to change when we commit a batch.
+        batchMeta.setLastModified(lastModified, isCommit || !inBatchingMode);
+    }
+
+    public void recordSucceeded(final String recordGuid) {
+        Logger.debug(LOG_TAG, "Record store succeeded: " + recordGuid);
+        batchMeta.recordSucceeded(recordGuid);
+    }
+
+    public void recordFailed(final String recordGuid) {
+        recordFailed(new Server11RecordPostFailedException(), recordGuid);
+    }
+
+    public void recordFailed(final Exception e, final String recordGuid) {
+        Logger.debug(LOG_TAG, "Record store failed for guid " + recordGuid + " with exception: " + e.toString());
+        recordUploadFailed = true;
+        sessionStoreDelegate.onRecordStoreFailed(e, recordGuid);
+    }
+
+    public Server11RepositorySession getRepositorySession() {
+        return repositorySession;
+    }
+
+    private static void bumpTimestampTo(final AtomicLong current, long newValue) {
+        while (true) {
+            long existing = current.get();
+            if (existing > newValue) {
+                return;
+            }
+            if (current.compareAndSet(existing, newValue)) {
+                return;
+            }
+        }
+    }
+
+    private void flush(final boolean isCommit, final boolean isLastPayload) {
+        final ArrayList<byte[]> outgoing;
+        final ArrayList<String> outgoingGuids;
+        final long byteCount;
+
+        // Even though payload object itself is thread-safe, we want to ensure we get these altogether
+        // as a "unit". Another approach would be to create a wrapper object for these values, but this works.
+        synchronized (payloadLock) {
+            outgoing = payload.getRecordsBuffer();
+            outgoingGuids = payload.getRecordGuidsBuffer();
+            byteCount = payload.getByteCount();
+        }
+
+        workQueue.execute(new RecordUploadRunnable(
+                new BatchingAtomicUploaderMayUploadProvider(),
+                collectionUri,
+                batchMeta,
+                new PayloadUploadDelegate(this, outgoingGuids, isCommit, isLastPayload),
+                outgoing,
+                byteCount,
+                isCommit
+        ));
+
+        payload.reset();
+    }
+
+    private class BatchingAtomicUploaderMayUploadProvider implements MayUploadProvider {
+        public boolean mayUpload() {
+            return !recordUploadFailed;
+        }
+    }
+
+    public static class BatchingUploaderException extends Exception {
+        private static final long serialVersionUID = 1L;
+    }
+    public static class RecordTooLargeToUpload extends BatchingUploaderException {
+        private static final long serialVersionUID = 1L;
+    }
+    public static class LastModifiedDidNotChange extends BatchingUploaderException {
+        private static final long serialVersionUID = 1L;
+    }
+    public static class LastModifiedChangedUnexpectedly extends BatchingUploaderException {
+        private static final long serialVersionUID = 1L;
+    }
+    public static class TokenModifiedException extends BatchingUploaderException {
+        private static final long serialVersionUID = 1L;
+    };
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BufferSizeTracker.java
@@ -0,0 +1,103 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.support.annotation.CallSuper;
+import android.support.annotation.CheckResult;
+
+/**
+ * Implements functionality shared by BatchMeta and Payload objects, namely:
+ * - keeping track of byte and record counts
+ * - incrementing those counts when records are added
+ * - checking if a record can fit
+ */
+/* @ThreadSafe */
+public abstract class BufferSizeTracker {
+    protected final Object accessLock;
+
+    /* @GuardedBy("accessLock") */ private long byteCount = BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
+    /* @GuardedBy("accessLock") */ private long recordCount = 0;
+    /* @GuardedBy("accessLock") */ protected Long smallestRecordByteCount;
+
+    protected final long maxBytes;
+    protected final long maxRecords;
+
+    public BufferSizeTracker(Object accessLock, long maxBytes, long maxRecords) {
+        this.accessLock = accessLock;
+        this.maxBytes = maxBytes;
+        this.maxRecords = maxRecords;
+    }
+
+    @CallSuper
+    protected boolean canFit(long recordDeltaByteCount) {
+        synchronized (accessLock) {
+            return canFitRecordByteDelta(recordDeltaByteCount, recordCount, byteCount);
+        }
+    }
+
+    protected boolean isEmpty() {
+        synchronized (accessLock) {
+            return recordCount == 0;
+        }
+    }
+
+    /**
+     * Adds a record and returns a boolean indicating whether the batch is estimated to be full afterwards.
+     */
+    @CheckResult
+    protected boolean addAndEstimateIfFull(long recordDeltaByteCount) {
+        synchronized (accessLock) {
+            // Sanity check. Calling this method when buffer won't fit the record is an error.
+            if (!canFitRecordByteDelta(recordDeltaByteCount, recordCount, byteCount)) {
+                throw new IllegalStateException("Buffer size exceeded");
+            }
+
+            byteCount += recordDeltaByteCount;
+            recordCount += 1;
+
+            if (smallestRecordByteCount == null || smallestRecordByteCount > recordDeltaByteCount) {
+                smallestRecordByteCount = recordDeltaByteCount;
+            }
+
+            // See if we're full or nearly full after adding a record.
+            // We're halving smallestRecordByteCount because we're erring
+            // on the side of "can hopefully fit". We're trying to upload as soon as we know we
+            // should, but we also need to be mindful of minimizing total number of uploads we make.
+            return !canFitRecordByteDelta(smallestRecordByteCount / 2, recordCount, byteCount);
+        }
+    }
+
+    protected long getByteCount() {
+        synchronized (accessLock) {
+            // Ensure we account for payload overhead twice when the batch is empty.
+            // Payload overhead is either RECORDS_START ("[") or RECORDS_END ("]"),
+     * and for an empty payload we need to account for both ("[]").
+            if (recordCount == 0) {
+                return byteCount + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
+            }
+            return byteCount;
+        }
+    }
+
+    protected long getRecordCount() {
+        synchronized (accessLock) {
+            return recordCount;
+        }
+    }
+
+    @CallSuper
+    protected void reset() {
+        synchronized (accessLock) {
+            byteCount = BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
+            recordCount = 0;
+        }
+    }
+
+    @CallSuper
+    protected boolean canFitRecordByteDelta(long byteDelta, long recordCount, long byteCount) {
+        return recordCount < maxRecords
+                && (byteCount + byteDelta) <= maxBytes;
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/MayUploadProvider.java
@@ -0,0 +1,9 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+public interface MayUploadProvider {
+    boolean mayUpload();
+}
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/Payload.java
@@ -0,0 +1,66 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.support.annotation.CheckResult;
+
+import java.util.ArrayList;
+
+/**
+ * Owns per-payload record byte and recordGuid buffers.
+ */
+/* @ThreadSafe */
+public class Payload extends BufferSizeTracker {
+    // Data of outbound records.
+    /* @GuardedBy("accessLock") */ private final ArrayList<byte[]> recordsBuffer = new ArrayList<>();
+
+    // GUIDs of outbound records. Used to fail entire payloads.
+    /* @GuardedBy("accessLock") */ private final ArrayList<String> recordGuidsBuffer = new ArrayList<>();
+
+    public Payload(Object payloadLock, long maxBytes, long maxRecords) {
+        super(payloadLock, maxBytes, maxRecords);
+    }
+
+    @Override
+    protected boolean addAndEstimateIfFull(long recordDelta) {
+        throw new UnsupportedOperationException();
+    }
+
+    @CheckResult
+    protected boolean addAndEstimateIfFull(long recordDelta, byte[] recordBytes, String guid) {
+        synchronized (accessLock) {
+            recordsBuffer.add(recordBytes);
+            recordGuidsBuffer.add(guid);
+            return super.addAndEstimateIfFull(recordDelta);
+        }
+    }
+
+    @Override
+    protected void reset() {
+        synchronized (accessLock) {
+            super.reset();
+            recordsBuffer.clear();
+            recordGuidsBuffer.clear();
+        }
+    }
+
+    protected ArrayList<byte[]> getRecordsBuffer() {
+        synchronized (accessLock) {
+            return new ArrayList<>(recordsBuffer);
+        }
+    }
+
+    protected ArrayList<String> getRecordGuidsBuffer() {
+        synchronized (accessLock) {
+            return new ArrayList<>(recordGuidsBuffer);
+        }
+    }
+
+    protected boolean isEmpty() {
+        synchronized (accessLock) {
+            return recordsBuffer.isEmpty();
+        }
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/PayloadUploadDelegate.java
@@ -0,0 +1,185 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import org.json.simple.JSONArray;
+import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.ExtendedJSONObject;
+import org.mozilla.gecko.sync.HTTPFailureException;
+import org.mozilla.gecko.sync.NonArrayJSONException;
+import org.mozilla.gecko.sync.NonObjectJSONException;
+import org.mozilla.gecko.sync.Utils;
+import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.net.SyncResponse;
+import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
+import org.mozilla.gecko.sync.net.SyncStorageResponse;
+
+import java.util.ArrayList;
+
+public class PayloadUploadDelegate implements SyncStorageRequestDelegate {
+    private static final String LOG_TAG = "PayloadUploadDelegate";
+
+    private static final String KEY_BATCH = "batch";
+
+    private final BatchingUploader uploader;
+    private ArrayList<String> postedRecordGuids;
+    private final boolean isCommit;
+    private final boolean isLastPayload;
+
+    public PayloadUploadDelegate(BatchingUploader uploader, ArrayList<String> postedRecordGuids, boolean isCommit, boolean isLastPayload) {
+        this.uploader = uploader;
+        this.postedRecordGuids = postedRecordGuids;
+        this.isCommit = isCommit;
+        this.isLastPayload = isLastPayload;
+    }
+
+    @Override
+    public AuthHeaderProvider getAuthHeaderProvider() {
+        return uploader.getRepositorySession().getServerRepository().getAuthHeaderProvider();
+    }
+
+    @Override
+    public String ifUnmodifiedSince() {
+        final Long lastModified = uploader.getCurrentBatch().getLastModified();
+        if (lastModified == null) {
+            return null;
+        }
+        return Utils.millisecondsToDecimalSecondsString(lastModified);
+    }
+
+    @Override
+    public void handleRequestSuccess(final SyncStorageResponse response) {
+        // First, do some sanity checking.
+        if (response.getStatusCode() != 200 && response.getStatusCode() != 202) {
+            handleRequestError(
+                new IllegalStateException("handleRequestSuccess received a non-200/202 response: " + response.getStatusCode())
+            );
+            return;
+        }
+
+        // We always expect to see a Last-Modified header. It's returned with every success response.
+        if (!response.httpResponse().containsHeader(SyncResponse.X_LAST_MODIFIED)) {
+            handleRequestError(
+                    new IllegalStateException("Response did not have a Last-Modified header")
+            );
+            return;
+        }
+
+        // We expect to be able to parse the response as a JSON object.
+        final ExtendedJSONObject body;
+        try {
+            body = response.jsonObjectBody(); // jsonObjectBody() throws or returns non-null.
+        } catch (Exception e) {
+            Logger.error(LOG_TAG, "Got exception parsing POST success body.", e);
+            this.handleRequestError(e);
+            return;
+        }
+
+        // If we got a 200, it could be either a non-batching result, or a batch commit.
+        // - if we're in a batching mode, we expect this to be a commit.
+        // If we got a 202, we expect there to be a token present in the response.
+        if (response.getStatusCode() == 200 && uploader.getCurrentBatch().getToken() != null) {
+            if (uploader.getInBatchingMode() && !isCommit) {
+                handleRequestError(
+                        new IllegalStateException("Got 200 OK in batching mode, but this was not a commit payload")
+                );
+                return;
+            }
+        } else if (response.getStatusCode() == 202) {
+            if (!body.containsKey(KEY_BATCH)) {
+                handleRequestError(
+                        new IllegalStateException("Batch response did not have a batch ID")
+                );
+                return;
+            }
+        }
+
+        // With sanity checks out of the way, we can now safely say whether we're in batching mode or not.
+        // We only do this once per session.
+        if (uploader.getInBatchingMode() == null) {
+            uploader.setInBatchingMode(body.containsKey(KEY_BATCH));
+        }
+
+        // Tell current batch about the token we've received.
+        // Throws if token changed after being set once, or if we got a non-null token after a commit.
+        try {
+            uploader.getCurrentBatch().setToken(body.getString(KEY_BATCH), isCommit);
+        } catch (BatchingUploader.BatchingUploaderException e) {
+            handleRequestError(e);
+            return;
+        }
+
+        // Will throw if Last-Modified changed when it shouldn't have.
+        try {
+            uploader.setLastModified(
+                    response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED),
+                    isCommit);
+        } catch (BatchingUploader.BatchingUploaderException e) {
+            handleRequestError(e);
+            return;
+        }
+
+        // All looks good up to this point, let's process success and failed arrays.
+        JSONArray success;
+        try {
+            success = body.getArray("success");
+        } catch (NonArrayJSONException e) {
+            handleRequestError(e);
+            return;
+        }
+
+        if (success != null && !success.isEmpty()) {
+            Logger.trace(LOG_TAG, "Successful records: " + success.toString());
+            for (Object o : success) {
+                try {
+                    uploader.recordSucceeded((String) o);
+                } catch (ClassCastException e) {
+                    Logger.error(LOG_TAG, "Got exception parsing POST success guid.", e);
+                    // Not much to be done.
+                }
+            }
+        }
+        // GC
+        success = null;
+
+        ExtendedJSONObject failed;
+        try {
+            failed = body.getObject("failed");
+        } catch (NonObjectJSONException e) {
+            handleRequestError(e);
+            return;
+        }
+
+        if (failed != null && !failed.object.isEmpty()) {
+            Logger.debug(LOG_TAG, "Failed records: " + failed.object.toString());
+            for (String guid : failed.keySet()) {
+                uploader.recordFailed(guid);
+            }
+        }
+        // GC
+        failed = null;
+
+        // And we're done! Let uploader finish up.
+        uploader.payloadSucceeded(response, isCommit, isLastPayload);
+    }
+
+    @Override
+    public void handleRequestFailure(final SyncStorageResponse response) {
+        this.handleRequestError(new HTTPFailureException(response));
+    }
+
+    @Override
+    public void handleRequestError(Exception e) {
+        for (String guid : postedRecordGuids) {
+            uploader.recordFailed(e, guid);
+        }
+        // GC
+        postedRecordGuids = null;
+
+        if (isLastPayload) {
+            uploader.lastPayloadFailed();
+        }
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/RecordUploadRunnable.java
@@ -0,0 +1,177 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.net.Uri;
+import android.support.annotation.VisibleForTesting;
+
+import org.mozilla.gecko.background.common.log.Logger;
+import org.mozilla.gecko.sync.Server11PreviousPostFailedException;
+import org.mozilla.gecko.sync.net.SyncStorageRequest;
+import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+
+import ch.boye.httpclientandroidlib.entity.ContentProducer;
+import ch.boye.httpclientandroidlib.entity.EntityTemplate;
+
+/**
+ * Responsible for creating and posting a <code>SyncStorageRequest</code> request object.
+ */
+public class RecordUploadRunnable implements Runnable {
+    public final String LOG_TAG = "RecordUploadRunnable";
+
+    public final static byte[] RECORDS_START = "[".getBytes(StandardCharsets.UTF_8);
+    public final static byte[] RECORD_SEPARATOR = ",".getBytes(StandardCharsets.UTF_8);
+    public final static byte[] RECORDS_END = "]".getBytes(StandardCharsets.UTF_8);
+
+    private static final String QUERY_PARAM_BATCH = "batch";
+    private static final String QUERY_PARAM_TRUE = "true";
+    private static final String QUERY_PARAM_BATCH_COMMIT = "commit";
+
+    private final MayUploadProvider mayUploadProvider;
+    private final SyncStorageRequestDelegate uploadDelegate;
+
+    private final ArrayList<byte[]> outgoing;
+    private final long byteCount;
+
+    // Used to construct POST URI during run().
+    @VisibleForTesting
+    public final boolean isCommit;
+    private final Uri collectionUri;
+    private final BatchMeta batchMeta;
+
+    public RecordUploadRunnable(MayUploadProvider mayUploadProvider,
+                                Uri collectionUri,
+                                BatchMeta batchMeta,
+                                SyncStorageRequestDelegate uploadDelegate,
+                                ArrayList<byte[]> outgoing,
+                                long byteCount,
+                                boolean isCommit) {
+        this.mayUploadProvider = mayUploadProvider;
+        this.uploadDelegate = uploadDelegate;
+        this.outgoing = outgoing;
+        this.byteCount = byteCount;
+        this.batchMeta = batchMeta;
+        this.collectionUri = collectionUri;
+        this.isCommit = isCommit;
+    }
+
+    public static class ByteArraysContentProducer implements ContentProducer {
+        ArrayList<byte[]> outgoing;
+        public ByteArraysContentProducer(ArrayList<byte[]> arrays) {
+            outgoing = arrays;
+        }
+
+        @Override
+        public void writeTo(OutputStream outstream) throws IOException {
+            int count = outgoing.size();
+            outstream.write(RECORDS_START);
+            if (count > 0) {
+                outstream.write(outgoing.get(0));
+                for (int i = 1; i < count; ++i) {
+                    outstream.write(RECORD_SEPARATOR);
+                    outstream.write(outgoing.get(i));
+                }
+            }
+            outstream.write(RECORDS_END);
+        }
+
+        public static long outgoingBytesCount(ArrayList<byte[]> outgoing) {
+            final long numberOfRecords = outgoing.size();
+
+            // Account for start and end tokens.
+            long count = RECORDS_START.length + RECORDS_END.length;
+
+            // Account for all the records.
+            for (int i = 0; i < numberOfRecords; i++) {
+                count += outgoing.get(i).length;
+            }
+
+            // Account for a separator between the records.
+            // There's one less separator than there are records.
+            if (numberOfRecords > 1) {
+                count += RECORD_SEPARATOR.length * (numberOfRecords - 1);
+            }
+
+            return count;
+        }
+    }
+
+    public static class ByteArraysEntity extends EntityTemplate {
+        private final long count;
+        public ByteArraysEntity(ArrayList<byte[]> arrays, long totalBytes) {
+            super(new ByteArraysContentProducer(arrays));
+            this.count = totalBytes;
+            this.setContentType("application/json");
+            // charset is set in BaseResource.
+
+            // Sanity check our byte counts.
+            long realByteCount = ByteArraysContentProducer.outgoingBytesCount(arrays);
+            if (realByteCount != totalBytes) {
+                throw new IllegalStateException("Mismatched byte counts. Received " + totalBytes + " while real byte count is " + realByteCount);
+            }
+        }
+
+        @Override
+        public long getContentLength() {
+            return count;
+        }
+
+        @Override
+        public boolean isRepeatable() {
+            return true;
+        }
+    }
+
+    @Override
+    public void run() {
+        if (!mayUploadProvider.mayUpload()) {
+            Logger.info(LOG_TAG, "Told not to proceed by the uploader. Cancelling upload, failing records.");
+            uploadDelegate.handleRequestError(new Server11PreviousPostFailedException());
+            return;
+        }
+
+        Logger.trace(LOG_TAG, "Running upload task. Outgoing records: " + outgoing.size());
+
+        // We don't want the task queue to proceed until this request completes.
+        // Fortunately, BaseResource is currently synchronous.
+        // If that ever changes, you'll need to block here.
+
+        final URI postURI = buildPostURI(isCommit, batchMeta, collectionUri);
+        final SyncStorageRequest request = new SyncStorageRequest(postURI);
+        request.delegate = uploadDelegate;
+
+        ByteArraysEntity body = new ByteArraysEntity(outgoing, byteCount);
+        request.post(body);
+    }
+
+    @VisibleForTesting
+    public static URI buildPostURI(boolean isCommit, BatchMeta batchMeta, Uri collectionUri) {
+        final Uri.Builder uriBuilder = collectionUri.buildUpon();
+        final String batchToken = batchMeta.getToken();
+
+        if (batchToken != null) {
+            uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH, batchToken);
+        } else {
+            uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH, QUERY_PARAM_TRUE);
+        }
+
+        if (isCommit) {
+            uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH_COMMIT, QUERY_PARAM_TRUE);
+        }
+
+        try {
+            return new URI(uriBuilder.build().toString());
+        } catch (URISyntaxException e) {
+            throw new IllegalStateException("Failed to construct a collection URI", e);
+        }
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserBookmarksServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserBookmarksServerSyncStage.java
@@ -45,16 +45,17 @@ public class AndroidBrowserBookmarksServ
     AuthHeaderProvider authHeaderProvider = session.getAuthHeaderProvider();
     final JSONRecordFetcher countsFetcher = new JSONRecordFetcher(session.config.infoCollectionCountsURL(), authHeaderProvider);
     String collection = getCollection();
     return new SafeConstrainedServer11Repository(
         collection,
         session.config.storageURL(),
         session.getAuthHeaderProvider(),
         session.config.infoCollections,
+        session.config.infoConfiguration,
         BOOKMARKS_REQUEST_LIMIT,
         BOOKMARKS_SORT,
         countsFetcher);
   }
 
   @Override
   protected Repository getLocalRepository() {
     return new AndroidBrowserBookmarksRepository();
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserHistoryServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/AndroidBrowserHistoryServerSyncStage.java
@@ -45,16 +45,17 @@ public class AndroidBrowserHistoryServer
   @Override
   protected Repository getRemoteRepository() throws URISyntaxException {
     String collection = getCollection();
     return new ConstrainedServer11Repository(
                                              collection,
                                              session.config.storageURL(),
                                              session.getAuthHeaderProvider(),
                                              session.config.infoCollections,
+                                             session.config.infoConfiguration,
                                              HISTORY_REQUEST_LIMIT,
                                              HISTORY_SORT);
   }
 
   @Override
   protected RecordFactory getRecordFactory() {
     return new HistoryRecordFactory();
   }
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FetchInfoConfigurationStage.java
@@ -0,0 +1,59 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+package org.mozilla.gecko.sync.stage;
+
+import org.mozilla.gecko.sync.ExtendedJSONObject;
+import org.mozilla.gecko.sync.InfoConfiguration;
+import org.mozilla.gecko.sync.JSONRecordFetcher;
+import org.mozilla.gecko.sync.delegates.JSONRecordFetchDelegate;
+import org.mozilla.gecko.sync.net.AuthHeaderProvider;
+import org.mozilla.gecko.sync.net.SyncStorageResponse;
+
+/**
+ * Fetches configuration data from info/configurations endpoint.
+ */
+public class FetchInfoConfigurationStage extends AbstractNonRepositorySyncStage {
+    private final String configurationURL;
+    private final AuthHeaderProvider authHeaderProvider;
+
+    public FetchInfoConfigurationStage(final String configurationURL, final AuthHeaderProvider authHeaderProvider) {
+        super();
+        this.configurationURL = configurationURL;
+        this.authHeaderProvider = authHeaderProvider;
+    }
+
+    public class StageInfoConfigurationDelegate implements JSONRecordFetchDelegate {
+        @Override
+        public void handleSuccess(final ExtendedJSONObject result) {
+            session.config.infoConfiguration = new InfoConfiguration(result);
+            session.advance();
+        }
+
+        @Override
+        public void handleFailure(final SyncStorageResponse response) {
+            // Handle all non-404 failures upstream.
+            if (response.getStatusCode() != 404) {
+                session.handleHTTPError(response, "Failure fetching info/configuration");
+                return;
+            }
+
+            // End-point might not be available (404) if server is running an older version.
+            // We will use default config values in this case.
+            session.config.infoConfiguration = new InfoConfiguration();
+            session.advance();
+        }
+
+        @Override
+        public void handleError(final Exception e) {
+            session.abort(e, "Failure fetching info/configuration");
+        }
+    }
+    @Override
+    public void execute() {
+        final StageInfoConfigurationDelegate delegate = new StageInfoConfigurationDelegate();
+        final JSONRecordFetcher fetcher = new JSONRecordFetcher(configurationURL, authHeaderProvider);
+        fetcher.fetch(delegate);
+    }
+}
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FormHistoryServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/FormHistoryServerSyncStage.java
@@ -40,16 +40,17 @@ public class FormHistoryServerSyncStage 
   @Override
   protected Repository getRemoteRepository() throws URISyntaxException {
     String collection = getCollection();
     return new ConstrainedServer11Repository(
         collection,
         session.config.storageURL(),
         session.getAuthHeaderProvider(),
         session.config.infoCollections,
+        session.config.infoConfiguration,
         FORM_HISTORY_REQUEST_LIMIT,
         FORM_HISTORY_SORT);
   }
 
   @Override
   protected Repository getLocalRepository() {
     return new FormHistoryRepositorySession.FormHistoryRepository();
   }
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/GlobalSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/GlobalSyncStage.java
@@ -13,16 +13,17 @@ import java.util.Map;
 import org.mozilla.gecko.sync.GlobalSession;
 
 
 public interface GlobalSyncStage {
   public static enum Stage {
     idle,                       // Start state.
     checkPreconditions,         // Preparation of the basics. TODO: clear status
     fetchInfoCollections,       // Take a look at timestamps.
+    fetchInfoConfiguration,     // Fetch server upload limits
     fetchMetaGlobal,
     ensureKeysStage,
     /*
     ensureSpecialRecords,
     updateEngineTimestamps,
     */
     syncClientsEngine(SyncClientsEngineStage.STAGE_NAME),
     /*
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/SafeConstrainedServer11Repository.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/SafeConstrainedServer11Repository.java
@@ -3,16 +3,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 package org.mozilla.gecko.sync.stage;
 
 import java.net.URISyntaxException;
 
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.InfoCounts;
 import org.mozilla.gecko.sync.JSONRecordFetcher;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.ConstrainedServer11Repository;
 import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 
@@ -32,21 +33,22 @@ public class SafeConstrainedServer11Repo
 
   // This can be lazily evaluated if we need it.
   private final JSONRecordFetcher countFetcher;
 
   public SafeConstrainedServer11Repository(String collection,
                                            String storageURL,
                                            AuthHeaderProvider authHeaderProvider,
                                            InfoCollections infoCollections,
+                                           InfoConfiguration infoConfiguration,
                                            long limit,
                                            String sort,
                                            JSONRecordFetcher countFetcher)
     throws URISyntaxException {
-    super(collection, storageURL, authHeaderProvider, infoCollections, limit, sort);
+    super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration, limit, sort);
     if (countFetcher == null) {
       throw new IllegalArgumentException("countFetcher must not be null");
     }
     this.countFetcher = countFetcher;
   }
 
   @Override
   public void createSession(RepositorySessionCreationDelegate delegate,
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/ServerSyncStage.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/stage/ServerSyncStage.java
@@ -140,17 +140,18 @@ public abstract class ServerSyncStage ex
   protected abstract RecordFactory getRecordFactory();
 
   // Override this in subclasses.
   protected Repository getRemoteRepository() throws URISyntaxException {
     String collection = getCollection();
     return new Server11Repository(collection,
                                   session.config.storageURL(),
                                   session.getAuthHeaderProvider(),
-                                  session.config.infoCollections);
+                                  session.config.infoCollections,
+                                  session.config.infoConfiguration);
   }
 
   /**
    * Return a Crypto5Middleware-wrapped Server11Repository.
    *
    * @throws NoCollectionKeysSetException
    * @throws URISyntaxException
    */
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer11Repository.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/net/test/TestServer11Repository.java
@@ -3,44 +3,46 @@
 
 package org.mozilla.android.sync.net.test;
 
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.repositories.Server11Repository;
 
 import java.net.URI;
 import java.net.URISyntaxException;
 
 @RunWith(TestRunner.class)
 public class TestServer11Repository {
 
   private static final String COLLECTION = "bookmarks";
   private static final String COLLECTION_URL = "http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage";
 
   protected final InfoCollections infoCollections = new InfoCollections();
+  protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
 
   public static void assertQueryEquals(String expected, URI u) {
     Assert.assertEquals(expected, u.getRawQuery());
   }
 
   @SuppressWarnings("static-method")
   @Test
   public void testCollectionURIFull() throws URISyntaxException {
-    Server11Repository r = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections);
+    Server11Repository r = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
     assertQueryEquals("full=1&newer=5000.000",              r.collectionURI(true,  5000000L, -1,    null, null));
     assertQueryEquals("newer=1230.000",                     r.collectionURI(false, 1230000L, -1,    null, null));
     assertQueryEquals("newer=5000.000&limit=10",            r.collectionURI(false, 5000000L, 10,    null, null));
     assertQueryEquals("full=1&newer=5000.000&sort=index",   r.collectionURI(true,  5000000L,  0, "index", null));
     assertQueryEquals("full=1&ids=123,abc",                 r.collectionURI(true,       -1L, -1,    null, "123,abc"));
   }
 
   @Test
   public void testCollectionURI() throws URISyntaxException {
-    Server11Repository noTrailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections);
-    Server11Repository trailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL + "/", null, infoCollections);
+    Server11Repository noTrailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
+    Server11Repository trailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL + "/", null, infoCollections, infoConfiguration);
     Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", noTrailingSlash.collectionURI().toASCIIString());
     Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", trailingSlash.collectionURI().toASCIIString());
   }
 }
--- a/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer11RepositorySession.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/android/sync/test/TestServer11RepositorySession.java
@@ -1,53 +1,46 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.android.sync.test;
 
-import ch.boye.httpclientandroidlib.HttpEntity;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.android.sync.test.SynchronizerHelpers.TrackingWBORepository;
 import org.mozilla.android.sync.test.helpers.BaseTestStorageRequestDelegate;
 import org.mozilla.android.sync.test.helpers.HTTPServerTestHelper;
 import org.mozilla.android.sync.test.helpers.MockServer;
-import org.mozilla.gecko.background.testhelpers.MockRecord;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.background.testhelpers.WaitHelper;
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.JSONRecordFetcher;
 import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.crypto.KeyBundle;
 import org.mozilla.gecko.sync.middleware.Crypto5MiddlewareRepository;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.net.BaseResource;
 import org.mozilla.gecko.sync.net.BasicAuthHeaderProvider;
-import org.mozilla.gecko.sync.net.SyncStorageRecordRequest;
 import org.mozilla.gecko.sync.net.SyncStorageResponse;
 import org.mozilla.gecko.sync.repositories.FetchFailedException;
-import org.mozilla.gecko.sync.repositories.Repository;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.Server11Repository;
-import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
 import org.mozilla.gecko.sync.repositories.StoreFailedException;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
 import org.mozilla.gecko.sync.repositories.domain.BookmarkRecordFactory;
-import org.mozilla.gecko.sync.repositories.domain.Record;
 import org.mozilla.gecko.sync.stage.SafeConstrainedServer11Repository;
 import org.mozilla.gecko.sync.synchronizer.ServerLocalSynchronizer;
 import org.mozilla.gecko.sync.synchronizer.Synchronizer;
 import org.simpleframework.http.ContentType;
 import org.simpleframework.http.Request;
 import org.simpleframework.http.Response;
 
 import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 @RunWith(TestRunner.class)
 public class TestServer11RepositorySession {
@@ -65,104 +58,67 @@ public class TestServer11RepositorySessi
       System.out.println("Content-Type:" + contentType);
       super.handle(request, response, 200, "{success:[]}");
     }
   }
 
   private static final int    TEST_PORT   = HTTPServerTestHelper.getTestPort();
   private static final String TEST_SERVER = "http://localhost:" + TEST_PORT + "/";
   static final String LOCAL_BASE_URL      = TEST_SERVER + "1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/";
-  static final String LOCAL_REQUEST_URL   = LOCAL_BASE_URL + "storage/bookmarks";
   static final String LOCAL_INFO_BASE_URL = LOCAL_BASE_URL + "info/";
   static final String LOCAL_COUNTS_URL    = LOCAL_INFO_BASE_URL + "collection_counts";
 
   // Corresponds to rnewman+atest1@mozilla.com, local.
   static final String TEST_USERNAME          = "n6ec3u5bee3tixzp2asys7bs6fve4jfw";
   static final String TEST_PASSWORD          = "passowrd";
   static final String SYNC_KEY          = "eh7ppnb82iwr5kt3z3uyi5vr44";
 
   public final AuthHeaderProvider authHeaderProvider = new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD);
   protected final InfoCollections infoCollections = new InfoCollections();
+  protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
 
   // Few-second timeout so that our longer operations don't time out and cause spurious error-handling results.
   private static final int SHORT_TIMEOUT = 10000;
 
   public AuthHeaderProvider getAuthHeaderProvider() {
     return new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD);
   }
 
   private HTTPServerTestHelper data     = new HTTPServerTestHelper();
 
-  public class MockServer11RepositorySession extends Server11RepositorySession {
-    public MockServer11RepositorySession(Repository repository) {
-      super(repository);
-    }
-
-    public RecordUploadRunnable getRecordUploadRunnable() {
-      // TODO: implement upload delegate in the class, too!
-      return new RecordUploadRunnable(null, recordsBuffer, recordGuidsBuffer, byteCount);
-    }
-
-    public void enqueueRecord(Record r) {
-      super.enqueue(r);
-    }
-
-    public HttpEntity getEntity() {
-      return this.getRecordUploadRunnable().getBodyEntity();
-    }
-  }
-
   public class TestSyncStorageRequestDelegate extends
   BaseTestStorageRequestDelegate {
     public TestSyncStorageRequestDelegate(String username, String password) {
       super(username, password);
     }
 
     @Override
     public void handleRequestSuccess(SyncStorageResponse res) {
       assertTrue(res.wasSuccessful());
       assertTrue(res.httpResponse().containsHeader("X-Weave-Timestamp"));
       BaseResource.consumeEntity(res);
       data.stopHTTPServer();
     }
   }
 
-  @Test
-  public void test() throws URISyntaxException {
-
-    BaseResource.rewriteLocalhost = false;
-    data.startHTTPServer(new POSTMockServer());
-
-    MockServer11RepositorySession session = new MockServer11RepositorySession(
-        null);
-    session.enqueueRecord(new MockRecord(Utils.generateGuid(), null, 0, false));
-    session.enqueueRecord(new MockRecord(Utils.generateGuid(), null, 0, false));
-
-    URI uri = new URI(LOCAL_REQUEST_URL);
-    SyncStorageRecordRequest r = new SyncStorageRecordRequest(uri);
-    TestSyncStorageRequestDelegate delegate = new TestSyncStorageRequestDelegate(TEST_USERNAME, TEST_PASSWORD);
-    r.delegate = delegate;
-    r.post(session.getEntity());
-  }
-
   @SuppressWarnings("static-method")
   protected TrackingWBORepository getLocal(int numRecords) {
     final TrackingWBORepository local = new TrackingWBORepository();
     for (int i = 0; i < numRecords; i++) {
       BookmarkRecord outbound = new BookmarkRecord("outboundFail" + i, "bookmarks", 1, false);
       local.wbos.put(outbound.guid, outbound);
     }
     return local;
   }
 
   protected Exception doSynchronize(MockServer server) throws Exception {
     final String COLLECTION = "test";
 
     final TrackingWBORepository local = getLocal(100);
-    final Server11Repository remote = new Server11Repository(COLLECTION, getCollectionURL(COLLECTION), authHeaderProvider, infoCollections);
+    final Server11Repository remote = new Server11Repository(COLLECTION, getCollectionURL(COLLECTION), authHeaderProvider, infoCollections, infoConfiguration);
     KeyBundle collectionKey = new KeyBundle(TEST_USERNAME, SYNC_KEY);
     Crypto5MiddlewareRepository cryptoRepo = new Crypto5MiddlewareRepository(remote, collectionKey);
     cryptoRepo.recordFactory = new BookmarkRecordFactory();
 
     final Synchronizer synchronizer = new ServerLocalSynchronizer();
     synchronizer.repositoryA = cryptoRepo;
     synchronizer.repositoryB = local;
 
@@ -229,16 +185,17 @@ public class TestServer11RepositorySessi
       }
     };
     final JSONRecordFetcher countsFetcher = new JSONRecordFetcher(LOCAL_COUNTS_URL, getAuthHeaderProvider());
     String collection = "bookmarks";
     final SafeConstrainedServer11Repository remote = new SafeConstrainedServer11Repository(collection,
         getCollectionURL(collection),
         getAuthHeaderProvider(),
         infoCollections,
+        infoConfiguration,
         5000, "sortindex", countsFetcher);
 
     data.startHTTPServer(server);
     final AtomicBoolean out = new AtomicBoolean(false);
 
     // Verify that shouldSkip returns true due to a fetch of too large counts,
     // rather than due to a timeout failure waiting to fetch counts.
     try {
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/MockRecord.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/background/testhelpers/MockRecord.java
@@ -1,20 +1,30 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 package org.mozilla.gecko.background.testhelpers;
 
 import org.mozilla.gecko.sync.ExtendedJSONObject;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
+import java.util.Random;
+
 public class MockRecord extends Record {
-
+  private final int payloadByteCount;
   public MockRecord(String guid, String collection, long lastModified, boolean deleted) {
     super(guid, collection, lastModified, deleted);
+    // Payload used to be "foo", so let's not stray too far.
+    // Perhaps some tests "depend" on that payload size.
+    payloadByteCount = 3;
+  }
+
+  public MockRecord(String guid, String collection, long lastModified, boolean deleted, int payloadByteCount) {
+    super(guid, collection, lastModified, deleted);
+    this.payloadByteCount = payloadByteCount;
   }
 
   @Override
   protected void populatePayload(ExtendedJSONObject payload) {
   }
 
   @Override
   protected void initFromPayload(ExtendedJSONObject payload) {
@@ -24,11 +34,18 @@ public class MockRecord extends Record {
   public Record copyWithIDs(String guid, long androidID) {
     MockRecord r = new MockRecord(guid, this.collection, this.lastModified, this.deleted);
     r.androidID = androidID;
     return r;
   }
 
   @Override
   public String toJSONString() {
-    return "{\"id\":\"" + guid + "\", \"payload\": \"foo\"}";
+    // Build up a randomish payload string based on the length we were asked for.
+    final Random random = new Random();
+    final char[] payloadChars = new char[payloadByteCount];
+    for (int i = 0; i < payloadByteCount; i++) {
+      payloadChars[i] = (char) (random.nextInt(26) + 'a');
+    }
+    final String payloadString = new String(payloadChars);
+    return "{\"id\":\"" + guid + "\", \"payload\": \"" + payloadString + "\"}";
   }
 }
\ No newline at end of file
--- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/test/TestSafeConstrainedServer11Repository.java
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/test/TestSafeConstrainedServer11Repository.java
@@ -6,16 +6,17 @@ package org.mozilla.gecko.sync.repositor
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.android.sync.test.helpers.HTTPServerTestHelper;
 import org.mozilla.android.sync.test.helpers.MockServer;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.background.testhelpers.WaitHelper;
 import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.JSONRecordFetcher;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
 import org.mozilla.gecko.sync.stage.SafeConstrainedServer11Repository;
 import org.simpleframework.http.Request;
 import org.simpleframework.http.Response;
 
@@ -30,16 +31,17 @@ public class TestSafeConstrainedServer11
   private static final String  TEST_USERNAME  = "c6o7dvmr2c4ud2fyv6woz2u4zi22bcyd";
   private static final String  TEST_BASE_PATH = "/1.1/" + TEST_USERNAME + "/";
 
   public AuthHeaderProvider getAuthHeaderProvider() {
     return null;
   }
 
   protected final InfoCollections infoCollections = new InfoCollections();
+  protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
 
   private class CountsMockServer extends MockServer {
     public final AtomicInteger count = new AtomicInteger(0);
     public final AtomicBoolean error = new AtomicBoolean(false);
 
     @Override
     public void handle(Request request, Response response) {
       final String path = request.getPath().getPath();
@@ -80,17 +82,17 @@ public class TestSafeConstrainedServer11
     try {
       String countsURL = TEST_SERVER + TEST_BASE_PATH + "info/collection_counts";
       JSONRecordFetcher countFetcher = new JSONRecordFetcher(countsURL, getAuthHeaderProvider());
       String sort = "sortindex";
       String collection = "rotary";
 
       final int TEST_LIMIT = 1000;
       final SafeConstrainedServer11Repository repo = new SafeConstrainedServer11Repository(
-          collection, getCollectionURL(collection), null, infoCollections,
+          collection, getCollectionURL(collection), null, infoCollections, infoConfiguration,
           TEST_LIMIT, sort, countFetcher);
 
       final AtomicBoolean shouldSkipLots = new AtomicBoolean(false);
       final AtomicBoolean shouldSkipFew = new AtomicBoolean(true);
       final AtomicBoolean shouldSkip503 = new AtomicBoolean (false);
 
       WaitHelper.getTestWaiter().performWait(2000, new Runnable() {
         @Override
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/uploaders/BatchMetaTest.java
@@ -0,0 +1,282 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mozilla.gecko.background.testhelpers.TestRunner;
+
+import static org.junit.Assert.*;
+
+@RunWith(TestRunner.class)
+public class BatchMetaTest {
+    private BatchMeta batchMeta;
+    private final long byteLimit = 1024;
+    private final long recordLimit = 5;
+    private final Object lock = new Object();
+    private final Long collectionLastModified = 123L;
+
+    @Before
+    public void setUp() throws Exception {
+        batchMeta = new BatchMeta(lock, byteLimit, recordLimit, collectionLastModified);
+    }
+
+    @Test
+    public void testConstructor() {
+        assertEquals(batchMeta.collectionLastModified, collectionLastModified);
+
+        BatchMeta otherBatchMeta = new BatchMeta(lock, byteLimit, recordLimit, null);
+        assertNull(otherBatchMeta.collectionLastModified);
+    }
+
+    @Test
+    public void testGetLastModified() {
+        // Defaults to collection L-M
+        assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
+
+        try {
+            batchMeta.setLastModified(333L, true);
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {}
+
+        assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
+    }
+
+    @Test
+    public void testSetLastModified() {
+        assertEquals(batchMeta.getLastModified(), collectionLastModified);
+
+        try {
+            batchMeta.setLastModified(123L, true);
+            assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+            fail("Should not check for modifications on first L-M set");
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {
+            fail("Should not check for modifications on first L-M set");
+        }
+
+        // Now the same, but passing in 'false' for "expecting to change".
+        batchMeta.reset();
+        assertEquals(batchMeta.getLastModified(), collectionLastModified);
+
+        try {
+            batchMeta.setLastModified(123L, false);
+            assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+            fail("Should not check for modifications on first L-M set");
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {
+            fail("Should not check for modifications on first L-M set");
+        }
+
+        // Test that we can't modify L-M when we're not expecting to
+        try {
+            batchMeta.setLastModified(333L, false);
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+            assertTrue("Must throw when L-M changes unexpectedly", true);
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {
+            fail("Not expecting did-not-change throw");
+        }
+        assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
+
+        // Test that we can modify L-M when we're expecting to
+        try {
+            batchMeta.setLastModified(333L, true);
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+            fail("Not expecting changed-unexpectedly throw");
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {
+            fail("Not expecting did-not-change throw");
+        }
+        assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
+
+        // Test that we catch L-M modifications that expect to change but actually don't
+        try {
+            batchMeta.setLastModified(333L, true);
+            fail("Expected-to-change-but-did-not-change did not throw");
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+            fail("Not expecting changed-unexpectedly throw");
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {
+        }
+        assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
+    }
+
+    @Test
+    public void testSetToken() {
+        assertNull(batchMeta.getToken());
+
+        try {
+            batchMeta.setToken("MTIzNA", false);
+        } catch (BatchingUploader.TokenModifiedException e) {
+            fail("Should be able to set token for the first time");
+        }
+        assertEquals("MTIzNA", batchMeta.getToken());
+
+        try {
+            batchMeta.setToken("XYCvNA", false);
+        } catch (BatchingUploader.TokenModifiedException e) {
+            assertTrue("Should not be able to modify a token", true);
+        }
+        assertEquals("MTIzNA", batchMeta.getToken());
+
+        try {
+            batchMeta.setToken("XYCvNA", true);
+        } catch (BatchingUploader.TokenModifiedException e) {
+            assertTrue("Should catch non-null tokens during onCommit sets", true);
+        }
+        assertEquals("MTIzNA", batchMeta.getToken());
+
+        try {
+            batchMeta.setToken(null, true);
+        } catch (BatchingUploader.TokenModifiedException e) {
+            fail("Should be able to set token to null during onCommit set");
+        }
+        assertNull(batchMeta.getToken());
+    }
+
+    @Test
+    public void testRecordSucceeded() {
+        assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
+
+        batchMeta.recordSucceeded("guid1");
+
+        assertTrue(batchMeta.getSuccessRecordGuids().size() == 1);
+        assertTrue(batchMeta.getSuccessRecordGuids().contains("guid1"));
+
+        try {
+            batchMeta.recordSucceeded(null);
+            fail();
+        } catch (IllegalStateException e) {
+            assertTrue("Should not be able to 'succeed' a null guid", true);
+        }
+    }
+
+    @Test
+    public void testByteLimits() {
+        assertTrue(batchMeta.canFit(0));
+
+        // Should just fit
+        assertTrue(batchMeta.canFit(byteLimit - BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+
+        // Can't fit a record due to payload overhead.
+        assertFalse(batchMeta.canFit(byteLimit));
+
+        assertFalse(batchMeta.canFit(byteLimit + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertFalse(batchMeta.canFit(byteLimit * 1000));
+
+        long recordDelta = byteLimit / 2;
+        assertFalse(batchMeta.addAndEstimateIfFull(recordDelta));
+
+        // Record delta shouldn't fit due to payload overhead.
+        assertFalse(batchMeta.canFit(recordDelta));
+    }
+
+    @Test
+    public void testCountLimits() {
+        // Our record limit is 5, let's add 4.
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+
+        // 5th record still fits in
+        assertTrue(batchMeta.canFit(1));
+
+        // Add the 5th record
+        assertTrue(batchMeta.addAndEstimateIfFull(1));
+
+        // 6th record won't fit
+        assertFalse(batchMeta.canFit(1));
+    }
+
+    @Test
+    public void testNeedCommit() {
+        assertFalse(batchMeta.needToCommit());
+
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+
+        assertTrue(batchMeta.needToCommit());
+
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+
+        assertTrue(batchMeta.needToCommit());
+
+        batchMeta.reset();
+
+        assertFalse(batchMeta.needToCommit());
+    }
+
+    @Test
+    public void testAdd() {
+        // Ensure we account for payload overhead twice when the batch is empty.
+        // Payload overhead is either RECORDS_START or RECORDS_END, and for an empty payload
+        // we need both.
+        assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(batchMeta.getRecordCount() == 0);
+
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+
+        assertTrue(batchMeta.getByteCount() == (1 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertTrue(batchMeta.getRecordCount() == 1);
+
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+
+        assertTrue(batchMeta.getByteCount() == (4 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertTrue(batchMeta.getRecordCount() == 4);
+
+        assertTrue(batchMeta.addAndEstimateIfFull(1));
+
+        try {
+            assertTrue(batchMeta.addAndEstimateIfFull(1));
+            fail("BatchMeta should not let us insert records that won't fit");
+        } catch (IllegalStateException e) {
+            assertTrue(true);
+        }
+    }
+
+    @Test
+    public void testReset() {
+        assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(batchMeta.getRecordCount() == 0);
+        assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
+
+        // Shouldn't throw even if already empty
+        batchMeta.reset();
+        assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(batchMeta.getRecordCount() == 0);
+        assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
+
+        assertFalse(batchMeta.addAndEstimateIfFull(1));
+        batchMeta.recordSucceeded("guid1");
+        try {
+            batchMeta.setToken("MTIzNA", false);
+        } catch (BatchingUploader.TokenModifiedException e) {}
+        try {
+            batchMeta.setLastModified(333L, true);
+        } catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
+        } catch (BatchingUploader.LastModifiedDidNotChange e) {}
+        assertEquals(Long.valueOf(333L), batchMeta.getLastModified());
+        assertEquals("MTIzNA", batchMeta.getToken());
+        assertTrue(batchMeta.getSuccessRecordGuids().size() == 1);
+
+        batchMeta.reset();
+
+        // Counts must be reset
+        assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(batchMeta.getRecordCount() == 0);
+        assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
+
+        // Collection L-M shouldn't change
+        assertEquals(batchMeta.collectionLastModified, collectionLastModified);
+
+        // Token must be reset
+        assertNull(batchMeta.getToken());
+
+        // L-M must be reverted to collection L-M
+        assertEquals(batchMeta.getLastModified(), collectionLastModified);
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploaderTest.java
@@ -0,0 +1,441 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.support.annotation.NonNull;
+
+import static org.junit.Assert.*;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mozilla.gecko.background.testhelpers.MockRecord;
+import org.mozilla.gecko.background.testhelpers.TestRunner;
+import org.mozilla.gecko.sync.ExtendedJSONObject;
+import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
+import org.mozilla.gecko.sync.Utils;
+import org.mozilla.gecko.sync.repositories.Server11Repository;
+import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
+
+import java.net.URISyntaxException;
+import java.util.Random;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+
+@RunWith(TestRunner.class)
+public class BatchingUploaderTest {
+    class MockExecutorService implements Executor {
+        public int totalPayloads = 0;
+        public int commitPayloads = 0;
+
+        @Override
+        public void execute(@NonNull Runnable command) {
+            ++totalPayloads;
+            if (((RecordUploadRunnable) command).isCommit) {
+                ++commitPayloads;
+            }
+        }
+    }
+
+    class MockStoreDelegate implements RepositorySessionStoreDelegate {
+        public int storeFailed = 0;
+        public int storeSucceeded = 0;
+        public int storeCompleted = 0;
+
+        @Override
+        public void onRecordStoreFailed(Exception ex, String recordGuid) {
+            ++storeFailed;
+        }
+
+        @Override
+        public void onRecordStoreSucceeded(String guid) {
+            ++storeSucceeded;
+        }
+
+        @Override
+        public void onStoreCompleted(long storeEnd) {
+            ++storeCompleted;
+        }
+
+        @Override
+        public RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService executor) {
+            return null;
+        }
+    }
+
+    private Executor workQueue;
+    private RepositorySessionStoreDelegate storeDelegate;
+
+    @Before
+    public void setUp() throws Exception {
+        workQueue = new MockExecutorService();
+        storeDelegate = new MockStoreDelegate();
+    }
+
+    @Test
+    public void testProcessEvenPayloadBatch() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        // 1st
+        uploader.process(record);
+        assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 2nd -> payload full
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 3rd
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 4th -> batch & payload full
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 5th
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 6th -> payload full
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 7th
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 8th -> batch & payload full
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+        // 9th
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+        // 10th -> payload full
+        uploader.process(record);
+        assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+        // 11th
+        uploader.process(record);
+        assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+        // 12th -> batch & payload full
+        uploader.process(record);
+        assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(3, ((MockExecutorService) workQueue).commitPayloads);
+        // 13th
+        uploader.process(record);
+        assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(3, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testProcessUnevenPayloadBatch() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 5);
+
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        // 1st
+        uploader.process(record);
+        assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 2nd -> payload full
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 3rd
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 4th -> payload full
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 5th -> batch full
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 6th -> starts new batch
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 7th -> payload full
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 8th
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 9th -> payload full
+        uploader.process(record);
+        assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+        // 10th -> batch full
+        uploader.process(record);
+        assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+        // 11th -> starts new batch
+        uploader.process(record);
+        assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNonBatchingOptimization() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        // 1st
+        uploader.process(record);
+        assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 2nd
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 3rd
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+        // 4th
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // 5th
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // And now we tell uploader that batching isn't supported.
+        // It shouldn't bother with batches from now on, just payloads.
+        uploader.setInBatchingMode(false);
+
+        // 6th
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // 7th
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // 8th
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // 9th
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        // 10th
+        uploader.process(record);
+        assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testPreemptiveUploadByteCounts() {
+        // While processing a record, if we know for sure that another one won't fit,
+        // we upload the payload.
+        BatchingUploader uploader = makeConstrainedUploader(3, 6);
+
+        // Payload byte max: 1024; batch byte max: 4096
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false, 400);
+
+        uploader.process(record);
+        assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+
+        // After 2nd record, byte count is at 800+overhead. Our payload max is 1024, so it's unlikely
+        // we can fit another record at this pace. Expect payload to be uploaded.
+        uploader.process(record);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+
+        // After this record, we'll have less than 124 bytes of room left in the payload. Expect upload.
+        record = new MockRecord(Utils.generateGuid(), null, 0, false, 970);
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+
+        uploader.process(record);
+        assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+
+        // At this point our byte count for the batch is at 3600+overhead;
+        // since we have just 496 bytes left in the batch, it's unlikely we'll fit another record.
+        // Expect a batch commit
+        uploader.process(record);
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testRandomPayloadSizesBatching() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        final Random random = new Random();
+        for (int i = 0; i < 15000; i++) {
+            uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
+        }
+    }
+
+    @Test
+    public void testRandomPayloadSizesNonBatching() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        final Random random = new Random();
+        uploader.setInBatchingMode(false);
+        for (int i = 0; i < 15000; i++) {
+            uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
+        }
+    }
+
+    @Test
+    public void testRandomPayloadSizesNonBatchingDelayed() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        final Random random = new Random();
+        // Delay telling uploader that batching isn't supported.
+        // Randomize how many records we wait for.
+        final int delay = random.nextInt(20);
+        for (int i = 0; i < 15000; i++) {
+            if (delay == i) {
+                uploader.setInBatchingMode(false);
+            }
+            uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
+        }
+    }
+
+    @Test
+    public void testNoMoreRecordsAfterPayloadPost() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        // Process two records (payload limit is also two, batch is four),
+        // and ensure that 'no more records' commits.
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.setInBatchingMode(true);
+        uploader.commitIfNecessaryAfterLastPayload();
+        // One will be a payload post, the other one is batch commit (empty payload)
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNoMoreRecordsAfterPayloadPostWithOneRecordLeft() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        // Process two records (payload limit is also two, batch is four),
+        // and ensure that 'no more records' commits.
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.commitIfNecessaryAfterLastPayload();
+        // One will be a payload post, the other one is batch commit (one record payload)
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNoMoreRecordsNoOp() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        uploader.commitIfNecessaryAfterLastPayload();
+        assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNoMoreRecordsNoOpAfterCommit() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.process(record);
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+
+        uploader.commitIfNecessaryAfterLastPayload();
+        assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNoMoreRecordsEvenNonBatching() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        // Process two records (payload limit is also two, batch is four),
+        // set non-batching mode, and ensure that 'no more records' doesn't commit.
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        uploader.process(record);
+        uploader.process(record);
+        uploader.setInBatchingMode(false);
+        uploader.commitIfNecessaryAfterLastPayload();
+        // Just a single payload post; no batch commit happens in non-batching mode
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    @Test
+    public void testNoMoreRecordsIncompletePayload() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 4);
+
+        // We have one record (payload limit is 2), and "no-more-records" signal should commit it.
+        MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
+        uploader.process(record);
+
+        uploader.commitIfNecessaryAfterLastPayload();
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
+    }
+
+    private BatchingUploader makeConstrainedUploader(long maxPostRecords, long maxTotalRecords) {
+        Server11RepositorySession server11RepositorySession = new Server11RepositorySession(
+                makeCountConstrainedRepository(maxPostRecords, maxTotalRecords)
+        );
+        server11RepositorySession.setStoreDelegate(storeDelegate);
+        return new BatchingUploader(server11RepositorySession, workQueue, storeDelegate);
+    }
+
+    private Server11Repository makeCountConstrainedRepository(long maxPostRecords, long maxTotalRecords) {
+        return makeConstrainedRepository(1024, 1024, maxPostRecords, 4096, maxTotalRecords);
+    }
+
+    private Server11Repository makeConstrainedRepository(long maxRequestBytes, long maxPostBytes, long maxPostRecords, long maxTotalBytes, long maxTotalRecords) {
+        ExtendedJSONObject infoConfigurationJSON = new ExtendedJSONObject();
+        infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_BYTES, maxTotalBytes);
+        infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_RECORDS, maxTotalRecords);
+        infoConfigurationJSON.put(InfoConfiguration.MAX_POST_RECORDS, maxPostRecords);
+        infoConfigurationJSON.put(InfoConfiguration.MAX_POST_BYTES, maxPostBytes);
+        infoConfigurationJSON.put(InfoConfiguration.MAX_REQUEST_BYTES, maxRequestBytes);
+
+        InfoConfiguration infoConfiguration = new InfoConfiguration(infoConfigurationJSON);
+
+        try {
+            return new Server11Repository(
+                    "dummyCollection",
+                    "http://dummy.url/",
+                    null,
+                    new InfoCollections(),
+                    infoConfiguration
+            );
+        } catch (URISyntaxException e) {
+            // Won't throw, and this won't happen.
+            return null;
+        }
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/uploaders/PayloadTest.java
@@ -0,0 +1,137 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mozilla.gecko.background.testhelpers.TestRunner;
+
+import static org.junit.Assert.*;
+
+@RunWith(TestRunner.class)
+public class PayloadTest {
+    private Payload payload;
+    private long byteLimit = 1024;
+    private long recordLimit = 5;
+    private Object lock = new Object();
+
+    @Before
+    public void setUp() throws Exception {
+        payload = new Payload(lock, byteLimit, recordLimit);
+    }
+
+    @Test
+    public void testByteLimits() {
+        assertTrue(payload.canFit(0));
+
+        // Should just fit
+        assertTrue(payload.canFit(byteLimit - BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+
+        // Can't fit a record due to payload overhead.
+        assertFalse(payload.canFit(byteLimit));
+
+        assertFalse(payload.canFit(byteLimit + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertFalse(payload.canFit(byteLimit * 1000));
+
+        long recordDelta = byteLimit / 2;
+        assertFalse(payload.addAndEstimateIfFull(recordDelta, new byte[0], null));
+
+        // Record delta shouldn't fit due to payload overhead.
+        assertFalse(payload.canFit(recordDelta));
+    }
+
+    @Test
+    public void testCountLimits() {
+        byte[] bytes = new byte[0];
+
+        // Our record limit is 5, let's add 4.
+        assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
+        assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
+        assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
+        assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
+
+        // 5th record still fits in
+        assertTrue(payload.canFit(1));
+
+        // Add the 5th record
+        assertTrue(payload.addAndEstimateIfFull(1, bytes, null));
+
+        // 6th record won't fit
+        assertFalse(payload.canFit(1));
+    }
+
+    @Test
+    public void testAdd() {
+        assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(payload.getRecordCount() == 0);
+        assertTrue(payload.isEmpty());
+        assertTrue(payload.getRecordsBuffer().isEmpty());
+        assertTrue(payload.getRecordGuidsBuffer().isEmpty());
+
+        try {
+            payload.addAndEstimateIfFull(1024);
+            fail("Simple add is not supported");
+        } catch (UnsupportedOperationException e) {
+            assertTrue(true);
+        }
+
+        byte[] recordBytes1 = new byte[100];
+        assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid1"));
+
+        assertTrue(payload.getRecordsBuffer().size() == 1);
+        assertTrue(payload.getRecordGuidsBuffer().size() == 1);
+        assertTrue(payload.getRecordGuidsBuffer().contains("guid1"));
+        assertTrue(payload.getRecordsBuffer().contains(recordBytes1));
+
+        assertTrue(payload.getByteCount() == (1 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertTrue(payload.getRecordCount() == 1);
+
+        assertFalse(payload.isEmpty());
+
+        assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid2"));
+        assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid3"));
+        assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid4"));
+
+        assertTrue(payload.getByteCount() == (4 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
+        assertTrue(payload.getRecordCount() == 4);
+
+        assertTrue(payload.addAndEstimateIfFull(1, recordBytes1, "guid5"));
+
+        try {
+            assertTrue(payload.addAndEstimateIfFull(1, recordBytes1, "guid6"));
+            fail("Payload should not let us insert records that won't fit");
+        } catch (IllegalStateException e) {
+            assertTrue(true);
+        }
+    }
+
+    @Test
+    public void testReset() {
+        assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(payload.getRecordCount() == 0);
+        assertTrue(payload.getRecordsBuffer().isEmpty());
+        assertTrue(payload.getRecordGuidsBuffer().isEmpty());
+        assertTrue(payload.isEmpty());
+
+        // Shouldn't throw even if already empty
+        payload.reset();
+        assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(payload.getRecordCount() == 0);
+        assertTrue(payload.getRecordsBuffer().isEmpty());
+        assertTrue(payload.getRecordGuidsBuffer().isEmpty());
+        assertTrue(payload.isEmpty());
+
+        byte[] recordBytes1 = new byte[100];
+        assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid1"));
+        assertFalse(payload.isEmpty());
+        payload.reset();
+
+        assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
+        assertTrue(payload.getRecordCount() == 0);
+        assertTrue(payload.getRecordsBuffer().isEmpty());
+        assertTrue(payload.getRecordGuidsBuffer().isEmpty());
+        assertTrue(payload.isEmpty());
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/uploaders/PayloadUploadDelegateTest.java
@@ -0,0 +1,404 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mozilla.gecko.background.testhelpers.TestRunner;
+import org.mozilla.gecko.sync.HTTPFailureException;
+import org.mozilla.gecko.sync.InfoCollections;
+import org.mozilla.gecko.sync.InfoConfiguration;
+import org.mozilla.gecko.sync.NonObjectJSONException;
+import org.mozilla.gecko.sync.net.SyncResponse;
+import org.mozilla.gecko.sync.net.SyncStorageResponse;
+import org.mozilla.gecko.sync.repositories.Server11Repository;
+import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
+import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
+
+import java.io.ByteArrayInputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.concurrent.Executor;
+
+import ch.boye.httpclientandroidlib.HttpResponse;
+import ch.boye.httpclientandroidlib.ProtocolVersion;
+import ch.boye.httpclientandroidlib.entity.BasicHttpEntity;
+import ch.boye.httpclientandroidlib.message.BasicHttpResponse;
+import ch.boye.httpclientandroidlib.message.BasicStatusLine;
+
+import static org.junit.Assert.*;
+
+@RunWith(TestRunner.class)
+public class PayloadUploadDelegateTest {
+    private BatchingUploader batchingUploader;
+
+    class MockUploader extends BatchingUploader {
+        public final ArrayList<String> successRecords = new ArrayList<>();
+        public final HashMap<String, Exception> failedRecords = new HashMap<>();
+        public boolean didLastPayloadFail = false;
+
+        public ArrayList<SyncStorageResponse> successResponses = new ArrayList<>();
+        public int commitPayloadsSucceeded = 0;
+        public int lastPayloadsSucceeded = 0;
+
+        public MockUploader(final Server11RepositorySession repositorySession, final Executor workQueue, final RepositorySessionStoreDelegate sessionStoreDelegate) {
+            super(repositorySession, workQueue, sessionStoreDelegate);
+        }
+
+        @Override
+        public void payloadSucceeded(final SyncStorageResponse response, final boolean isCommit, final boolean isLastPayload) {
+            successResponses.add(response);
+            if (isCommit) {
+                ++commitPayloadsSucceeded;
+            }
+            if (isLastPayload) {
+                ++lastPayloadsSucceeded;
+            }
+        }
+
+        @Override
+        public void recordSucceeded(final String recordGuid) {
+            successRecords.add(recordGuid);
+        }
+
+        @Override
+        public void recordFailed(final String recordGuid) {
+            recordFailed(new Exception(), recordGuid);
+        }
+
+        @Override
+        public void recordFailed(final Exception e, final String recordGuid) {
+            failedRecords.put(recordGuid, e);
+        }
+
+        @Override
+        public void lastPayloadFailed() {
+            didLastPayloadFail = true;
+        }
+    }
+
+    @Before
+    public void setUp() throws Exception {
+        Server11Repository server11Repository = new Server11Repository(
+                "dummyCollection",
+                "http://dummy.url/",
+                null,
+                new InfoCollections(),
+                new InfoConfiguration()
+        );
+        batchingUploader = new MockUploader(
+                new Server11RepositorySession(server11Repository),
+                null,
+                null
+        );
+    }
+
+    @Test
+    public void testHandleRequestSuccessNonSuccess() {
+        ArrayList<String> postedGuids = new ArrayList<>(2);
+        postedGuids.add("testGuid1");
+        postedGuids.add("testGuid2");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+
+        // Test that non-2* responses aren't processed
+        payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(404, null, null));
+        assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccessNoHeaders() {
+        ArrayList<String> postedGuids = new ArrayList<>(2);
+        postedGuids.add("testGuid1");
+        postedGuids.add("testGuid2");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+
+        // Test that responses without X-Last-Modified header aren't processed
+        payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, null, null));
+        assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccessBadBody() {
+        ArrayList<String> postedGuids = new ArrayList<>(2);
+        postedGuids.add("testGuid1");
+        postedGuids.add("testGuid2");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, true);
+
+        // Test that we catch json processing errors
+        payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, "non json body", "123"));
+        assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
+        assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(NonObjectJSONException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+        assertEquals(NonObjectJSONException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccess202NoToken() {
+        ArrayList<String> postedGuids = new ArrayList<>(1);
+        postedGuids.add("testGuid1");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, true);
+
+        // Test that we catch absent tokens in 202 responses
+        payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(202, "{\"success\": []}", "123"));
+        assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccessBad200() {
+        ArrayList<String> postedGuids = new ArrayList<>(1);
+        postedGuids.add("testGuid1");
+
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+
+        // Test that if in batching mode and saw the token, 200 must be a response to a commit
+        try {
+            batchingUploader.getCurrentBatch().setToken("MTIzNA", true);
+        } catch (BatchingUploader.BatchingUploaderException e) {}
+        batchingUploader.setInBatchingMode(true);
+
+        // not a commit, so should fail
+        payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, "{\"success\": []}", "123"));
+        assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(IllegalStateException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccessNonBatchingFailedLM() {
+        ArrayList<String> postedGuids = new ArrayList<>(1);
+        postedGuids.add("guid1");
+        postedGuids.add("guid2");
+        postedGuids.add("guid3");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid1\", \"guid2\", \"guid3\"]}", "123"));
+        assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(3, ((MockUploader) batchingUploader).successRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(1, ((MockUploader) batchingUploader).successResponses.size());
+        assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
+        assertEquals(0, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
+
+        // These should fail, because we're returning a non-changed L-M in a non-batching mode
+        postedGuids.add("guid4");
+        postedGuids.add("guid6");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid4\", 5, \"guid6\"]}", "123"));
+        assertEquals(5, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(3, ((MockUploader) batchingUploader).successRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(1, ((MockUploader) batchingUploader).successResponses.size());
+        assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
+        assertEquals(0, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
+        assertEquals(BatchingUploader.LastModifiedDidNotChange.class,
+                ((MockUploader) batchingUploader).failedRecords.get("guid4").getClass());
+    }
+
+    @Test
+    public void testHandleRequestSuccessNonBatching() {
+        ArrayList<String> postedGuids = new ArrayList<>();
+        postedGuids.add("guid1");
+        postedGuids.add("guid2");
+        postedGuids.add("guid3");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid1\", \"guid2\", \"guid3\"], \"failed\": {}}", "123"));
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid4");
+        postedGuids.add("guid5");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid4\", \"guid5\"], \"failed\": {}}", "333"));
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid6");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, true);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid6\"], \"failed\": {}}", "444"));
+
+        assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(6, ((MockUploader) batchingUploader).successRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(3, ((MockUploader) batchingUploader).successResponses.size());
+        assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
+        assertEquals(1, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
+        assertFalse(batchingUploader.getInBatchingMode());
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid7");
+        postedGuids.add("guid8");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, true);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid8\"], \"failed\": {\"guid7\": \"reason\"}}", "555"));
+        assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
+        assertTrue(((MockUploader) batchingUploader).failedRecords.containsKey("guid7"));
+        assertEquals(7, ((MockUploader) batchingUploader).successRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(4, ((MockUploader) batchingUploader).successResponses.size());
+        assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
+        assertEquals(2, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
+        assertFalse(batchingUploader.getInBatchingMode());
+    }
+
+    @Test
+    public void testHandleRequestSuccessBatching() {
+        ArrayList<String> postedGuids = new ArrayList<>();
+        postedGuids.add("guid1");
+        postedGuids.add("guid2");
+        postedGuids.add("guid3");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(202, "{\"batch\": \"MTIzNA\", \"success\": [\"guid1\", \"guid2\", \"guid3\"], \"failed\": {}}", "123"));
+
+        assertTrue(batchingUploader.getInBatchingMode());
+        assertEquals("MTIzNA", batchingUploader.getCurrentBatch().getToken());
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid4");
+        postedGuids.add("guid5");
+        postedGuids.add("guid6");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, false, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(202, "{\"batch\": \"MTIzNA\", \"success\": [\"guid4\", \"guid5\", \"guid6\"], \"failed\": {}}", "123"));
+
+        assertTrue(batchingUploader.getInBatchingMode());
+        assertEquals("MTIzNA", batchingUploader.getCurrentBatch().getToken());
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid7");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, true, false);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid6\"], \"failed\": {}}", "222"));
+
+        // Even though everything indicates we're not in a batching, we were, so test that
+        // we don't reset the flag.
+        assertTrue(batchingUploader.getInBatchingMode());
+        assertNull(batchingUploader.getCurrentBatch().getToken());
+
+        postedGuids = new ArrayList<>();
+        postedGuids.add("guid8");
+        payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, postedGuids, true, true);
+        payloadUploadDelegate.handleRequestSuccess(
+                makeSyncStorageResponse(200, "{\"success\": [\"guid7\"], \"failed\": {}}", "333"));
+
+        assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(8, ((MockUploader) batchingUploader).successRecords.size());
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+        assertEquals(4, ((MockUploader) batchingUploader).successResponses.size());
+        assertEquals(2, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
+        assertEquals(1, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
+        assertTrue(batchingUploader.getInBatchingMode());
+    }
+
+    @Test
+    public void testHandleRequestError() {
+        ArrayList<String> postedGuids = new ArrayList<>(3);
+        postedGuids.add("testGuid1");
+        postedGuids.add("testGuid2");
+        postedGuids.add("testGuid3");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, false);
+
+        IllegalStateException e = new IllegalStateException();
+        payloadUploadDelegate.handleRequestError(e);
+
+        assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid1"));
+        assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid2"));
+        assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid3"));
+        assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
+
+        payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, true);
+        payloadUploadDelegate.handleRequestError(e);
+        assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
+        assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
+    }
+
+    @Test
+    public void testHandleRequestFailure() {
+        ArrayList<String> postedGuids = new ArrayList<>(3);
+        postedGuids.add("testGuid1");
+        postedGuids.add("testGuid2");
+        postedGuids.add("testGuid3");
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, false);
+
+        final HttpResponse response = new BasicHttpResponse(
+                new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), 503, "Illegal method/protocol"));
+        payloadUploadDelegate.handleRequestFailure(new SyncStorageResponse(response));
+        assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
+        assertEquals(HTTPFailureException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
+        assertEquals(HTTPFailureException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
+        assertEquals(HTTPFailureException.class,
+                ((MockUploader) batchingUploader).failedRecords.get("testGuid3").getClass());
+
+        payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, true);
+        payloadUploadDelegate.handleRequestFailure(new SyncStorageResponse(response));
+        assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
+        assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
+    }
+
+    @Test
+    public void testIfUnmodifiedSince() {
+        PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
+                batchingUploader, new ArrayList<String>(), false, false);
+
+        assertNull(payloadUploadDelegate.ifUnmodifiedSince());
+
+        try {
+            batchingUploader.getCurrentBatch().setLastModified(1471645412480L, true);
+        } catch (BatchingUploader.BatchingUploaderException e) {}
+
+        assertEquals("1471645412.480", payloadUploadDelegate.ifUnmodifiedSince());
+    }
+
+    private SyncStorageResponse makeSyncStorageResponse(int code, String body, String lastModified) {
+        BasicHttpResponse response = new BasicHttpResponse(
+                new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), code, null));
+
+        if (body != null) {
+            BasicHttpEntity entity = new BasicHttpEntity();
+            entity.setContent(new ByteArrayInputStream(body.getBytes()));
+            response.setEntity(entity);
+        }
+
+        if (lastModified != null) {
+            response.addHeader(SyncResponse.X_LAST_MODIFIED, lastModified);
+        }
+        return new SyncStorageResponse(response);
+    }
+}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/sync/repositories/uploaders/RecordUploadRunnableTest.java
@@ -0,0 +1,38 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.sync.repositories.uploaders;
+
+import android.net.Uri;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mozilla.gecko.background.testhelpers.TestRunner;
+
+import java.net.URI;
+
+import static org.junit.Assert.*;
+
+@RunWith(TestRunner.class)
+public class RecordUploadRunnableTest {
+    @Test
+    public void testBuildPostURI() throws Exception {
+        BatchMeta batchMeta = new BatchMeta(new Object(), 1, 1, null);
+        URI postURI = RecordUploadRunnable.buildPostURI(
+                false, batchMeta, Uri.parse("http://example.com/"));
+        assertEquals("http://example.com/?batch=true", postURI.toString());
+
+        postURI = RecordUploadRunnable.buildPostURI(
+                true, batchMeta, Uri.parse("http://example.com/"));
+        assertEquals("http://example.com/?batch=true&commit=true", postURI.toString());
+
+        batchMeta.setToken("MTIzNA", false);
+        postURI = RecordUploadRunnable.buildPostURI(
+                false, batchMeta, Uri.parse("http://example.com/"));
+        assertEquals("http://example.com/?batch=MTIzNA", postURI.toString());
+
+        postURI = RecordUploadRunnable.buildPostURI(
+                true, batchMeta, Uri.parse("http://example.com/"));
+        assertEquals("http://example.com/?batch=MTIzNA&commit=true", postURI.toString());
+    }
+}
\ No newline at end of file
--- a/netwerk/base/Predictor.cpp
+++ b/netwerk/base/Predictor.cpp
@@ -43,21 +43,16 @@
 #include "mozilla/net/NeckoCommon.h"
 #include "mozilla/net/NeckoParent.h"
 
 #include "LoadContextInfo.h"
 #include "mozilla/ipc/URIUtils.h"
 #include "SerializedLoadContext.h"
 #include "mozilla/net/NeckoChild.h"
 
-#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
-#include "nsIPropertyBag2.h"
-static const int32_t ANDROID_23_VERSION = 10;
-#endif
-
 using namespace mozilla;
 
 namespace mozilla {
 namespace net {
 
 Predictor *Predictor::sSelf = nullptr;
 
 static LazyLogModule gPredictorLog("NetworkPredictor");
@@ -585,32 +580,16 @@ Predictor::Init()
 
   if (!NS_IsMainThread()) {
     MOZ_ASSERT(false, "Predictor::Init called off the main thread!");
     return NS_ERROR_UNEXPECTED;
   }
 
   nsresult rv = NS_OK;
 
-#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
-  // This is an ugly hack to disable the predictor on android < 2.3, as it
-  // doesn't play nicely with those android versions, at least on our infra.
-  // Causes timeouts in reftests. See bug 881804 comment 86.
-  nsCOMPtr<nsIPropertyBag2> infoService =
-    do_GetService("@mozilla.org/system-info;1");
-  if (infoService) {
-    int32_t androidVersion = -1;
-    rv = infoService->GetPropertyAsInt32(NS_LITERAL_STRING("version"),
-                                         &androidVersion);
-    if (NS_SUCCEEDED(rv) && (androidVersion < ANDROID_23_VERSION)) {
-      return NS_ERROR_NOT_AVAILABLE;
-    }
-  }
-#endif
-
   rv = InstallObserver();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mLastStartupTime = mStartupTime = NOW_IN_SECONDS();
 
   if (!mDNSListener) {
     mDNSListener = new DNSListener();
   }
--- a/python/mozlint/mozlint/cli.py
+++ b/python/mozlint/mozlint/cli.py
@@ -1,17 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import print_function, unicode_literals
 
 import os
 import sys
-from argparse import ArgumentParser
+from argparse import ArgumentParser, REMAINDER
 
 
 SEARCH_PATHS = []
 
 
 class MozlintParser(ArgumentParser):
     arguments = [
         [['paths'],
@@ -47,24 +47,35 @@ class MozlintParser(ArgumentParser):
                   "mercurial or git."
           }],
         [['-w', '--workdir'],
          {'default': False,
           'action': 'store_true',
           'help': "Lint files touched by changes in the working directory "
                   "(i.e haven't been committed yet). Works with mercurial or git.",
           }],
+        [['extra_args'],
+         {'nargs': REMAINDER,
+          'help': "Extra arguments that will be forwarded to the underlying linter.",
+          }],
     ]
 
     def __init__(self, **kwargs):
         ArgumentParser.__init__(self, usage=self.__doc__, **kwargs)
 
         for cli, args in self.arguments:
             self.add_argument(*cli, **args)
 
+    def parse_known_args(self, *args, **kwargs):
+        # This is here so the eslint mach command doesn't lose 'extra_args'
+        # when using mach's dispatch functionality.
+        args, extra = ArgumentParser.parse_known_args(self, *args, **kwargs)
+        args.extra_args = extra
+        return args, extra
+
 
 def find_linters(linters=None):
     lints = []
     for search_path in SEARCH_PATHS:
         if not os.path.isdir(search_path):
             continue
 
         files = os.listdir(search_path)
--- a/security/sandbox/linux/SandboxFilter.cpp
+++ b/security/sandbox/linux/SandboxFilter.cpp
@@ -536,16 +536,17 @@ public:
     case __NR_chmod:
     case __NR_rename:
     case __NR_symlink:
     case __NR_quotactl:
     case __NR_link:
     case __NR_unlink:
     CASES_FOR_fchown:
     case __NR_fchmod:
+    case __NR_flock:
 #endif
       return Allow();
 
     case __NR_readlink:
     case __NR_readlinkat:
 #ifdef DESKTOP
       // Bug 1290896
       return Allow();
--- a/taskcluster/ci/legacy/tasks/branches/base_jobs.yml
+++ b/taskcluster/ci/legacy/tasks/branches/base_jobs.yml
@@ -267,36 +267,38 @@ builds:
     types:
       opt:
         task: tasks/builds/opt_win64.yml
       debug:
         task: tasks/builds/dbg_win64.yml
 
 # Miscellaneous tasks.
 tasks:
-  eslint-gecko:
-    task: tasks/tests/eslint-gecko.yml
+  mozlint-eslint:
+    task: tasks/tests/mozlint-eslint.yml
     root: true
     when:
       file_patterns:
         # Files that are likely audited.
         - '**/*.js'
         - '**/*.jsm'
         - '**/*.jsx'
         - '**/*.html'
+        - '**/*.xhtml'
         - '**/*.xml'
         # Run when eslint policies change.
         - '**/.eslintignore'
         - '**/*eslintrc*'
         # The plugin implementing custom checks.
         - 'tools/lint/eslint/eslint-plugin-mozilla/**'
         # Other misc lint related files.
+        - 'python/mozlint/**'
         - 'tools/lint/**'
         - 'testing/docker/lint/**'
-  flake8-gecko:
+  mozlint-flake8:
     task: tasks/tests/mozlint-flake8.yml
     root: true
     when:
       file_patterns:
         - '**/*.py'
         - '**/.flake8'
         - 'python/mozlint/**'
         - 'tools/lint/**'
rename from taskcluster/ci/legacy/tasks/tests/eslint-gecko.yml
rename to taskcluster/ci/legacy/tasks/tests/mozlint-eslint.yml
--- a/taskcluster/ci/legacy/tasks/tests/eslint-gecko.yml
+++ b/taskcluster/ci/legacy/tasks/tests/mozlint-eslint.yml
@@ -5,17 +5,17 @@
       build_product: 'lint'
       build_name: 'eslint-gecko'
       build_type: 'opt'
 
 docker-image: lint
 task:
   metadata:
     name: '[TC] - ESLint'
-    description: 'ESLint test'
+    description: 'JavaScript ESLint linter'
 
   payload:
     image:
       type: 'task-image'
       path: 'public/image.tar'
       taskId:
         task-reference: "<docker-image>"
 
@@ -27,21 +27,20 @@ task:
       - -cx
       - >
           cd /home/worker/checkouts/gecko/tools/lint/eslint &&
           /build/tooltool.py fetch -m manifest.tt &&
           tar xvfz eslint.tar.gz &&
           rm eslint.tar.gz &&
           ln -s ../eslint-plugin-mozilla node_modules &&
           cd ../../.. &&
-          tools/lint/eslint/node_modules/.bin/eslint --quiet --plugin html --ext [.js,.jsm,.jsx,.xml,.html,.xhtml] -f tools/lint/eslint-formatter .
-
+          ./mach lint -l eslint -f treeherder --quiet
   extra:
     locations:
-        build: null
-        tests: null
+      build: null
+      tests: null
     treeherder:
-        machine:
-            platform: lint
-        symbol: ES
+      machine:
+        platform: lint
+      symbol: ES
     treeherderEnv:
-        - production
-        - staging
+      - production
+      - staging
--- a/taskcluster/ci/legacy/tasks/tests/mozlint-flake8.yml
+++ b/taskcluster/ci/legacy/tasks/tests/mozlint-flake8.yml
@@ -1,14 +1,14 @@
 ---
 $inherits:
   from: 'tasks/lint.yml'
   variables:
     build_product: 'lint'
-    build_name: 'flake8-gecko'
+    build_name: 'mozlint-flake8'
     build_type: 'opt'
 
 docker-image: lint
 task:
   metadata:
     name: '[TC] - Flake8'
     description: 'Python flake8 linter'
   payload:
--- a/taskcluster/ci/legacy/tasks/windows_build.yml
+++ b/taskcluster/ci/legacy/tasks/windows_build.yml
@@ -1,13 +1,13 @@
 $inherits:
   from: 'tasks/build.yml'
 
 task:
-  workerType: gecko-1-b-win2012
+  workerType: 'gecko-{{level}}-b-win2012'
   payload:
     artifacts:
       -
         type: 'directory'
         path: 'public\build'
         expires:
           relative-datestamp: '1 year'
   extra:
--- a/testing/firefox-ui/harness/firefox_ui_harness/testcases.py
+++ b/testing/firefox-ui/harness/firefox_ui_harness/testcases.py
@@ -1,12 +1,13 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+import os
 import pprint
 from datetime import datetime
 
 import mozfile
 
 from marionette import MarionetteTestCase
 from marionette_driver import Wait
 
@@ -109,20 +110,22 @@ class UpdateTestCase(FirefoxTestCase):
         # Check if the user has permissions to run the update
         self.assertTrue(self.software_update.allowed,
                         'Current user has permissions to update the application.')
 
     def tearDown(self):
         try:
             self.browser.tabbar.close_all_tabs([self.browser.tabbar.selected_tab])
 
+            # Add content of the update log file for detailed failures when applying an update
+            self.updates[self.current_update_index]['update_log'] = self.read_update_log()
+
             # Print results for now until we have treeherder integration
             output = pprint.pformat(self.updates)
             self.logger.info('Update test results: \n{}'.format(output))
-
         finally:
             super(UpdateTestCase, self).tearDown()
 
             # Ensure that no trace of an partially downloaded update remain
             self.remove_downloaded_update()
 
             self.restore_config_files()
 
@@ -350,21 +353,35 @@ class UpdateTestCase(FirefoxTestCase):
                 dialog.close()
 
             finally:
                 self.updates[self.current_update_index]['patch'] = self.patch_info
 
         # Restart Firefox to apply the update
         self.restart()
 
+    def read_update_log(self):
+        """Read the content of the update log file for the last update attempt."""
+        path = os.path.join(os.path.dirname(self.software_update.staging_directory),
+                            'last-update.log')
+        try:
+            with open(path, 'rb') as f:
+                return f.read().splitlines()
+        except IOError as exc:
+            self.logger.warning(str(exc))
+            return None
+
     def remove_downloaded_update(self):
-        """Remove an already downloaded update from the update staging directory."""
-        self.logger.info('Clean-up update staging directory: {}'.format(
-            self.software_update.staging_directory))
-        mozfile.remove(self.software_update.staging_directory)
+        """Remove an already downloaded update from the update staging directory.
+
+        Hereby not only remove the update subdir but everything below 'updates'.
+        """
+        path = os.path.dirname(self.software_update.staging_directory)
+        self.logger.info('Clean-up update staging directory: {}'.format(path))
+        mozfile.remove(path)
 
     def restore_config_files(self):
         # Reset channel-prefs.js file if modified
         try:
             if self.default_update_channel:
                 path = self.default_update_channel['path']
                 self.logger.info('Restoring channel defaults for: {}'.format(path))
                 with open(path, 'w') as f:
--- a/testing/profiles/prefs_general.js
+++ b/testing/profiles/prefs_general.js
@@ -227,17 +227,17 @@ user_pref("browser.snippets.firstrunHome
 user_pref("general.useragent.updates.enabled", false);
 
 // Disable webapp updates.  Yes, it is supposed to be an integer.
 user_pref("browser.webapps.checkForUpdates", 0);
 
 // Enable debug logging in the tcp presentation server.
 user_pref("dom.presentation.tcp_server.debug", true);
 // Enable debug logging in the presentation core service.
-pref("logging.Presentation", "debug");
+user_pref("logging.Presentation", "debug");
 
 // Don't connect to Yahoo! for RSS feed tests.
 // en-US only uses .types.0.uri, but set all of them just to be sure.
 user_pref('browser.contentHandlers.types.0.uri', 'http://test1.example.org/rss?url=%%s')
 user_pref('browser.contentHandlers.types.1.uri', 'http://test1.example.org/rss?url=%%s')
 user_pref('browser.contentHandlers.types.2.uri', 'http://test1.example.org/rss?url=%%s')
 user_pref('browser.contentHandlers.types.3.uri', 'http://test1.example.org/rss?url=%%s')
 user_pref('browser.contentHandlers.types.4.uri', 'http://test1.example.org/rss?url=%%s')
--- a/toolkit/components/narrate/Narrator.jsm
+++ b/toolkit/components/narrate/Narrator.jsm
@@ -139,18 +139,18 @@ Narrator.prototype = {
     win.dispatchEvent(new win.CustomEvent(eventType,
       { detail: Cu.cloneInto(detail, win.document) }));
   },
 
   _speakInner: function() {
     this._win.speechSynthesis.cancel();
     let tw = this._treeWalker;
     let paragraph = tw.currentNode;
-    if (!paragraph) {
-      tw.currentNode = tw.root;
+    if (paragraph == tw.root) {
+      this._sendTestEvent("paragraphsdone", {});
       return Promise.resolve();
     }
 
     let utterance = new this._win.SpeechSynthesisUtterance(
       paragraph.textContent);
     utterance.rate = this._speechOptions.rate;
     if (this._speechOptions.voice) {
       utterance.voice = this._speechOptions.voice;
@@ -188,17 +188,17 @@ Narrator.prototype = {
         if (this._inTest) {
           this._sendTestEvent("paragraphend", {});
         }
 
         if (this._stopped) {
           // User pressed stopped.
           resolve();
         } else {
-          tw.nextNode();
+          tw.currentNode = tw.nextNode() || tw.root;
           this._speakInner().then(resolve, reject);
         }
       });
 
       utterance.addEventListener("error", () => {
         reject("speech synthesis failed");
       });
 
--- a/toolkit/components/narrate/test/browser_narrate.js
+++ b/toolkit/components/narrate/test/browser_narrate.js
@@ -113,16 +113,22 @@ add_task(function* testNarrate() {
     content.scrollBy(0, -10);
     yield promiseEvent;
     ok(NarrateTestUtils.isVisible(popup), "popup stays visible after scroll");
 
     toggle.click();
     ok(!NarrateTestUtils.isVisible(popup), "popup is dismissed while speaking");
     NarrateTestUtils.isStartedState(content, ok);
 
-    promiseEvent = ContentTaskUtils.waitForEvent(content, "paragraphend");
-    $(NarrateTestUtils.STOP).click();
-    yield promiseEvent;
+    // Go forward all the way to the end of the article. We should eventually
+    // stop.
+    do {
+      promiseEvent = Promise.race([
+        ContentTaskUtils.waitForEvent(content, "paragraphstart"),
+        ContentTaskUtils.waitForEvent(content, "paragraphsdone")]);
+      $(NarrateTestUtils.FORWARD).click();
+    } while ((yield promiseEvent).type == "paragraphstart");
+
     yield ContentTaskUtils.waitForCondition(
       () => !$(NarrateTestUtils.STOP), "transitioned to stopped state");
     NarrateTestUtils.isStoppedState(content, ok);
   });
 });
--- a/toolkit/locales/jar.mn
+++ b/toolkit/locales/jar.mn
@@ -56,26 +56,30 @@
 #endif
   locale/@AB_CD@/global/globalKeys.dtd                  (%chrome/global/globalKeys.dtd)
   locale/@AB_CD@/global/headsUpDisplay.properties       (%chrome/global/headsUpDisplay.properties)
   locale/@AB_CD@/global/intl.css                        (%chrome/global/intl.css)
   locale/@AB_CD@/global/intl.properties                 (%chrome/global/intl.properties)
   locale/@AB_CD@/global/keys.properties                 (%chrome/global/keys.properties)
   locale/@AB_CD@/global/languageNames.properties        (%chrome/global/languageNames.properties)
   locale/@AB_CD@/global/mozilla.dtd                     (%chrome/global/mozilla.dtd)
+#ifndef MOZ_FENNEC
   locale/@AB_CD@/global/narrate.properties              (%chrome/global/narrate.properties)
+#endif
   locale/@AB_CD@/global/notification.dtd                (%chrome/global/notification.dtd)
   locale/@AB_CD@/global/preferences.dtd                 (%chrome/global/preferences.dtd)
+#ifndef MOZ_FENNEC
   locale/@AB_CD@/global/printdialog.dtd                 (%chrome/global/printdialog.dtd)
   locale/@AB_CD@/global/printjoboptions.dtd             (%chrome/global/printjoboptions.dtd)
   locale/@AB_CD@/global/printPageSetup.dtd              (%chrome/global/printPageSetup.dtd)
   locale/@AB_CD@/global/printPreview.dtd                (%chrome/global/printPreview.dtd)
   locale/@AB_CD@/global/printPreviewProgress.dtd        (%chrome/global/printPreviewProgress.dtd)
   locale/@AB_CD@/global/printdialog.properties          (%chrome/global/printdialog.properties)
   locale/@AB_CD@/global/printProgress.dtd               (%chrome/global/printProgress.dtd)
+#endif
   locale/@AB_CD@/global/regionNames.properties          (%chrome/global/regionNames.properties)
   locale/@AB_CD@/global/resetProfile.dtd                (%chrome/global/resetProfile.dtd)
   locale/@AB_CD@/global/resetProfile.properties         (%chrome/global/resetProfile.properties)
   locale/@AB_CD@/global/dialog.properties               (%chrome/global/dialog.properties)
   locale/@AB_CD@/global/tree.dtd                        (%chrome/global/tree.dtd)
   locale/@AB_CD@/global/textcontext.dtd                 (%chrome/global/textcontext.dtd)
   locale/@AB_CD@/global/videocontrols.dtd               (%chrome/global/videocontrols.dtd)
   locale/@AB_CD@/global/viewSource.dtd                  (%chrome/global/viewSource.dtd)
--- a/toolkit/modules/FinderHighlighter.jsm
+++ b/toolkit/modules/FinderHighlighter.jsm
@@ -590,41 +590,46 @@ FinderHighlighter.prototype = {
   },
 
   /**
    * Utility; returns the bounds of the page relative to the viewport.
    * If the pages is part of a frameset or inside an iframe of any kind, its
    * offset is accounted for.
    * Geometry.jsm takes care of the DOMRect calculations.
    *
-   * @param  {nsIDOMWindow} window
+   * @param  {nsIDOMWindow} window          Window to read the boundary rect from
+   * @param  {Boolean}      [includeScroll] Whether to ignore the scroll offset,
+   *                                        which is useful for comparing DOMRects.
+   *                                        Optional, defaults to `true`
    * @return {Rect}
    */
-  _getRootBounds(window) {
+  _getRootBounds(window, includeScroll = true) {
     let dwu = this._getDWU(window);
     let cssPageRect = Rect.fromRect(dwu.getRootBounds());
-
     let scrollX = {};
     let scrollY = {};
-    dwu.getScrollXY(false, scrollX, scrollY);
-    cssPageRect.translate(scrollX.value, scrollY.value);
+    if (includeScroll) {
+      dwu.getScrollXY(false, scrollX, scrollY);
+      cssPageRect.translate(scrollX.value, scrollY.value);
+    }
 
     // If we're in a frame, update the position of the rect (top/ left).
     let currWin = window;
     while (currWin != window.top) {
       // Since the frame is an element inside a parent window, we'd like to
       // learn its position relative to it.
       let el = this._getDWU(currWin).containerElement;
       currWin = window.parent;
       dwu = this._getDWU(currWin);
       let parentRect = Rect.fromRect(dwu.getBoundsWithoutFlushing(el));
 
-      // Always take the scroll position into account.
-      dwu.getScrollXY(false, scrollX, scrollY);
-      parentRect.translate(scrollX.value, scrollY.value);
+      if (includeScroll) {
+        dwu.getScrollXY(false, scrollX, scrollY);
+        parentRect.translate(scrollX.value, scrollY.value);
+      }
 
       cssPageRect.translate(parentRect.left, parentRect.top);
     }
 
     return cssPageRect;
   },
 
   /**
@@ -779,27 +784,28 @@ FinderHighlighter.prototype = {
       bounds = dict.frames.get(window);
       if (!bounds) {
         bounds = this._getRootBounds(window);
         dict.frames.set(window, bounds);
       }
     } else
       bounds = this._getRootBounds(window);
 
+    let topBounds = this._getRootBounds(window.top, false);
     let rects = new Set();
     // A range may consist of multiple rectangles, we can also do these kind of
     // precise cut-outs. range.getBoundingClientRect() returns the fully
     // encompassing rectangle, which is too much for our purpose here.
-    for (let dims of range.getClientRects()) {
-      rects.add({
-        height: dims.bottom - dims.top,
-        width: dims.right - dims.left,
-        y: dims.top + bounds.top,
-        x: dims.left + bounds.left
-      });
+    for (let rect of range.getClientRects()) {
+      rect = Rect.fromRect(rect);
+      rect.x += bounds.x;
+      rect.y += bounds.y;
+      // If the rect is not even visible from the top document, we can ignore it.
+      if (rect.intersects(topBounds))
+        rects.add(rect);
     }
 
     dict = dict || this.getForWindow(window.top);
     dict.modalHighlightRectsMap.set(range, rects);
     if (checkIfDynamic && this._isInDynamicContainer(range))
       dict.dynamicRangesSet.add(range);
     return rects;
   },
@@ -889,16 +895,18 @@ FinderHighlighter.prototype = {
   _maybeCreateModalHighlightNodes(window) {
     window = window.top;
     let dict = this.getForWindow(window);
     if (dict.modalHighlightOutline) {
       if (!dict.modalHighlightAllMask) {
         // Make sure to at least show the dimmed background.
         this._repaintHighlightAllMask(window, false);
         this._scheduleRepaintOfMask(window);
+      } else {
+        this._scheduleRepaintOfMask(window, { scrollOnly: true });
       }
       return;
     }
 
     let document = window.document;
     // A hidden document doesn't accept insertAnonymousContent calls yet.
     if (document.hidden) {
       let onVisibilityChange = () => {
@@ -1044,55 +1052,54 @@ FinderHighlighter.prototype = {
       dict.updateAllRanges = updateAllRanges;
 
     if (dict.modalRepaintScheduler)
       return;
 
     dict.modalRepaintScheduler = window.setTimeout(() => {
       dict.modalRepaintScheduler = null;
 
-      if (dict.unconditionalRepaintRequested) {
+      let { width: previousWidth, height: previousHeight } = dict.lastWindowDimensions;
+      let { width, height } = dict.lastWindowDimensions = this._getWindowDimensions(window);
+      let pageContentChanged = (Math.abs(previousWidth - width) > kContentChangeThresholdPx ||
+                                Math.abs(previousHeight - height) > kContentChangeThresholdPx);
+      // When the page has changed significantly enough in size, we'll restart
+      // the iterator with the same parameters as before to find us new ranges.
+      if (pageContentChanged)
+        this.iterator.restart(this.finder);
+
+      if (dict.unconditionalRepaintRequested ||
+          (dict.modalHighlightRectsMap.size && pageContentChanged)) {
         dict.unconditionalRepaintRequested = false;
         this._repaintHighlightAllMask(window);
-        return;
       }
-
-      let { width, height } = this._getWindowDimensions(window);
-      if (!dict.modalHighlightRectsMap.size ||
-          (Math.abs(dict.lastWindowDimensions.width - width) < kContentChangeThresholdPx &&
-           Math.abs(dict.lastWindowDimensions.height - height) < kContentChangeThresholdPx)) {
-        return;
-      }
-
-      this.iterator.restart(this.finder);
-      dict.lastWindowDimensions = { width, height };
-      this._repaintHighlightAllMask(window);
     }, kModalHighlightRepaintFreqMs);
   },
 
   /**
    * The outline that shows/ highlights the current found range is styled and
    * animated using CSS. This style can be found in `kModalStyle`, but to have it
    * applied on any DOM node we insert using the AnonymousContent API we need to
    * inject an agent sheet into the document.
    *
    * @param {nsIDOMWindow} window
    */
   _maybeInstallStyleSheet(window) {
     window = window.top;
     let dict = this.getForWindow(window);
-    if (dict.installedSheet)
+    let document = window.document;
+    if (dict.installedSheet == document)
       return;
 
     let dwu = this._getDWU(window);
     let uri = this.modalStyleSheetURI;
     try {
       dwu.loadSheetUsingURIString(uri, dwu.AGENT_SHEET);
     } catch (e) {}
-    dict.installedSheet = true;
+    dict.installedSheet = document;
   },
 
   /**
    * Add event listeners to the content which will cause the modal highlight
    * AnonymousContent to be re-painted or hidden.
    *
    * @param {nsIDOMWindow} window
    */
--- a/tools/lint/docs/index.rst
+++ b/tools/lint/docs/index.rst
@@ -21,16 +21,17 @@ 2. It provides a streamlined interface f
 like mach, mozreview and taskcluster.
 
 .. toctree::
   :caption: Linting User Guide
   :maxdepth: 2
 
   usage
   create
+  linters/eslint
   linters/flake8
 
 Indices and tables
 ==================
 
 * :ref:`genindex`
 * :ref:`modindex`
 * :ref:`search`
rename from tools/lint/eslint/eslint-plugin-mozilla/docs/index.rst
rename to tools/lint/docs/linters/eslint-plugin-mozilla.rst
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/index.rst
+++ b/tools/lint/docs/linters/eslint-plugin-mozilla.rst
@@ -1,56 +1,157 @@
-.. _index:
-
 =====================
 Mozilla ESLint Plugin
 =====================
 
-``balanced-listeners`` checks that every addEventListener has a
-removeEventListener (and does the same for on/off).
+
+balanced-listeners
+------------------
+
+Checks that for every occurrence of 'addEventListener' or 'on' there is an
+occurrence of 'removeEventListener' or 'off' with the same event name.
+
+
+components-imports
+------------------
 
-``components-imports`` adds the filename of imported files e.g.
-``Cu.import("some/path/Blah.jsm")`` adds Blah to the global scope.
+Checks the filename of imported files; e.g. ``Cu.import("some/path/Blah.jsm")``
+adds Blah to the global scope.
+
+
+import-browserjs-globals
+------------------------
+
+When included, files from the main browser UI scripts will be loaded and any
+declared globals will be defined for the current file. This is mostly useful
+for browser-chrome mochitests that call browser functions.
+
 
-``import-globals-from`` When the "import-globals-from <path>" comment is found
-in a file, then all globals from the file at <path> will be imported in the
-current scope.
+import-globals-from
+-------------------
+
+Parses a file for globals defined in various unique Mozilla ways.
+
+When an "import-globals-from <path>" comment is found in a file, then all
+globals from the file at <path> will be imported in the current scope. This
+will also operate recursively.
+
+This is useful for scripts that are loaded as <script> tag in a window and rely
+on each other's globals.
 
-``import-headjs-globals`` imports globals from head.js and from any files that
-should be imported by head.js (as far as we can correctly resolve the path).
+If <path> is a relative path, then it must be relative to the file being
+checked by the rule.
+
 
-``mark-test-function-used`` simply marks test (the test method) as used. This
-avoids ESLint telling us that the function is never called.
+import-headjs-globals
+---------------------
+
+Import globals from head.js and from any files that were imported by
+head.js (as far as we can correctly resolve the path).
+
+The following file import patterns are supported:
 
-``no-aArgs`` prevents using the hungarian notation in function arguments.
+-  ``Services.scriptloader.loadSubScript(path)``
+-  ``loader.loadSubScript(path)``
+-  ``loadSubScript(path)``
+-  ``loadHelperScript(path)``
+-  ``import-globals-from path``
+
+If path does not exist because it is generated e.g.
+``testdir + "/somefile.js"`` we do our best to resolve it.
+
+The following patterns are supported:
 
-``no-cpows-in-tests`` checks if the file is a browser mochitest and,
-if so, checks for possible CPOW usage.
+-  ``Cu.import("resource://devtools/client/shared/widgets/ViewHelpers.jsm");``
+-  ``loader.lazyImporter(this, "name1");``
+-  ``loader.lazyRequireGetter(this, "name2"``
+-  ``loader.lazyServiceGetter(this, "name3"``
+-  ``XPCOMUtils.defineLazyModuleGetter(this, "setNamedTimeout", ...)``
+-  ``loader.lazyGetter(this, "toolboxStrings"``
+-  ``XPCOMUtils.defineLazyGetter(this, "clipboardHelper"``
 
-``no-single-arg-cu-import`` rejects calls to "Cu.import" that do not supply a
-second argument (meaning they add the exported properties into global scope).
+
+mark-test-function-used
+-----------------------
 
-``reject-importGlobalProperties`` rejects calls to
-"Cu.importGlobalProperties".  Use of this function is undesirable in
-some parts of the tree.
+Simply marks test (the test method) as used. This avoids ESLint telling
+us that the function is never called.
+
+
+no-aArgs
+--------
+
+Checks that function argument names don't start with lowercase 'a' followed by
+a capital letter. This is to prevent the use of Hungarian notation whereby the
+first letter is a prefix that indicates the type or intended use of a variable.
+
 
-``reject-some-requires`` rejects some calls to ``require``, according
-to a regexp passed in as an option.
+no-cpows-in-tests
+-----------------
+
+This rule checks if the file is a browser mochitest and, if so, checks for
+possible CPOW usage by checking for the following strings:
 
-``this-top-level-scope`` treats top-level assignments like
-``this.mumble = value`` as declaring a global.
+- "gBrowser.contentWindow"
+- "gBrowser.contentDocument"
+- "gBrowser.selectedBrowser.contentWindow"
+- "browser.contentDocument"
+- "window.content"
+- "content"
+- "content."
 
 Note: These are string matches so we will miss situations where the parent
 object is assigned to another variable e.g.::
 
    var b = gBrowser;
    b.content // Would not be detected as a CPOW.
 
-``var-only-at-top-level`` marks all var declarations that are not at the top
-level invalid.
+
+no-single-arg-cu-import
+-----------------------
+
+Rejects calls to "Cu.import" that do not supply a second argument (meaning they
+add the exported properties into global scope).
+
+
+reject-importGlobalProperties
+-----------------------------
+
+Rejects calls to ``Cu.importGlobalProperties``.  Use of this function is
+undesirable in some parts of the tree.
+
+
+reject-some-requires
+--------------------
+
+This takes an option, a regular expression.  Invocations of
+``require`` with a string literal argument are matched against this
+regexp; and if it matches, the ``require`` use is flagged.
+
+
+this-top-level-scope
+--------------------
+
+Treats top-level assignments like ``this.mumble = value`` as declaring a global.
+
+Note: These are string matches so we will miss situations where the parent
+object is assigned to another variable e.g.::
+
+   var b = gBrowser;
+   b.content // Would not be detected as a CPOW.
+
+
+var-only-at-top-level
+---------------------
+
+Marks all var declarations that are not at the top level invalid.
+
+
+Example
+=======
 
 +-------+-----------------------+
 | Possible values for all rules |
 +-------+-----------------------+
 | Value | Meaning               |
 +-------+-----------------------+
 | 0     | Deactivated           |
 +-------+-----------------------+
@@ -65,23 +166,8 @@ Example configuration::
      "mozilla/balanced-listeners": 2,
      "mozilla/components-imports": 1,
      "mozilla/import-globals-from": 1,
      "mozilla/import-headjs-globals": 1,
      "mozilla/mark-test-function-used": 1,
      "mozilla/var-only-at-top-level": 1,
      "mozilla/no-cpows-in-tests": 1,
    }
-
-.. toctree::
-   :maxdepth: 1
-
-   balanced-listeners
-   import-browserjs-globals
-   import-globals
-   import-headjs-globals
-   mark-test-function-used
-   no-aArgs
-   no-cpows-in-tests
-   no-single-arg-cu-import
-   reject-importGlobalProperties
-   reject-some-requires
-   var-only-at-top-level
new file mode 100644
--- /dev/null
+++ b/tools/lint/docs/linters/eslint.rst
@@ -0,0 +1,45 @@
+ESLint
+======
+
+`ESLint`_ is a popular linter for JavaScript.
+
+Run Locally
+-----------
+
+The mozlint integration of `ESLint`_ can be run using mach:
+
+.. parsed-literal::
+
+    $ mach lint --linter eslint <file paths>
+
+Alternatively, omit the ``--linter eslint`` and run all configured linters, which will include
+ESLint.
+
+
+Configuration
+-------------
+
+The `ESLint`_ mozilla-central integration uses a blacklist to exclude certain directories from being
+linted. This lives in ``topsrcdir/.eslintignore``. If you don't wish your directory to be linted, it
+must be added here.
+
+The global configuration file lives in ``topsrcdir/.eslintrc``. This global configuration can be
+overridden by including an ``.eslintrc`` in the appropriate subdirectory. For an overview of the
+supported configuration, see `ESLint's documentation`_.
+
+
+ESLint Plugin Mozilla
+---------------------
+
+In addition to default ESLint rules, there are several Mozilla-specific rules that are defined in
+the :doc:`Mozilla ESLint Plugin <eslint-plugin-mozilla>`.
+
+
+.. _ESLint: http://eslint.org/
+.. _ESLint's documentation: http://eslint.org/docs/user-guide/configuring
+
+
+.. toctree::
+   :hidden:
+
+   eslint-plugin-mozilla
deleted file mode 100644
--- a/tools/lint/eslint-formatter.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-"use strict";
-
-const path = require("path")
-
-module.exports = function(results) {
-  for (let file of results) {
-    let filePath = path.relative(".", file.filePath);
-    for (let message of file.messages) {
-      let status = message.message;
-
-      if ("ruleId" in message) {
-        status = `${status} (${message.ruleId})`;
-      }
-
-      let severity = message.severity == 1 ? "TEST-UNEXPECTED-WARNING"
-                                           : "TEST-UNEXPECTED-ERROR";
-      console.log(`${severity} | ${filePath}:${message.line}:${message.column} | ${status}`);
-    }
-  }
-};
new file mode 100644
--- /dev/null
+++ b/tools/lint/eslint.lint
@@ -0,0 +1,349 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+import platform
+import re
+import signal
+import subprocess
+import sys
+from distutils.version import LooseVersion
+
+import which
+from mozprocess import ProcessHandler
+
+from mozlint import result
+
+
+ESLINT_NOT_FOUND_MESSAGE = """
+Could not find eslint!  We looked at the --binary option, at the ESLINT
+environment variable, and then at your local node_modules path. Please install
+eslint and needed plugins with:
+
+mach eslint --setup
+
+and try again.
+""".strip()
+
+NODE_NOT_FOUND_MESSAGE = """
+nodejs v4.2.3 is either not installed or is installed to a non-standard path.
+Please install nodejs from https://nodejs.org and try again.
+
+Valid installation paths:
+""".strip()
+
+NPM_NOT_FOUND_MESSAGE = """
+Node Package Manager (npm) is either not installed or installed to a
+non-standard path. Please install npm from https://nodejs.org (it comes as an
+option in the node installation) and try again.
+
+Valid installation paths:
+""".strip()
+
+
+VERSION_RE = re.compile(r"^\d+\.\d+\.\d+$")
+CARET_VERSION_RANGE_RE = re.compile(r"^\^((\d+)\.\d+\.\d+)$")
+
+EXTENSIONS = ['.js', '.jsm', '.jsx', '.xml', '.html', '.xhtml']
+
+project_root = None
+
+
+def eslint_setup():
+    """Ensure eslint is optimally configured.
+
+    This command will inspect your eslint configuration and
+    guide you through an interactive wizard helping you configure
+    eslint for optimal use on Mozilla projects.
+    """
+    orig_cwd = os.getcwd()
+    sys.path.append(os.path.dirname(__file__))
+
+    module_path = get_eslint_module_path()
+
+    # npm sometimes fails to respect cwd when it is run using check_call so
+    # we manually switch folders here instead.
+    os.chdir(module_path)
+
+    npm_path = get_node_or_npm_path("npm")
+    if not npm_path:
+        return 1
+
+    # Install ESLint and external plugins
+    cmd = [npm_path, "install"]
+    print("Installing eslint for mach using \"%s\"..." % (" ".join(cmd)))
+    if not call_process("eslint", cmd):
+        return 1
+
+    # Install in-tree ESLint plugin
+    cmd = [npm_path, "install",
+           os.path.join(module_path, "eslint-plugin-mozilla")]
+    print("Installing eslint-plugin-mozilla using \"%s\"..." % (" ".join(cmd)))
+    if not call_process("eslint-plugin-mozilla", cmd):
+        return 1
+
+    eslint_path = os.path.join(module_path, "node_modules", ".bin", "eslint")
+
+    print("\nESLint and approved plugins installed successfully!")
+    print("\nNOTE: Your local eslint binary is at %s\n" % eslint_path)
+
+    os.chdir(orig_cwd)
+
+
+def call_process(name, cmd, cwd=None):
+    try:
+        with open(os.devnull, "w") as fnull:
+            subprocess.check_call(cmd, cwd=cwd, stdout=fnull)
+    except subprocess.CalledProcessError:
+        if cwd:
+            print("\nError installing %s in the %s folder, aborting." % (name, cwd))
+        else:
+            print("\nError installing %s, aborting." % name)
+
+        return False
+
+    return True
+
+
+def expected_eslint_modules():
+    # Read the expected version of ESLint and external modules
+    expected_modules_path = os.path.join(get_eslint_module_path(), "package.json")
+    with open(expected_modules_path, "r") as f:
+        expected_modules = json.load(f)["dependencies"]
+
+    # Also read the in-tree ESLint plugin version
+    mozilla_json_path = os.path.join(get_eslint_module_path(),
+                                     "eslint-plugin-mozilla", "package.json")
+    with open(mozilla_json_path, "r") as f:
+        expected_modules["eslint-plugin-mozilla"] = json.load(f)["version"]
+
+    return expected_modules
+
+
+def eslint_module_has_issues():
+    has_issues = False
+    node_modules_path = os.path.join(get_eslint_module_path(), "node_modules")
+
+    for name, version_range in expected_eslint_modules().iteritems():
+        path = os.path.join(node_modules_path, name, "package.json")
+
+        if not os.path.exists(path):
+            print("%s v%s needs to be installed locally." % (name, version_range))
+            has_issues = True
+            continue
+
+        data = json.load(open(path))
+
+        if not version_in_range(data["version"], version_range):
+            print("%s v%s should be v%s." % (name, data["version"], version_range))
+            has_issues = True
+
+    return has_issues
+
+
+def version_in_range(version, version_range):
+    """
+    Check if a module version is inside a version range.  Only supports explicit versions and
+    caret ranges for the moment, since that's all we've used so far.
+    """
+    if version == version_range:
+        return True
+
+    version_match = VERSION_RE.match(version)
+    if not version_match:
+        raise RuntimeError("mach eslint doesn't understand module version %s" % version)
+    version = LooseVersion(version)
+
+    # Caret ranges as specified by npm allow changes that do not modify the left-most non-zero
+    # digit in the [major, minor, patch] tuple.  The code below assumes the major digit is
+    # non-zero.
+    range_match = CARET_VERSION_RANGE_RE.match(version_range)
+    if range_match:
+        range_version = range_match.group(1)
+        range_major = int(range_match.group(2))
+
+        range_min = LooseVersion(range_version)
+        range_max = LooseVersion("%d.0.0" % (range_major + 1))
+
+        return range_min <= version < range_max
+
+    return False
+
+
+def get_possible_node_paths_win():
+    """
+    Return possible nodejs paths on Windows.
+    """
+    if platform.system() != "Windows":
+        return []
+
+    return list({
+        "%s\\nodejs" % os.environ.get("SystemDrive"),
+        os.path.join(os.environ.get("ProgramFiles"), "nodejs"),
+        os.path.join(os.environ.get("PROGRAMW6432"), "nodejs"),
+        os.path.join(os.environ.get("PROGRAMFILES"), "nodejs")
+    })
+
+
+def get_node_or_npm_path(filename, minversion=None):
+    """
+    Return the nodejs or npm path.
+    """
+    if platform.system() == "Windows":
+        for ext in [".cmd", ".exe", ""]:
+            try:
+                node_or_npm_path = which.which(filename + ext,
+                                               path=get_possible_node_paths_win())
+                if is_valid(node_or_npm_path, minversion):
+                    return node_or_npm_path
+            except which.WhichError:
+                pass
+    else:
+        try:
+            node_or_npm_path = which.which(filename)
+            if is_valid(node_or_npm_path, minversion):
+                return node_or_npm_path
+        except which.WhichError:
+            pass
+
+    if filename == "node":
+        print(NODE_NOT_FOUND_MESSAGE)
+    elif filename == "npm":
+        print(NPM_NOT_FOUND_MESSAGE)
+
+    if platform.system() == "Windows":
+        app_paths = get_possible_node_paths_win()
+
+        for p in app_paths:
+            print("  - %s" % p)
+    elif platform.system() == "Darwin":
+        print("  - /usr/local/bin/node")
+    elif platform.system() == "Linux":
+        print("  - /usr/bin/nodejs")
+
+    return None
+
+
+def is_valid(path, minversion=None):
+    try:
+        version_str = subprocess.check_output([path, "--version"],
+                                              stderr=subprocess.STDOUT)
+        if minversion:
+            # nodejs prefixes its version strings with "v"
+            version = LooseVersion(version_str.lstrip('v'))
+            return version >= minversion
+        return True
+    except (subprocess.CalledProcessError, OSError):
+        return False
+
+
+def get_project_root():
+    global project_root
+    return project_root
+
+
+def get_eslint_module_path():
+    return os.path.join(get_project_root(), "tools", "lint", "eslint")
+
+
+def lint(paths, binary=None, fix=None, setup=None, **lintargs):
+    """Run eslint."""
+    global project_root
+    project_root = lintargs['root']
+
+    module_path = get_eslint_module_path()
+
+    # eslint requires at least node 4.2.3
+    node_path = get_node_or_npm_path("node", LooseVersion("4.2.3"))
+    if not node_path:
+        return 1
+
+    if setup:
+        return eslint_setup()
+
+    npm_path = get_node_or_npm_path("npm")
+    if not npm_path:
+        return 1
+
+    if eslint_module_has_issues():
+        eslint_setup()
+
+    # Valid binaries are:
+    #  - Any provided by the binary argument.
+    #  - Any pointed at by the ESLINT environmental variable.
+    #  - Those provided by mach eslint --setup.
+    #
+    #  eslint --setup installs some mozilla specific plugins and installs
+    #  all node modules locally. This is the preferred method of
+    #  installation.
+
+    if not binary:
+        binary = os.environ.get('ESLINT', None)
+
+        if not binary:
+            binary = os.path.join(module_path, "node_modules", ".bin", "eslint")
+            if not os.path.isfile(binary):
+                binary = None
+
+    if not binary:
+        print(ESLINT_NOT_FOUND_MESSAGE)
+        return 1
+
+    extra_args = lintargs.get('extra_args') or []
+    cmd_args = [binary,
+                # Enable the HTML plugin.
+                # We can't currently enable this in the global config file
+                # because it has bad interactions with the SublimeText
+                # ESLint plugin (bug 1229874).
+                '--plugin', 'html',
+                # This keeps ext as a single argument.
+                '--ext', '[{}]'.format(','.join(EXTENSIONS)),
+                '--format', 'json',
+                ] + extra_args + paths
+
+    # eslint requires that --fix be set before the --ext argument.
+    if fix:
+        cmd_args.insert(1, '--fix')
+
+    orig = signal.signal(signal.SIGINT, signal.SIG_IGN)
+    proc = ProcessHandler(cmd_args, env=os.environ, stream=None)
+    proc.run()
+    signal.signal(signal.SIGINT, orig)
+
+    try:
+        proc.wait()
+    except KeyboardInterrupt:
+        proc.kill()
+        return []
+
+    results = []
+    for obj in json.loads(proc.output[0] or '[]'):
+        errors = obj['messages']
+
+        for err in errors:
+            err.update({
+                'hint': err.get('fix'),
+                'level': 'error' if err['severity'] == 2 else 'warning',
+                'lineno': err.get('line'),
+                'path': obj['filePath'],
+                'rule': err.get('ruleId'),
+            })
+            results.append(result.from_linter(LINTER, **err))
+
+    return results
+
+
+LINTER = {
+    'name': "eslint",
+    'description': "JavaScript linter",
+    # ESLint infra handles its own path filtering, so just include cwd
+    'include': ['.'],
+    'exclude': [],
+    'extensions': EXTENSIONS,
+    'type': 'external',
+    'payload': lint,
+}
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/balanced-listeners.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _balanced-listeners:
-
-==================
-balanced-listeners
-==================
-
-Rule Details
-------------
-
-Checks that for every occurences of 'addEventListener' or 'on' there is an
-occurence of 'removeEventListener' or 'off' with the same event name.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/import-browserjs-globals.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _import-browserjs-globals:
-
-========================
-import-browserjs-globals
-========================
-
-Rule Details
-------------
-
-When included files from the main browser UI scripts will be loaded and any
-declared globals will be defined for the current file. This is mostly useful for
-browser-chrome mochitests that call browser functions.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/import-globals.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-.. _import-globals:
-
-==============
-import-globals
-==============
-
-Rule Details
-------------
-
-Parses a file for globals defined in various unique Mozilla ways.
-
-When a "import-globals-from <path>" comment is found in a file, then all globals
-from the file at <path> will be imported in the current scope. This will also
-operate recursively.
-
-This is useful for scripts that are loaded as <script> tag in a window and rely
-on each other's globals.
-
-If <path> is a relative path, then it must be relative to the file being
-checked by the rule.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/import-headjs-globals.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-.. _import-headjs-globals:
-
-=====================
-import-headjs-globals
-=====================
-
-Rule Details
-------------
-
-Import globals from head.js and from any files that were imported by
-head.js (as far as we can correctly resolve the path).
-
-The following file import patterns are supported:
-
--  ``Services.scriptloader.loadSubScript(path)``
--  ``loader.loadSubScript(path)``
--  ``loadSubScript(path)``
--  ``loadHelperScript(path)``
--  ``import-globals-from path``
-
-If path does not exist because it is generated e.g.
-``testdir + "/somefile.js"`` we do our best to resolve it.
-
-The following patterns are supported:
-
--  ``Cu.import("resource://devtools/client/shared/widgets/ViewHelpers.jsm");``
--  ``loader.lazyImporter(this, "name1");``
--  ``loader.lazyRequireGetter(this, "name2"``
--  ``loader.lazyServiceGetter(this, "name3"``
--  ``XPCOMUtils.defineLazyModuleGetter(this, "setNamedTimeout", ...)``
--  ``loader.lazyGetter(this, "toolboxStrings"``
--  ``XPCOMUtils.defineLazyGetter(this, "clipboardHelper"``
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/mark-test-function-used.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _mark-test-function-used:
-
-=======================
-mark-test-function-used
-=======================
-
-Rule Details
-------------
-
-Simply marks test (the test method) as used. This avoids ESLint telling
-us that the function is never called.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/no-aArgs.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _no-aArgs:
-
-========
-no-aArgs
-========
-
-Rule Details
-------------
-
-Checks that function argument names don't start with lowercase 'a' followed by a
-capital letter. This is to prevent the use of Hungarian notation whereby the
-first letter is a prefix that indicates the type or intended use of a variable.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/no-cpows-in-tests.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-.. _no-cpows-in-tests:
-
-=================
-no-cpows-in-tests
-=================
-
-Rule Details
-------------
-
-This rule checks if the file is a browser mochitest and, if so, checks for
-possible CPOW usage by checking for the following strings:
-
-- "gBrowser.contentWindow"
-- "gBrowser.contentDocument"
-- "gBrowser.selectedBrowser.contentWindow"
-- "browser.contentDocument"
-- "window.content"
-- "content"
-- "content."
-
-Note: These are string matches so we will miss situations where the parent
-object is assigned to another variable e.g.::
-
-   var b = gBrowser;
-   b.content // Would not be detected as a CPOW.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/no-single-arg-cu-import.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _no-single-arg-cu-import:
-
-=======================
-no-single-arg-cu-import
-=======================
-
-Rule Details
-------------
-
-Rejects calls to "Cu.import" that do not supply a second argument (meaning they
-add the exported properties into global scope).
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/reject-importGlobalProperties.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _reject-importGlobalProperties:
-
-=============================
-reject-importGlobalProperties
-=============================
-
-Rule Details
-------------
-
-Reject calls to Cu.importGlobalProperties.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/reject-some-requires.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _reject-some-requires:
-
-====================
-reject-some-requires
-====================
-
-Rule Details
-------------
-
-This takes an option, a regular expression.  Invocations of
-``require`` with a string literal argument are matched against this
-regexp; and if it matches, the ``require`` use is flagged.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/docs/var-only-at-top-level.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. _var-only-at-top-level:
-
-=======================
-var-only-at-top-level
-=======================
-
-Rule Details
-------------
-
-Marks all var declarations that are not at the top level invalid.
deleted file mode 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/moz.build
+++ /dev/null
@@ -1,7 +0,0 @@
-# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-SPHINX_TREES['eslint-plugin-mozilla'] = 'docs'
--- a/tools/lint/mach_commands.py
+++ b/tools/lint/mach_commands.py
@@ -1,69 +1,32 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
-import json
-import logging
 import os
-import platform
-import re
-import subprocess
-import sys
-import which
-from distutils.version import LooseVersion
 
 from mozbuild.base import (
     MachCommandBase,
 )
 
 
 from mach.decorators import (
     CommandArgument,
     CommandProvider,
     Command,
 )
 
 
 here = os.path.abspath(os.path.dirname(__file__))
 
 
-ESLINT_NOT_FOUND_MESSAGE = '''
-Could not find eslint!  We looked at the --binary option, at the ESLINT
-environment variable, and then at your local node_modules path. Please Install
-eslint and needed plugins with:
-
-mach eslint --setup
-
-and try again.
-'''.strip()
-
-NODE_NOT_FOUND_MESSAGE = '''
-nodejs v4.2.3 is either not installed or is installed to a non-standard path.
-Please install nodejs from https://nodejs.org and try again.
-
-Valid installation paths:
-'''.strip()
-
-NPM_NOT_FOUND_MESSAGE = '''
-Node Package Manager (npm) is either not installed or installed to a
-non-standard path. Please install npm from https://nodejs.org (it comes as an
-option in the node installation) and try again.
-
-Valid installation paths:
-'''.strip()
-
-VERSION_RE = re.compile(r"^\d+\.\d+\.\d+$")
-CARET_VERSION_RANGE_RE = re.compile(r"^\^((\d+)\.\d+\.\d+)$")
-
-
 def setup_argument_parser():
     from mozlint import cli
     return cli.MozlintParser()
 
 
 @CommandProvider
 class MachCommands(MachCommandBase):
 
@@ -75,300 +38,23 @@ class MachCommands(MachCommandBase):
         """Run linters."""
         from mozlint import cli
         lintargs['exclude'] = ['obj*']
         cli.SEARCH_PATHS.append(here)
         return cli.run(*runargs, **lintargs)
 
     @Command('eslint', category='devenv',
              description='Run eslint or help configure eslint for optimal development.')
+    @CommandArgument('paths', default=None, nargs='*',
+                     help="Paths to file or directories to lint, like "
+                          "'browser/components/loop' Defaults to the "
+                          "current directory if not given.")
     @CommandArgument('-s', '--setup', default=False, action='store_true',
                      help='Configure eslint for optimal development.')
-    @CommandArgument('-e', '--ext', default='[.js,.jsm,.jsx,.xml,.html,.xhtml]',
-                     help='Filename extensions to lint, default: ' +
-                     '"[.js,.jsm,.jsx,.xml,.html,.xhtml]".')
     @CommandArgument('-b', '--binary', default=None,
                      help='Path to eslint binary.')
     @CommandArgument('--fix', default=False, action='store_true',
                      help='Request that eslint automatically fix errors, where possible.')
-    @CommandArgument('args', nargs=argparse.REMAINDER)  # Passed through to eslint.
-    def eslint(self, setup, ext=None, binary=None, fix=False, args=None):
-        '''Run eslint.'''
-
-        module_path = self.get_eslint_module_path()
-
-        # eslint requires at least node 4.2.3
-        nodePath = self.get_node_or_npm_path("node", LooseVersion("4.2.3"))
-        if not nodePath:
-            return 1
-
-        if setup:
-            return self.eslint_setup()
-
-        npm_path = self.get_node_or_npm_path("npm")
-        if not npm_path:
-            return 1
-
-        if self.eslint_module_has_issues():
-            install = self._prompt_yn("\nContinuing will automatically fix "
-                                      "these issues. Would you like to "
-                                      "continue")
-            if install:
-                self.eslint_setup()
-            else:
-                return 1
-
-        # Valid binaries are:
-        #  - Any provided by the binary argument.
-        #  - Any pointed at by the ESLINT environmental variable.
-        #  - Those provided by mach eslint --setup.
-        #
-        #  eslint --setup installs some mozilla specific plugins and installs
-        #  all node modules locally. This is the preferred method of
-        #  installation.
-
-        if not binary:
-            binary = os.environ.get('ESLINT', None)
-
-            if not binary:
-                binary = os.path.join(module_path, "node_modules", ".bin", "eslint")
-                if not os.path.isfile(binary):
-                    binary = None
-
-        if not binary:
-            print(ESLINT_NOT_FOUND_MESSAGE)
-            return 1
-
-        self.log(logging.INFO, 'eslint', {'binary': binary, 'args': args},
-                 'Running {binary}')
-
-        args = args or ['.']
-
-        cmd_args = [binary,
-                    # Enable the HTML plugin.
-                    # We can't currently enable this in the global config file
-                    # because it has bad interactions with the SublimeText
-                    # ESLint plugin (bug 1229874).
-                    '--plugin', 'html',
-                    '--ext', ext,  # This keeps ext as a single argument.
-                    ] + args
-
-        # eslint requires that --fix be set before the --ext argument.
-        if fix:
-            cmd_args.insert(1, '--fix')
-
-        success = self.run_process(
-            cmd_args,
-            pass_thru=True,  # Allow user to run eslint interactively.
-            ensure_exit_code=False,  # Don't throw on non-zero exit code.
-            require_unix_environment=True  # eslint is not a valid Win32 binary.
-        )
-
-        self.log(logging.INFO, 'eslint', {'msg': ('No errors' if success == 0 else 'Errors')},
-                 'Finished eslint. {msg} encountered.')
-        return success
-
-    def eslint_setup(self):
-        """Ensure eslint is optimally configured.
-
-        This command will inspect your eslint configuration and
-        guide you through an interactive wizard helping you configure
-        eslint for optimal use on Mozilla projects.
-        """
-        orig_cwd = os.getcwd()
-        sys.path.append(os.path.dirname(__file__))
-
-        module_path = self.get_eslint_module_path()
-
-        # npm sometimes fails to respect cwd when it is run using check_call so
-        # we manually switch folders here instead.
-        os.chdir(module_path)
-
-        npm_path = self.get_node_or_npm_path("npm")
-        if not npm_path:
-            return 1
-
-        # Install ESLint and external plugins
-        cmd = [npm_path, "install"]
-        print("Installing eslint for mach using \"%s\"..." % (" ".join(cmd)))
-        if not self.call_process("eslint", cmd):
-            return 1
-
-        # Install in-tree ESLint plugin
-        cmd = [npm_path, "install",
-               os.path.join(module_path, "eslint-plugin-mozilla")]
-        print("Installing eslint-plugin-mozilla using \"%s\"..." % (" ".join(cmd)))
-        if not self.call_process("eslint-plugin-mozilla", cmd):
-            return 1
-
-        eslint_path = os.path.join(module_path, "node_modules", ".bin", "eslint")
-
-        print("\nESLint and approved plugins installed successfully!")
-        print("\nNOTE: Your local eslint binary is at %s\n" % eslint_path)
-
-        os.chdir(orig_cwd)
-
-    def call_process(self, name, cmd, cwd=None):
-        try:
-            with open(os.devnull, "w") as fnull:
-                subprocess.check_call(cmd, cwd=cwd, stdout=fnull)
-        except subprocess.CalledProcessError:
-            if cwd:
-                print("\nError installing %s in the %s folder, aborting." % (name, cwd))
-            else:
-                print("\nError installing %s, aborting." % name)
-
-            return False
-
-        return True
-
-    def expected_eslint_modules(self):
-        # Read the expected version of ESLint and external modules
-        expected_modules_path = os.path.join(self.get_eslint_module_path(), "package.json")
-        with open(expected_modules_path, "r") as f:
-            expected_modules = json.load(f)["dependencies"]
-
-        # Also read the in-tree ESLint plugin version
-        mozilla_json_path = os.path.join(self.get_eslint_module_path(),
-                                         "eslint-plugin-mozilla", "package.json")
-        with open(mozilla_json_path, "r") as f:
-            expected_modules["eslint-plugin-mozilla"] = json.load(f)["version"]
-
-        return expected_modules
-
-    def eslint_module_has_issues(self):
-        has_issues = False
-        node_modules_path = os.path.join(self.get_eslint_module_path(), "node_modules")
-
-        for name, version_range in self.expected_eslint_modules().iteritems():
-            path = os.path.join(node_modules_path, name, "package.json")
-
-            if not os.path.exists(path):
-                print("%s v%s needs to be installed locally." % (name, version_range))
-                has_issues = True
-                continue
-
-            data = json.load(open(path))
-
-            if not self.version_in_range(data["version"], version_range):
-                print("%s v%s should be v%s." % (name, data["version"], version_range))
-                has_issues = True
-
-        return has_issues
-
-    def version_in_range(self, version, version_range):
-        """
-        Check if a module version is inside a version range.  Only supports explicit versions and
-        caret ranges for the moment, since that's all we've used so far.
-        """
-        if version == version_range:
-            return True
-
-        version_match = VERSION_RE.match(version)
-        if not version_match:
-            raise RuntimeError("mach eslint doesn't understand module version %s" % version)
-        version = LooseVersion(version)
-
-        # Caret ranges as specified by npm allow changes that do not modify the left-most non-zero
-        # digit in the [major, minor, patch] tuple.  The code below assumes the major digit is
-        # non-zero.
-        range_match = CARET_VERSION_RANGE_RE.match(version_range)
-        if range_match:
-            range_version = range_match.group(1)
-            range_major = int(range_match.group(2))
-
-            range_min = LooseVersion(range_version)
-            range_max = LooseVersion("%d.0.0" % (range_major + 1))
-
-            return range_min <= version < range_max
-
-        return False
-
-    def get_possible_node_paths_win(self):
-        """
-        Return possible nodejs paths on Windows.
-        """
-        if platform.system() != "Windows":
-            return []
-
-        return list({
-            "%s\\nodejs" % os.environ.get("SystemDrive"),
-            os.path.join(os.environ.get("ProgramFiles"), "nodejs"),
-            os.path.join(os.environ.get("PROGRAMW6432"), "nodejs"),
-            os.path.join(os.environ.get("PROGRAMFILES"), "nodejs")
-        })
-
-    def get_node_or_npm_path(self, filename, minversion=None):
-        """
-        Return the nodejs or npm path.
-        """
-        if platform.system() == "Windows":
-            for ext in [".cmd", ".exe", ""]:
-                try:
-                    node_or_npm_path = which.which(filename + ext,
-                                                   path=self.get_possible_node_paths_win())
-                    if self.is_valid(node_or_npm_path, minversion):
-                        return node_or_npm_path
-                except which.WhichError:
-                    pass
-        else:
-            try:
-                node_or_npm_path = which.which(filename)
-                if self.is_valid(node_or_npm_path, minversion):
-                    return node_or_npm_path
-            except which.WhichError:
-                pass
-
-        if filename == "node":
-            print(NODE_NOT_FOUND_MESSAGE)
-        elif filename == "npm":
-            print(NPM_NOT_FOUND_MESSAGE)
-
-        if platform.system() == "Windows":
-            appPaths = self.get_possible_node_paths_win()
-
-            for p in appPaths:
-                print("  - %s" % p)
-        elif platform.system() == "Darwin":
-            print("  - /usr/local/bin/node")
-        elif platform.system() == "Linux":
-            print("  - /usr/bin/nodejs")
-
-        return None
-
-    def is_valid(self, path, minversion=None):
-        try:
-            version_str = subprocess.check_output([path, "--version"],
-                                                  stderr=subprocess.STDOUT)
-            if minversion:
-                # nodejs prefixes its version strings with "v"
-                version = LooseVersion(version_str.lstrip('v'))
-                return version >= minversion
-            return True
-        except (subprocess.CalledProcessError, OSError):
-            return False
-
-    def get_project_root(self):
-        fullpath = os.path.abspath(sys.modules['__main__'].__file__)
-        return os.path.dirname(fullpath)
-
-    def get_eslint_module_path(self):
-        return os.path.join(self.get_project_root(), "tools", "lint", "eslint")
-
-    def _prompt_yn(self, msg):
-        if not sys.stdin.isatty():
-            return False
-
-        print('%s? [Y/n]' % msg)
-
-        while True:
-            choice = raw_input().lower().strip()
-
-            if not choice:
-                return True
-
-            if choice in ('y', 'yes'):
-                return True
-
-            if choice in ('n', 'no'):
-                return False
-
-            print('Must reply with one of {yes, no, y, n}.')
+    @CommandArgument('extra_args', nargs=argparse.REMAINDER,
+                     help='Extra args that will be forwarded to eslint.')
+    def eslint(self, paths, **kwargs):
+        self._mach_context.commands.dispatch('lint', self._mach_context,
+                                             linters=['eslint'], paths=paths, **kwargs)
--- a/widget/android/GfxInfo.cpp
+++ b/widget/android/GfxInfo.cpp
@@ -457,94 +457,17 @@ GfxInfo::GetFeatureStatusImpl(int32_t aF
           cHardware.EqualsLiteral("ventana") ||
           cHardware.EqualsLiteral("rk30board"))
       {
         *aStatus = nsIGfxInfo::FEATURE_BLOCKED_DEVICE;
         aFailureId = "FEATURE_FAILURE_STAGE_HW";
         return NS_OK;
       }
 
-      if (CompareVersions(mOSVersion.get(), "2.2.0") >= 0 &&
-          CompareVersions(mOSVersion.get(), "2.3.0") < 0)
-      {
-        // Froyo LG devices are whitelisted.
-        // All other Froyo
-        bool isWhitelisted =
-          cManufacturer.Equals("lge", nsCaseInsensitiveCStringComparator());
-
-        if (!isWhitelisted) {
-          *aStatus = nsIGfxInfo::FEATURE_BLOCKED_DEVICE;
-          aFailureId = "FEATURE_FAILURE_OLD_ANDROID";
-          return NS_OK;
-        }
-      }
-      else if (CompareVersions(mOSVersion.get(), "2.3.0") >= 0 &&
-          CompareVersions(mOSVersion.get(), "2.4.0") < 0)
-      {
-        // Gingerbread HTC devices are whitelisted.
-        // Gingerbread Samsung devices are whitelisted except for:
-        //   Samsung devices identified in Bug 847837
-        // Gingerbread Sony devices are whitelisted.
-        // All other Gingerbread devices are blacklisted.
-        bool isWhitelisted =
-          cManufacturer.Equals("htc", nsCaseInsensitiveCStringComparator()) ||
-          (cManufacturer.Find("sony", true) != -1) ||
-          cManufacturer.Equals("samsung", nsCaseInsensitiveCStringComparator());
-
-        if (cModel.Equals("GT-I8160", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-I8160L", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-I8530", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-I9070", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-I9070P", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-I8160P", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-S7500", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-S7500T", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-S7500L", nsCaseInsensitiveCStringComparator()) ||
-            cModel.Equals("GT-S6500T", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("smdkc110", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("smdkc210", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("herring", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("shw-m110s", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("shw-m180s", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("n1", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("latona", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("aalto", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("atlas", nsCaseInsensitiveCStringComparator()) ||
-            cHardware.Equals("qcom", nsCaseInsensitiveCStringComparator()))
-        {
-          isWhitelisted = false;
-        }
-
-        if (!isWhitelisted) {
-          *aStatus = nsIGfxInfo::FEATURE_BLOCKED_DEVICE;
-          aFailureId = "FEATURE_FAILURE_OLD_ANDROID_2";
-          return NS_OK;
-        }
-      }
-      else if (CompareVersions(mOSVersion.get(), "3.0.0") >= 0 &&
-          CompareVersions(mOSVersion.get(), "4.0.0") < 0)
-      {
-        // Honeycomb Samsung devices are whitelisted.
-        // All other Honeycomb devices are blacklisted.
-        bool isWhitelisted =
-          cManufacturer.Equals("samsung", nsCaseInsensitiveCStringComparator());
-
-        if (!isWhitelisted) {
-          *aStatus = nsIGfxInfo::FEATURE_BLOCKED_DEVICE;
-          aFailureId = "FEATURE_FAILURE_SAMSUNG";
-          return NS_OK;
-        }
-      }
-      else if (CompareVersions(mOSVersion.get(), "4.0.0") < 0)
-      {
-        *aStatus = nsIGfxInfo::FEATURE_BLOCKED_OS_VERSION;
-        aFailureId = "FEATURE_FAILURE_OLD_ANDROID_4";
-        return NS_OK;
-      }
-      else if (CompareVersions(mOSVersion.get(), "4.1.0") < 0)
+      if (CompareVersions(mOSVersion.get(), "4.1.0") < 0)
       {
         // Whitelist:
         //   All Samsung ICS devices, except for:
         //     Samsung SGH-I717 (Bug 845729)
         //     Samsung SGH-I727 (Bug 845729)
         //     Samsung SGH-I757 (Bug 845729)
         //   All Galaxy nexus ICS devices
         //   Sony Xperia Ion (LT28) ICS devices
--- a/widget/cocoa/TextInputHandler.mm
+++ b/widget/cocoa/TextInputHandler.mm
@@ -3279,17 +3279,17 @@ IMEInputHandler::GetAttributedSubstringF
     startOffset -= mIMECompositionStart;
   }
   textContent.InitForQueryTextContent(startOffset, aRange.length, options);
   textContent.RequestFontRanges();
   DispatchEvent(textContent);
 
   MOZ_LOG(gLog, LogLevel::Info,
     ("%p IMEInputHandler::GetAttributedSubstringFromRange, "
-     "textContent={ mSucceeded=%s, mReply={ mString=\"%s\", mOffset=%llu } }",
+     "textContent={ mSucceeded=%s, mReply={ mString=\"%s\", mOffset=%u } }",
      this, TrueOrFalse(textContent.mSucceeded),
      NS_ConvertUTF16toUTF8(textContent.mReply.mString).get(),
      textContent.mReply.mOffset));
 
   if (!textContent.mSucceeded) {
     return nil;
   }
 
@@ -3356,17 +3356,17 @@ IMEInputHandler::SelectedRange()
 
   RefPtr<IMEInputHandler> kungFuDeathGrip(this);
 
   WidgetQueryContentEvent selection(true, eQuerySelectedText, mWidget);
   DispatchEvent(selection);
 
   MOZ_LOG(gLog, LogLevel::Info,
     ("%p IMEInputHandler::SelectedRange, selection={ mSucceeded=%s, "
-     "mReply={ mOffset=%llu, mString.Length()=%llu } }",
+     "mReply={ mOffset=%u, mString.Length()=%u } }",
      this, TrueOrFalse(selection.mSucceeded), selection.mReply.mOffset,
      selection.mReply.mString.Length()));
 
   if (!selection.mSucceeded) {
     return mSelectedRange;
   }
 
   mWritingMode = selection.GetWritingMode();