merge mozilla-central to autoland. r=merge a=merge
authorSebastian Hengst <archaeopteryx@coole-files.de>
Tue, 12 Sep 2017 11:36:32 +0200
changeset 429756 7c348ad2e0514a79d925af96cd93278ac6462f1d
parent 429755 8ab8827d56b640f9555faecfab7e8f6850591e45 (current diff)
parent 429740 b0e945eed81db8bf076daf64e381c514f70144f0 (diff)
child 429757 6f5ae22906a10ea2fb13fe2dec9ebfc9ccc720d0
push id7761
push userjlund@mozilla.com
push dateFri, 15 Sep 2017 00:19:52 +0000
treeherdermozilla-beta@c38455951db4 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmerge, merge
milestone57.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
merge mozilla-central to autoland. r=merge a=merge
taskcluster/ci/beetmover-partials/kind.yml
taskcluster/ci/partials-signing/kind.yml
taskcluster/ci/partials/kind.yml
taskcluster/docker/partial-update-generator/Dockerfile
taskcluster/docker/partial-update-generator/Makefile
taskcluster/docker/partial-update-generator/README
taskcluster/docker/partial-update-generator/dep.pubkey
taskcluster/docker/partial-update-generator/nightly_sha1.pubkey
taskcluster/docker/partial-update-generator/nightly_sha384.pubkey
taskcluster/docker/partial-update-generator/release_sha1.pubkey
taskcluster/docker/partial-update-generator/release_sha384.pubkey
taskcluster/docker/partial-update-generator/requirements.txt
taskcluster/docker/partial-update-generator/runme.sh
taskcluster/docker/partial-update-generator/scripts/funsize.py
taskcluster/docker/partial-update-generator/scripts/mbsdiff_hook.sh
taskcluster/taskgraph/transforms/beetmover_partials.py
taskcluster/taskgraph/transforms/partials.py
taskcluster/taskgraph/transforms/partials_signing.py
taskcluster/taskgraph/util/partials.py
toolkit/components/telemetry/Histograms.json
--- a/accessible/tests/browser/bounds/browser.ini
+++ b/accessible/tests/browser/bounds/browser.ini
@@ -3,9 +3,9 @@ support-files =
   head.js
   !/accessible/tests/browser/events.js
   !/accessible/tests/browser/shared-head.js
   !/accessible/tests/mochitest/*.js
   !/accessible/tests/mochitest/letters.gif
 
 [browser_test_zoom.js]
 [browser_test_zoom_text.js]
-skip-if = true # Bug 1372296, Bug 1379808, Bug 1391453
+skip-if = e10s && os == 'win' # bug 1372296
--- a/accessible/tests/mochitest/common.js
+++ b/accessible/tests/mochitest/common.js
@@ -186,16 +186,28 @@ function isObject(aObj, aExpectedObj, aM
     return;
   }
 
   ok(false,
      aMsg + " - got '" + prettyName(aObj) +
             "', expected '" + prettyName(aExpectedObj) + "'");
 }
 
+/**
+ * is() function checking the expected value is within the range.
+ */
+function isWithin(aExpected, aGot, aWithin, aMsg) {
+  if (Math.abs(aGot - aExpected) <= aWithin) {
+    ok(true, `${aMsg} - Got ${aGot}`);
+  } else {
+    ok(false,
+       `${aMsg} - Got ${aGot}, expected ${aExpected} with error of ${aWithin}`);
+  }
+}
+
 // //////////////////////////////////////////////////////////////////////////////
 // Helpers for getting DOM node/accessible
 
 /**
  * Return the DOM node by identifier (may be accessible, DOM node or ID).
  */
 function getNode(aAccOrNodeOrID, aDocument) {
   if (!aAccOrNodeOrID)
--- a/accessible/tests/mochitest/layout.js
+++ b/accessible/tests/mochitest/layout.js
@@ -145,33 +145,39 @@ function testTextPos(aID, aOffset, aPoin
  */
 function testTextBounds(aID, aStartOffset, aEndOffset, aRect, aCoordOrigin) {
   var [expectedX, expectedY, expectedWidth, expectedHeight] = aRect;
 
   var xObj = {}, yObj = {}, widthObj = {}, heightObj = {};
   var hyperText = getAccessible(aID, [nsIAccessibleText]);
   hyperText.getRangeExtents(aStartOffset, aEndOffset,
                             xObj, yObj, widthObj, heightObj, aCoordOrigin);
+
+  // x
   is(xObj.value, expectedX,
      "Wrong x coordinate of text between offsets (" + aStartOffset + ", " +
      aEndOffset + ") for " + prettyName(aID));
-  is(yObj.value, expectedY,
-     "Wrong y coordinate of text between offsets (" + aStartOffset + ", " +
-     aEndOffset + ") for " + prettyName(aID));
 
+  // y
+  isWithin(yObj.value, expectedY, 1,
+           `y coord of text between offsets (${aStartOffset}, ${aEndOffset}) ` +
+           `for ${prettyName(aID)}`);
+
+  // Width
   var msg = "Wrong width of text between offsets (" + aStartOffset + ", " +
     aEndOffset + ") for " + prettyName(aID);
   if (widthObj.value == expectedWidth)
     ok(true, msg);
   else
     todo(false, msg); // fails on some windows machines
 
-  is(heightObj.value, expectedHeight,
-     "Wrong height of text between offsets (" + aStartOffset + ", " +
-     aEndOffset + ") for " + prettyName(aID));
+  // Height
+  isWithin(heightObj.value, expectedHeight, 1,
+           `height of text between offsets (${aStartOffset}, ${aEndOffset}) ` +
+           `for ${prettyName(aID)}`);
 }
 
 /**
  * Return the accessible coordinates relative to the screen in device pixels.
  */
 function getPos(aID) {
   var accessible = getAccessible(aID);
   var x = {}, y = {};
--- a/browser/components/customizableui/CustomizableUI.jsm
+++ b/browser/components/customizableui/CustomizableUI.jsm
@@ -4348,17 +4348,17 @@ OverflowableToolbar.prototype = {
 
     let win = this._target.ownerGlobal;
     win.UpdateUrlbarSearchSplitterState();
   },
 
   _onResize(aEvent) {
     if (!this._lazyResizeHandler) {
       this._lazyResizeHandler = new DeferredTask(this._onLazyResize.bind(this),
-                                                 LAZY_RESIZE_INTERVAL_MS);
+                                                 LAZY_RESIZE_INTERVAL_MS, 0);
     }
     this._lazyResizeHandler.arm();
   },
 
   _moveItemsBackToTheirOrigin(shouldMoveAllItems) {
     let placements = gPlacements.get(this._toolbar.id);
     while (this._list.firstChild) {
       let child = this._list.firstChild;
--- a/browser/components/extensions/ext-browser.js
+++ b/browser/components/extensions/ext-browser.js
@@ -512,19 +512,17 @@ class TabTracker extends TabTrackerBase 
       this.emit("tab-removed", {nativeTab, tabId, windowId, isWindowClosing});
     });
   }
 
   getBrowserData(browser) {
     if (browser.ownerDocument.documentURI === "about:addons") {
       // When we're loaded into a <browser> inside about:addons, we need to go up
       // one more level.
-      browser = browser.ownerGlobal.QueryInterface(Ci.nsIInterfaceRequestor)
-                       .getInterface(Ci.nsIDocShell)
-                       .chromeEventHandler;
+      browser = browser.ownerDocument.docShell.chromeEventHandler;
     }
 
     let result = {
       tabId: -1,
       windowId: -1,
     };
 
     let {gBrowser} = browser.ownerGlobal;
--- a/browser/components/extensions/ext-browsingData.js
+++ b/browser/components/extensions/ext-browsingData.js
@@ -45,20 +45,17 @@ const clearCache = () => {
 
 const clearCookies = async function(options) {
   let cookieMgr = Services.cookies;
   // This code has been borrowed from sanitize.js.
   let yieldCounter = 0;
 
   if (options.since || options.hostnames) {
     // Iterate through the cookies and delete any created after our cutoff.
-    let cookiesEnum = cookieMgr.enumerator;
-    while (cookiesEnum.hasMoreElements()) {
-      let cookie = cookiesEnum.getNext().QueryInterface(Ci.nsICookie2);
-
+    for (const cookie of XPCOMUtils.IterSimpleEnumerator(cookieMgr.enumerator, Ci.nsICookie2)) {
       if ((!options.since || cookie.creationTime >= PlacesUtils.toPRTime(options.since)) &&
           (!options.hostnames || options.hostnames.includes(cookie.host.replace(/^\./, "")))) {
         // This cookie was created after our cutoff, clear it.
         cookieMgr.remove(cookie.host, cookie.name, cookie.path,
                          false, cookie.originAttributes);
 
         if (++yieldCounter % YIELD_PERIOD == 0) {
           await new Promise(resolve => setTimeout(resolve, 0)); // Don't block the main thread too long.
--- a/browser/components/extensions/ext-tabs.js
+++ b/browser/components/extensions/ext-tabs.js
@@ -729,19 +729,17 @@ this.tabs = class extends ExtensionAPI {
 
 
           let zoomListener = event => {
             let browser = event.originalTarget;
 
             // For non-remote browsers, this event is dispatched on the document
             // rather than on the <browser>.
             if (browser instanceof Ci.nsIDOMDocument) {
-              browser = browser.defaultView.QueryInterface(Ci.nsIInterfaceRequestor)
-                               .getInterface(Ci.nsIDocShell)
-                               .chromeEventHandler;
+              browser = browser.docShell.chromeEventHandler;
             }
 
             let {gBrowser} = browser.ownerGlobal;
             let nativeTab = gBrowser.getTabForBrowser(browser);
             if (!nativeTab) {
               // We only care about zoom events in the top-level browser of a tab.
               return;
             }
--- a/browser/installer/windows/nsis/shared.nsh
+++ b/browser/installer/windows/nsis/shared.nsh
@@ -108,19 +108,22 @@
 
   ${RemoveDeprecatedFiles}
 
   ; Fix the distribution.ini file if applicable
   ${FixDistributionsINI}
 
   RmDir /r /REBOOTOK "$INSTDIR\${TO_BE_DELETED}"
 
-  ; Register AccessibleHandler.dll with COM (this writes to HKLM)
+  ; Register AccessibleHandler.dll with COM (this requires write access to HKLM)
   ${RegisterAccessibleHandler}
 
+  ; Register AccessibleMarshal.dll with COM (this requires write access to HKLM)
+  ${RegisterAccessibleMarshal}
+
 !ifdef MOZ_MAINTENANCE_SERVICE
   Call IsUserAdmin
   Pop $R0
   ${If} $R0 == "true"
   ; Only proceed if we have HKLM write access
   ${AndIf} $TmpVal == "HKLM"
     ; We check to see if the maintenance service install was already attempted.
     ; Since the Maintenance service can be installed either x86 or x64,
@@ -1009,16 +1012,21 @@
 !define AddMaintCertKeys "!insertmacro AddMaintCertKeys"
 !endif
 
 !macro RegisterAccessibleHandler
   ${RegisterDLL} "$INSTDIR\AccessibleHandler.dll"
 !macroend
 !define RegisterAccessibleHandler "!insertmacro RegisterAccessibleHandler"
 
+!macro RegisterAccessibleMarshal
+  ${RegisterDLL} "$INSTDIR\AccessibleMarshal.dll"
+!macroend
+!define RegisterAccessibleMarshal "!insertmacro RegisterAccessibleMarshal"
+
 ; Removes various registry entries for reasons noted below (does not use SHCTX).
 !macro RemoveDeprecatedKeys
   StrCpy $0 "SOFTWARE\Classes"
   ; Remove support for launching chrome urls from the shell during install or
   ; update if the DefaultIcon is from firefox.exe (Bug 301073).
   ${RegCleanAppHandler} "chrome"
 
   ; Remove protocol handler registry keys added by the MS shim
--- a/devtools/client/framework/test/browser.ini
+++ b/devtools/client/framework/test/browser.ini
@@ -67,17 +67,16 @@ skip-if = debug # Bug 1282269
 [browser_source_map-inline.js]
 [browser_source_map-no-race.js]
 [browser_source_map-reload.js]
 [browser_target_from_url.js]
 [browser_target_events.js]
 [browser_target_remote.js]
 [browser_target_support.js]
 [browser_toolbox_custom_host.js]
-skip-if = true # Bug 1386410
 [browser_toolbox_dynamic_registration.js]
 [browser_toolbox_getpanelwhenready.js]
 [browser_toolbox_highlight.js]
 [browser_toolbox_hosts.js]
 [browser_toolbox_hosts_size.js]
 [browser_toolbox_hosts_telemetry.js]
 [browser_toolbox_keyboard_navigation.js]
 skip-if = os == "mac" # Full keyboard navigation on OSX only works if Full Keyboard Access setting is set to All Control in System Keyboard Preferences
--- a/devtools/server/performance/memory.js
+++ b/devtools/server/performance/memory.js
@@ -190,17 +190,17 @@ Memory.prototype = {
 
     this.drainAllocationsTimeoutTimer = options.drainAllocationsTimeout;
 
     if (this.drainAllocationsTimeoutTimer != null) {
       if (this._poller) {
         this._poller.disarm();
       }
       this._poller = new DeferredTask(this._emitAllocations,
-                                      this.drainAllocationsTimeoutTimer);
+                                      this.drainAllocationsTimeoutTimer, 0);
       this._poller.arm();
     }
 
     if (options.maxLogLength != null) {
       this.dbg.memory.maxAllocationsLogLength = options.maxLogLength;
     }
     this.dbg.memory.trackingAllocationSites = true;
 
--- a/devtools/server/performance/profiler.js
+++ b/devtools/server/performance/profiler.js
@@ -382,17 +382,17 @@ const ProfilerManager = (function () {
     /**
      * Will enable or disable "profiler-status" events depending on
      * if there are subscribers and if the profiler is current recording.
      */
     _updateProfilerStatusPolling: function () {
       if (this._profilerStatusSubscribers > 0 && nsIProfilerModule.IsActive()) {
         if (!this._poller) {
           this._poller = new DeferredTask(this._emitProfilerStatus.bind(this),
-                                          this._profilerStatusInterval);
+                                          this._profilerStatusInterval, 0);
         }
         this._poller.arm();
       } else if (this._poller) {
         // No subscribers; turn off if it exists.
         this._poller.disarm();
       }
     },
 
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -12759,17 +12759,17 @@ nsGlobalWindow::FireDelayedDOMEvents()
 //*****************************************************************************
 
 nsPIDOMWindowOuter*
 nsGlobalWindow::GetParentInternal()
 {
   if (IsInnerWindow()) {
     nsGlobalWindow* outer = GetOuterWindowInternal();
     if (!outer) {
-      NS_WARNING("No outer window available!");
+      // No outer window available!
       return nullptr;
     }
     return outer->GetParentInternal();
   }
 
   nsCOMPtr<nsPIDOMWindowOuter> parent = GetParent();
 
   if (parent && parent != AsOuter()) {
--- a/dom/ipc/CoalescedMouseData.cpp
+++ b/dom/ipc/CoalescedMouseData.cpp
@@ -21,19 +21,16 @@ CoalescedMouseData::Coalesce(const Widge
     mGuid = aGuid;
     mInputBlockId = aInputBlockId;
   } else {
     MOZ_ASSERT(mGuid == aGuid);
     MOZ_ASSERT(mInputBlockId == aInputBlockId);
     MOZ_ASSERT(mCoalescedInputEvent->mModifiers == aEvent.mModifiers);
     MOZ_ASSERT(mCoalescedInputEvent->mReason == aEvent.mReason);
     MOZ_ASSERT(mCoalescedInputEvent->inputSource == aEvent.inputSource);
-
-    // Assuming button changes should trigger other mouse events and dispatch
-    // the coalesced mouse move events.
     MOZ_ASSERT(mCoalescedInputEvent->button == aEvent.button);
     MOZ_ASSERT(mCoalescedInputEvent->buttons == aEvent.buttons);
     mCoalescedInputEvent->mTimeStamp = aEvent.mTimeStamp;
     mCoalescedInputEvent->mRefPoint = aEvent.mRefPoint;
     mCoalescedInputEvent->pressure = aEvent.pressure;
     mCoalescedInputEvent->AssignPointerHelperData(aEvent);
   }
 }
@@ -42,16 +39,18 @@ bool
 CoalescedMouseData::CanCoalesce(const WidgetMouseEvent& aEvent,
                              const ScrollableLayerGuid& aGuid,
                              const uint64_t& aInputBlockId)
 {
   return !mCoalescedInputEvent ||
          (mCoalescedInputEvent->mModifiers == aEvent.mModifiers &&
           mCoalescedInputEvent->inputSource == aEvent.inputSource &&
           mCoalescedInputEvent->pointerId == aEvent.pointerId &&
+          mCoalescedInputEvent->button == aEvent.button &&
+          mCoalescedInputEvent->buttons == aEvent.buttons &&
           mGuid == aGuid &&
           mInputBlockId == aInputBlockId);
 }
 
 
 void
 CoalescedMouseMoveFlusher::WillRefresh(mozilla::TimeStamp aTime)
 {
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -2305,16 +2305,18 @@ ContentChild::ActorDestroy(ActorDestroyR
   }
 
   if (AbnormalShutdown == why) {
     NS_WARNING("shutting down early because of crash!");
     ProcessChild::QuickExit();
   }
 
 #ifndef NS_FREE_PERMANENT_DATA
+  CompositorManagerChild::Shutdown();
+
   // In release builds, there's no point in the content process
   // going through the full XPCOM shutdown path, because it doesn't
   // keep persistent state.
   ProcessChild::QuickExit();
 #else
   if (gFirstIdleTask) {
     gFirstIdleTask->Cancel();
   }
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -407,17 +407,17 @@ DecodedStream::DestroyData(UniquePtr<Dec
   }
 
   mOutputListener.Disconnect();
 
   DecodedStreamData* data = aData.release();
   data->Forget();
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction("DecodedStream::DestroyData",
                                                    [=]() { delete data; });
-  mAbstractMainThread->Dispatch(r.forget());
+  NS_DispatchToMainThread(r.forget());
 }
 
 void
 DecodedStream::SetPlaying(bool aPlaying)
 {
   AssertOwnerThread();
 
   // Resume/pause matters only when playback started.
new file mode 100644
--- /dev/null
+++ b/dom/media/test/crashtests/1378826.html
@@ -0,0 +1,46 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+<head>
+<title>Bug 1378826 : Removing last video track from recorder stream crashes.</title>
+</head>
+<body>
+<canvas id="canvas"></canvas>
+<script type="text/javascript">
+
+function wait(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+function boom() {
+  let canvas = document.getElementById("canvas");
+  let ctx = canvas.getContext('2d');
+  ctx.fillRect(10, 10, 100, 100);
+  let stream = canvas.captureStream();
+  let rec = new MediaRecorder(stream);
+  // At the time of fixing this bug onstop would fire, but this may change in
+  // future. As such defensively listen for onerror too to prevent this test
+  // timing out.
+  let stoppedPromise = new Promise(y => (rec.onstop = y,
+                                         rec.onerror = e => y));
+  rec.onstart = () => {
+    // Remove the video track from the stream we're recording
+    stream.removeTrack(stream.getTracks()[0]);
+    // Recorder should stop or error in response to the above
+    return stoppedPromise
+    .then(() => {
+      // Little wait to help get bad writes if they're going to happen
+      wait(100)
+      .then(() => {
+        // Didn't crash, finish
+        document.documentElement.removeAttribute("class");
+      });
+    });
+  };
+  rec.start();
+}
+
+window.onload = boom;
+
+</script>
+</body>
+</html>
--- a/dom/media/test/crashtests/crashtests.list
+++ b/dom/media/test/crashtests/crashtests.list
@@ -80,16 +80,17 @@ load 1127188.html
 load 1157994.html
 load 1158427.html
 load 1185176.html
 load 1185192.html
 load 1304948.html
 load 1319486.html
 load 1368490.html
 load 1291702.html
+load 1378826.html
 load 1384248.html
 load disconnect-wrong-destination.html
 load analyser-channels-1.html
 load audiocontext-double-suspend.html
 load buffer-source-duration-1.html
 load buffer-source-ended-1.html
 load buffer-source-resampling-start-1.html
 load buffer-source-slow-resampling-1.html
--- a/ipc/glue/GeckoChildProcessHost.cpp
+++ b/ipc/glue/GeckoChildProcessHost.cpp
@@ -1114,16 +1114,17 @@ GeckoChildProcessHost::PerformAsyncLaunc
 
   // Process type
   cmdLine.AppendLooseValue(UTF8ToWide(childProcessType));
 
 #if defined(XP_WIN) && defined(MOZ_SANDBOX)
   if (shouldSandboxCurrentProcess) {
     if (mSandboxBroker.LaunchApp(cmdLine.program().c_str(),
                                  cmdLine.command_line_string().c_str(),
+                                 mProcessType,
                                  mEnableSandboxLogging,
                                  &process)) {
       EnvironmentLog("MOZ_PROCESS_LOG").print(
         "==> process %d launched child process %d (%S)\n",
         base::GetCurrentProcId(), base::GetProcId(process),
         cmdLine.command_line_string().c_str());
     }
   } else
--- a/ipc/mscom/oop/Handler.cpp
+++ b/ipc/mscom/oop/Handler.cpp
@@ -350,19 +350,22 @@ Handler::Register(REFCLSID aClsid)
                                  mozilla::ArrayLength(absLibPath));
   if (!size || (size == mozilla::ArrayLength(absLibPath) &&
       GetLastError() == ERROR_INSUFFICIENT_BUFFER)) {
     DWORD lastError = GetLastError();
     Unregister(aClsid);
     return HRESULT_FROM_WIN32(lastError);
   }
 
+  // The result of GetModuleFileName excludes the null terminator
+  DWORD valueSizeWithNullInBytes = (size + 1) * sizeof(wchar_t);
+
   result = RegSetValueEx(inprocHandlerKey, L"", 0, REG_EXPAND_SZ,
                          reinterpret_cast<const BYTE*>(absLibPath),
-                         sizeof(absLibPath));
+                         valueSizeWithNullInBytes);
   if (result != ERROR_SUCCESS) {
     Unregister(aClsid);
     return HRESULT_FROM_WIN32(result);
   }
 
   const wchar_t kApartment[] = L"Apartment";
   result = RegSetValueEx(inprocHandlerKey, L"ThreadingModel", 0, REG_SZ,
                          reinterpret_cast<const BYTE*>(kApartment),
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -1653,19 +1653,22 @@ JS::ForEachProfiledFrameOp::FrameHandle:
         return JS::ProfilingFrameIterator::Frame_Baseline;
     return JS::ProfilingFrameIterator::Frame_Ion;
 }
 
 JS_PUBLIC_API(void)
 JS::ForEachProfiledFrame(JSContext* cx, void* addr, ForEachProfiledFrameOp& op)
 {
     js::jit::JitcodeGlobalTable* table = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
-    js::jit::JitcodeGlobalEntry& entry = table->lookupInfallible(addr);
+    js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);
+
+    if (!entry)
+        return;
 
     // Extract the stack for the entry.  Assume maximum inlining depth is <64
     const char* labels[64];
-    uint32_t depth = entry.callStackAtAddr(cx->runtime(), addr, labels, 64);
+    uint32_t depth = entry->callStackAtAddr(cx->runtime(), addr, labels, 64);
     MOZ_ASSERT(depth < 64);
     for (uint32_t i = depth; i != 0; i--) {
-        JS::ForEachProfiledFrameOp::FrameHandle handle(cx->runtime(), entry, addr, labels[i - 1], i - 1);
+        JS::ForEachProfiledFrameOp::FrameHandle handle(cx->runtime(), *entry, addr, labels[i - 1], i - 1);
         op(handle);
     }
 }
--- a/js/src/jit/JitcodeMap.h
+++ b/js/src/jit/JitcodeMap.h
@@ -1037,17 +1037,17 @@ class JitcodeGlobalTable
             freeTowers_[i] = nullptr;
     }
     ~JitcodeGlobalTable() {}
 
     bool empty() const {
         return skiplistSize_ == 0;
     }
 
-    const JitcodeGlobalEntry* lookup(void* ptr) {
+    JitcodeGlobalEntry* lookup(void* ptr) {
         return lookupInternal(ptr);
     }
 
     JitcodeGlobalEntry& lookupInfallible(void* ptr) {
         JitcodeGlobalEntry* entry = lookupInternal(ptr);
         MOZ_ASSERT(entry);
         return *entry;
     }
--- a/js/xpconnect/loader/ScriptPreloader.cpp
+++ b/js/xpconnect/loader/ScriptPreloader.cpp
@@ -596,18 +596,16 @@ ScriptPreloader::PrepareCacheWrite()
 //
 // - A block of XDR data for the encoded scripts, with each script's data at
 //   an offset from the start of the block, as specified above.
 Result<Ok, nsresult>
 ScriptPreloader::WriteCache()
 {
     MOZ_ASSERT(!NS_IsMainThread());
 
-    Unused << URLPreloader::GetSingleton().WriteCache();
-
     if (!mDataPrepared && !mSaveComplete) {
         MOZ_ASSERT(!mBlockedOnSyncDispatch);
         mBlockedOnSyncDispatch = true;
 
         MonitorAutoUnlock mau(mSaveMonitor);
 
         NS_DispatchToMainThread(
           NewRunnableMethod("ScriptPreloader::PrepareCacheWrite",
@@ -690,17 +688,20 @@ ScriptPreloader::Run()
     // Ideally wait about 10 seconds before saving, to avoid unnecessary IO
     // during early startup. But only if the cache hasn't been invalidated,
     // since that can trigger a new write during shutdown, and we don't want to
     // cause shutdown hangs.
     if (!mCacheInvalidated) {
         mal.Wait(10000);
     }
 
-    auto result = WriteCache();
+    auto result = URLPreloader::GetSingleton().WriteCache();
+    Unused << NS_WARN_IF(result.isErr());
+
+    result = WriteCache();
     Unused << NS_WARN_IF(result.isErr());
 
     result = mChildCache->WriteCache();
     Unused << NS_WARN_IF(result.isErr());
 
     mSaveComplete = true;
     NS_ReleaseOnMainThreadSystemGroup("ScriptPreloader::mSaveThread",
                                       mSaveThread.forget());
--- a/js/xpconnect/loader/URLPreloader.cpp
+++ b/js/xpconnect/loader/URLPreloader.cpp
@@ -197,16 +197,30 @@ URLPreloader::FindCacheFile()
     return Move(cacheFile);
 }
 
 Result<Ok, nsresult>
 URLPreloader::WriteCache()
 {
     MOZ_ASSERT(!NS_IsMainThread());
 
+    // The script preloader might call us a second time, if it has to re-write
+    // its cache after a cache flush. We don't care about cache flushes, since
+    // our cache doesn't store any file data, only paths. And we currently clear
+    // our cached file list after the first write, which means that a second
+    // write would (aside from breaking the invariant that we never touch
+    // mCachedURLs off-main-thread after the first write, and trigger a data
+    // race) mean we get no pre-loading on the next startup.
+    if (mCacheWritten) {
+        return Ok();
+    }
+    mCacheWritten = true;
+
+    LOG(Debug, "Writing cache...");
+
     nsCOMPtr<nsIFile> cacheFile;
     MOZ_TRY_VAR(cacheFile, GetCacheFile(NS_LITERAL_STRING("-new.bin")));
 
     bool exists;
     MOZ_TRY(cacheFile->Exists(&exists));
     if (exists) {
         MOZ_TRY(cacheFile->Remove(false));
     }
@@ -251,16 +265,18 @@ void
 URLPreloader::Cleanup()
 {
     mCachedURLs.Clear();
 }
 
 Result<Ok, nsresult>
 URLPreloader::ReadCache(LinkedList<URLEntry>& pendingURLs)
 {
+    LOG(Debug, "Reading cache...");
+
     nsCOMPtr<nsIFile> cacheFile;
     MOZ_TRY_VAR(cacheFile, FindCacheFile());
 
     AutoMemMap cache;
     MOZ_TRY(cache.init(cacheFile));
 
     auto size = cache.size();
 
@@ -296,16 +312,18 @@ URLPreloader::ReadCache(LinkedList<URLEn
 
         Range<uint8_t> header(data, data + headerSize);
         data += headerSize;
 
         InputBuffer buf(header);
         while (!buf.finished()) {
             CacheKey key(buf);
 
+            LOG(Debug, "Cached file: %s %s", key.TypeString(), key.mPath.get());
+
             auto entry = mCachedURLs.LookupOrAdd(key, key);
             entry->mResultCode = NS_ERROR_NOT_INITIALIZED;
 
             pendingURLs.insertBack(entry);
         }
 
         if (buf.error()) {
             return Err(NS_ERROR_UNEXPECTED);
@@ -380,16 +398,18 @@ URLPreloader::BackgroundReadFiles()
         // If there is any other error code, the entry has already failed at
         // this point, so don't bother trying to read it again.
         if (entry->mResultCode != NS_ERROR_NOT_INITIALIZED) {
             continue;
         }
 
         nsresult rv = NS_OK;
 
+        LOG(Debug, "Background reading %s file %s", entry->TypeString(), entry->mPath.get());
+
         if (entry->mType == entry->TypeFile) {
             auto result = entry->Read();
             if (result.isErr()) {
                 rv = result.unwrapErr();
             }
         } else {
             auto& cursor = cursors[i++];
 
--- a/js/xpconnect/loader/URLPreloader.h
+++ b/js/xpconnect/loader/URLPreloader.h
@@ -298,16 +298,19 @@ private:
     using HashType = nsClassHashtable<nsGenericHashKey<CacheKey>, URLEntry>;
 
     size_t ShallowSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
 
     bool mStartupFinished = false;
     bool mReaderInitialized = false;
 
+    // Only to be accessed from the cache write thread.
+    bool mCacheWritten = false;
+
     // The prefix URLs for files in the GRE and App omni jar archives.
     nsCString mGREPrefix;
     nsCString mAppPrefix;
 
     nsCOMPtr<nsIResProtocolHandler> mResProto;
     nsCOMPtr<nsIChromeRegistry> mChromeReg;
     nsCOMPtr<nsIFile> mProfD;
 
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -1826,16 +1826,17 @@ pref("network.http.max_response_header_s
 // If we should attempt to race the cache and network
 pref("network.http.rcwn.enabled", false);
 pref("network.http.rcwn.cache_queue_normal_threshold", 8);
 pref("network.http.rcwn.cache_queue_priority_threshold", 2);
 // We might attempt to race the cache with the network only if a resource
 // is smaller than this size.
 pref("network.http.rcwn.small_resource_size_kb", 256);
 
+pref("network.http.rcwn.min_wait_before_racing_ms", 0);
 pref("network.http.rcwn.max_wait_before_racing_ms", 500);
 
 // The ratio of the transaction count for the focused window and the count of
 // all available active connections.
 pref("network.http.focused_window_transaction_ratio", "0.9");
 
 // Whether or not we give more priority to active tab.
 // Note that this requires restart for changes to take effect.
@@ -3333,17 +3334,17 @@ pref("dom.ipc.processCount.file", 1);
 // WebExtensions only support a single extension process.
 pref("dom.ipc.processCount.extension", 1);
 
 // Don't use a native event loop in the content process.
 pref("dom.ipc.useNativeEventProcessing.content", false);
 
 // Quantum DOM scheduling:
 pref("dom.ipc.scheduler", false);
-pref("dom.ipc.scheduler.useMultipleQueues", false);
+pref("dom.ipc.scheduler.useMultipleQueues", true);
 pref("dom.ipc.scheduler.preemption", false);
 pref("dom.ipc.scheduler.threadCount", 2);
 pref("dom.ipc.scheduler.chaoticScheduling", false);
 
 // Disable support for SVG
 pref("svg.disabled", false);
 
 // Override default dom.ipc.processCount for some remote content process types.
--- a/netwerk/cookie/CookieServiceChild.cpp
+++ b/netwerk/cookie/CookieServiceChild.cpp
@@ -70,20 +70,16 @@ CookieServiceChild::CookieServiceChild()
     return;
   }
 
   // This corresponds to Release() in DeallocPCookieService.
   NS_ADDREF_THIS();
 
   NeckoChild::InitNeckoChild();
 
-  gNeckoChild->SetEventTargetForActor(
-    this,
-    SystemGroup::EventTargetFor(TaskCategory::Other));
-
   // Create a child PCookieService actor.
   gNeckoChild->SendPCookieServiceConstructor(this);
 
   mIPCOpen = true;
 
   mTLDService = do_GetService(NS_EFFECTIVETLDSERVICE_CONTRACTID);
   NS_ASSERTION(mTLDService, "couldn't get TLDService");
 
--- a/netwerk/dns/nsEffectiveTLDService.cpp
+++ b/netwerk/dns/nsEffectiveTLDService.cpp
@@ -207,19 +207,20 @@ nsEffectiveTLDService::GetBaseDomainInte
     return NS_ERROR_INVALID_ARG;
 
   // Check if we're dealing with an IPv4/IPv6 hostname, and return
   PRNetAddr addr;
   PRStatus result = PR_StringToNetAddr(aHostname.get(), &addr);
   if (result == PR_SUCCESS)
     return NS_ERROR_HOST_IS_IP_ADDRESS;
 
-  // Lookup in the cache if this is a normal query.
+  // Lookup in the cache if this is a normal query. This is restricted to
+  // main thread-only as the cache is not thread-safe.
   TLDCacheEntry* entry = nullptr;
-  if (aAdditionalParts == 1) {
+  if (aAdditionalParts == 1 && NS_IsMainThread()) {
     if (LookupForAdd(aHostname, &entry)) {
       // There was a match, just return the cached value.
       aBaseDomain = entry->mBaseDomain;
       if (trailingDot) {
         aBaseDomain.Append('.');
       }
 
       return NS_OK;
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -123,16 +123,17 @@ namespace {
 
 // Monotonically increasing ID for generating unique cache entries per
 // intercepted channel.
 static uint64_t gNumIntercepted = 0;
 static bool sRCWNEnabled = false;
 static uint32_t sRCWNQueueSizeNormal = 50;
 static uint32_t sRCWNQueueSizePriority = 10;
 static uint32_t sRCWNSmallResourceSizeKB = 256;
+static uint32_t sRCWNMinWaitMs = 0;
 static uint32_t sRCWNMaxWaitMs = 500;
 
 // True if the local cache should be bypassed when processing a request.
 #define BYPASS_LOCAL_CACHE(loadFlags) \
         (loadFlags & (nsIRequest::LOAD_BYPASS_CACHE | \
                       nsICachingChannel::LOAD_BYPASS_LOCAL_CACHE))
 
 #define RECOVER_FROM_CACHE_FILE_ERROR(result) \
@@ -620,17 +621,17 @@ nsHttpChannel::ConnectOnTailUnblock()
     // When racing, if OnCacheEntryAvailable is called before AsyncOpenURI
     // returns, then we may not have started reading from the cache.
     // If the content is valid, we should attempt to do so, as technically the
     // cache has won the race.
     if (mRaceCacheWithNetwork && mCachedContentIsValid) {
         Unused << ReadFromCache(true);
     }
 
-    return TriggerNetwork(0);
+    return TriggerNetwork();
 }
 
 nsresult
 nsHttpChannel::TryHSTSPriming()
 {
     bool isHttpScheme;
     nsresult rv = mURI->SchemeIs("http", &isHttpScheme);
     NS_ENSURE_SUCCESS(rv, rv);
@@ -4508,17 +4509,17 @@ nsHttpChannel::OnCacheEntryAvailableInte
         // request was already sent (see bug 1377223).
         AccumulateCategorical(Telemetry::LABELS_NETWORK_RACE_CACHE_VALIDATION::NotSent);
     }
 
     if (mRaceCacheWithNetwork && mCachedContentIsValid) {
         Unused << ReadFromCache(true);
     }
 
-    return TriggerNetwork(0);
+    return TriggerNetwork();
 }
 
 nsresult
 nsHttpChannel::OnNormalCacheEntryAvailable(nsICacheEntry *aEntry,
                                            bool aNew,
                                            nsresult aEntryStatus)
 {
     mCacheEntriesToWaitFor &= ~WAIT_FOR_CACHE_ENTRY;
@@ -6129,16 +6130,17 @@ nsHttpChannel::AsyncOpen(nsIStreamListen
 
     static bool sRCWNInited = false;
     if (!sRCWNInited) {
         sRCWNInited = true;
         Preferences::AddBoolVarCache(&sRCWNEnabled, "network.http.rcwn.enabled");
         Preferences::AddUintVarCache(&sRCWNQueueSizeNormal, "network.http.rcwn.cache_queue_normal_threshold");
         Preferences::AddUintVarCache(&sRCWNQueueSizePriority, "network.http.rcwn.cache_queue_priority_threshold");
         Preferences::AddUintVarCache(&sRCWNSmallResourceSizeKB, "network.http.rcwn.small_resource_size_kb");
+        Preferences::AddUintVarCache(&sRCWNMinWaitMs, "network.http.rcwn.min_wait_before_racing_ms");
         Preferences::AddUintVarCache(&sRCWNMaxWaitMs, "network.http.rcwn.max_wait_before_racing_ms");
     }
 
     rv = NS_CheckPortSafety(mURI);
     if (NS_FAILED(rv)) {
         ReleaseListeners();
         return rv;
     }
@@ -9297,17 +9299,53 @@ nsHttpChannel::Test_triggerDelayedOpenCa
     std::function<void(nsHttpChannel*)> cacheOpenFunc = nullptr;
     std::swap(cacheOpenFunc, mCacheOpenFunc);
     cacheOpenFunc(this);
 
     return NS_OK;
 }
 
 nsresult
-nsHttpChannel::TriggerNetwork(int32_t aTimeout)
+nsHttpChannel::TriggerNetworkWithDelay(uint32_t aDelay)
+{
+    MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
+
+    LOG(("nsHttpChannel::TriggerNetworkWithDelay [this=%p, delay=%u]\n",
+         this, aDelay));
+
+    if (mCanceled) {
+        LOG(("  channel was canceled.\n"));
+        return mStatus;
+    }
+
+    // If a network request has already gone out, there is no point in
+    // doing this again.
+    if (mNetworkTriggered) {
+        LOG(("  network already triggered. Returning.\n"));
+        return NS_OK;
+    }
+
+    if (!aDelay) {
+        // We cannot call TriggerNetwork() directly here, because it would
+        // cause a performance regression in tp6 tests (see bug 1398847).
+        return NS_DispatchToMainThread(
+            NewRunnableMethod("net::nsHttpChannel::TriggerNetworkWithDelay",
+                              this, &nsHttpChannel::TriggerNetwork),
+            NS_DISPATCH_NORMAL);
+    }
+
+    if (!mNetworkTriggerTimer) {
+        mNetworkTriggerTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+    }
+    mNetworkTriggerTimer->InitWithCallback(this, aDelay, nsITimer::TYPE_ONE_SHOT);
+    return NS_OK;
+}
+
+nsresult
+nsHttpChannel::TriggerNetwork()
 {
     MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
 
     LOG(("nsHttpChannel::TriggerNetwork [this=%p]\n", this));
 
     if (mCanceled) {
         LOG(("  channel was canceled.\n"));
         return mStatus;
@@ -9315,46 +9353,39 @@ nsHttpChannel::TriggerNetwork(int32_t aT
 
     // If a network request has already gone out, there is no point in
     // doing this again.
     if (mNetworkTriggered) {
         LOG(("  network already triggered. Returning.\n"));
         return NS_OK;
     }
 
-    if (!aTimeout) {
-        mNetworkTriggered = true;
-        if (mNetworkTriggerTimer) {
-            mNetworkTriggerTimer->Cancel();
-            mNetworkTriggerTimer = nullptr;
-        }
-
-        // If we are waiting for a proxy request, that means we can't trigger
-        // the next step just yet. We need for mConnectionInfo to be non-null
-        // before we call TryHSTSPriming. OnProxyAvailable will trigger
-        // BeginConnect, and Connect will call TryHSTSPriming even if it's
-        // for the cache callbacks.
-        if (mProxyRequest) {
-            LOG(("  proxy request in progress. Delaying network trigger.\n"));
-            mWaitingForProxy = true;
-            return NS_OK;
-        }
-
-        if (mCacheAsyncOpenCalled && !mOnCacheAvailableCalled) {
-            mRaceCacheWithNetwork = true;
-        }
-
-        LOG(("  triggering network\n"));
-        return TryHSTSPriming();
-    }
-
-    LOG(("  setting timer to trigger network: %d ms\n", aTimeout));
-    mNetworkTriggerTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
-    mNetworkTriggerTimer->InitWithCallback(this, aTimeout, nsITimer::TYPE_ONE_SHOT);
-    return NS_OK;
+    mNetworkTriggered = true;
+    if (mNetworkTriggerTimer) {
+        mNetworkTriggerTimer->Cancel();
+        mNetworkTriggerTimer = nullptr;
+    }
+
+    // If we are waiting for a proxy request, that means we can't trigger
+    // the next step just yet. We need for mConnectionInfo to be non-null
+    // before we call TryHSTSPriming. OnProxyAvailable will trigger
+    // BeginConnect, and Connect will call TryHSTSPriming even if it's
+    // for the cache callbacks.
+    if (mProxyRequest) {
+        LOG(("  proxy request in progress. Delaying network trigger.\n"));
+        mWaitingForProxy = true;
+        return NS_OK;
+    }
+
+    if (mCacheAsyncOpenCalled && !mOnCacheAvailableCalled) {
+        mRaceCacheWithNetwork = true;
+    }
+
+    LOG(("  triggering network\n"));
+    return TryHSTSPriming();
 }
 
 nsresult
 nsHttpChannel::MaybeRaceCacheWithNetwork()
 {
     // Don't trigger the network if the load flags say so.
     if (mLoadFlags & (LOAD_ONLY_FROM_CACHE | LOAD_NO_NETWORK_IO)) {
         return NS_OK;
@@ -9375,43 +9406,42 @@ nsHttpChannel::MaybeRaceCacheWithNetwork
         mRaceDelay = 0;
     } else {
         // Give cache a headstart of 3 times the average cache entry open time.
         mRaceDelay = CacheFileUtils::CachePerfStats::GetAverage(
                      CacheFileUtils::CachePerfStats::ENTRY_OPEN, true) * 3;
         // We use microseconds in CachePerfStats but we need milliseconds
         // for TriggerNetwork.
         mRaceDelay /= 1000;
-        if (mRaceDelay > sRCWNMaxWaitMs) {
-            mRaceDelay = sRCWNMaxWaitMs;
-        }
-    }
-
-    MOZ_ASSERT(sRCWNEnabled, "The pref must be truned on.");
+    }
+
+    mRaceDelay = clamped<uint32_t>(mRaceDelay, sRCWNMinWaitMs, sRCWNMaxWaitMs);
+
+    MOZ_ASSERT(sRCWNEnabled, "The pref must be turned on.");
     LOG(("nsHttpChannel::MaybeRaceCacheWithNetwork [this=%p, delay=%u]\n",
          this, mRaceDelay));
 
-    return TriggerNetwork(mRaceDelay);
+    return TriggerNetworkWithDelay(mRaceDelay);
 }
 
 NS_IMETHODIMP
 nsHttpChannel::Test_triggerNetwork(int32_t aTimeout)
 {
     MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
-    return TriggerNetwork(aTimeout);
+    return TriggerNetworkWithDelay(aTimeout);
 }
 
 NS_IMETHODIMP
 nsHttpChannel::Notify(nsITimer *aTimer)
 {
     RefPtr<nsHttpChannel> self(this);
     if (aTimer == mCacheOpenTimer) {
         return Test_triggerDelayedOpenCacheEntry();
     } else if (aTimer == mNetworkTriggerTimer) {
-        return TriggerNetwork(0);
+        return TriggerNetwork();
     } else {
         MOZ_CRASH("Unknown timer");
     }
 
     return NS_OK;
 }
 
 bool
--- a/netwerk/protocol/http/nsHttpChannel.h
+++ b/netwerk/protocol/http/nsHttpChannel.h
@@ -717,17 +717,18 @@ private:
         RESPONSE_FROM_NETWORK = 2   // response coming from the network
     };
     Atomic<ResponseSource, Relaxed> mFirstResponseSource;
 
     // Determines if it's possible and advisable to race the network request
     // with the cache fetch, and proceeds to do so.
     nsresult MaybeRaceCacheWithNetwork();
 
-    nsresult TriggerNetwork(int32_t aTimeout);
+    nsresult TriggerNetworkWithDelay(uint32_t aDelay);
+    nsresult TriggerNetwork();
     void CancelNetworkRequest(nsresult aStatus);
     // Timer used to delay the network request, or to trigger the network
     // request if retrieving the cache entry takes too long.
     nsCOMPtr<nsITimer> mNetworkTriggerTimer;
     // Is true if the network request has been triggered.
     bool mNetworkTriggered = false;
     bool mWaitingForProxy = false;
     // Is true if the onCacheEntryAvailable callback has been called.
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -385,16 +385,18 @@ skip-if = os == "android"
 [test_bug1279246.js]
 [test_throttlequeue.js]
 [test_throttlechannel.js]
 [test_throttling.js]
 [test_separate_connections.js]
 [test_rusturl.js]
 [test_trackingProtection_annotateChannels.js]
 [test_race_cache_with_network.js]
+# temporarily disabled because it is failing after landing bug 1398847
+skip-if = true
 [test_channel_priority.js]
 [test_bug1312774_http1.js]
 [test_1351443-missing-NewChannel2.js]
 [test_bug1312782_http1.js]
 [test_bug1355539_http1.js]
 [test_bug1378385_http1.js]
 [test_tls_flags_separate_connections.js]
 [test_tls_flags.js]
--- a/security/sandbox/win/src/sandboxbroker/sandboxBroker.cpp
+++ b/security/sandbox/win/src/sandboxbroker/sandboxBroker.cpp
@@ -18,16 +18,17 @@
 #include "mozilla/WindowsVersion.h"
 #include "nsAppDirectoryServiceDefs.h"
 #include "nsCOMPtr.h"
 #include "nsDirectoryServiceDefs.h"
 #include "nsIFile.h"
 #include "nsIProperties.h"
 #include "nsServiceManagerUtils.h"
 #include "nsString.h"
+#include "nsTHashtable.h"
 #include "sandbox/win/src/sandbox.h"
 #include "sandbox/win/src/security_level.h"
 #include "WinUtils.h"
 
 namespace mozilla
 {
 
 sandbox::BrokerServices *SandboxBroker::sBrokerService = nullptr;
@@ -45,16 +46,19 @@ static UniquePtr<nsString> sContentTempD
 static UniquePtr<nsString> sRoamingAppDataDir;
 static UniquePtr<nsString> sLocalAppDataDir;
 
 static LazyLogModule sSandboxBrokerLog("SandboxBroker");
 
 #define LOG_E(...) MOZ_LOG(sSandboxBrokerLog, LogLevel::Error, (__VA_ARGS__))
 #define LOG_W(...) MOZ_LOG(sSandboxBrokerLog, LogLevel::Warning, (__VA_ARGS__))
 
+// Used to store whether we have accumulated an error combination for this session.
+static UniquePtr<nsTHashtable<nsCStringHashKey>> sLaunchErrors;
+
 /* static */
 void
 SandboxBroker::Initialize(sandbox::BrokerServices* aBrokerServices)
 {
   sBrokerService = aBrokerServices;
 
   wchar_t exePath[MAX_PATH];
   if (!::GetModuleFileNameW(nullptr, exePath, MAX_PATH)) {
@@ -130,16 +134,17 @@ SandboxBroker::SandboxBroker()
   } else {
     mPolicy = nullptr;
   }
 }
 
 bool
 SandboxBroker::LaunchApp(const wchar_t *aPath,
                          const wchar_t *aArguments,
+                         GeckoProcessType aProcessType,
                          const bool aEnableLogging,
                          void **aProcessHandle)
 {
   if (!sBrokerService || !mPolicy) {
     return false;
   }
 
   // Set stdout and stderr, to allow inheritance for logging.
@@ -201,19 +206,35 @@ SandboxBroker::LaunchApp(const wchar_t *
   // Ceate the sandboxed process
   PROCESS_INFORMATION targetInfo = {0};
   sandbox::ResultCode result;
   sandbox::ResultCode last_warning = sandbox::SBOX_ALL_OK;
   DWORD last_error = ERROR_SUCCESS;
   result = sBrokerService->SpawnTarget(aPath, aArguments, mPolicy,
                                        &last_warning, &last_error, &targetInfo);
   if (sandbox::SBOX_ALL_OK != result) {
-    Telemetry::Accumulate(Telemetry::SANDBOX_FAILED_LAUNCH, result);
+    nsAutoCString key;
+    key.AppendASCII(XRE_ChildProcessTypeToString(aProcessType));
+    key.AppendLiteral("/0x");
+    key.AppendInt(static_cast<uint32_t>(last_error), 16);
+
+    if (!sLaunchErrors) {
+      sLaunchErrors = MakeUnique<nsTHashtable<nsCStringHashKey>>();
+      ClearOnShutdown(&sLaunchErrors);
+    }
+
+    // Only accumulate telemetry once per process-type/error combination per session.
+    if (!sLaunchErrors->Contains(key)) {
+      Telemetry::Accumulate(Telemetry::SANDBOX_FAILED_LAUNCH_KEYED, key, result);
+      sLaunchErrors->PutEntry(key);
+    }
+
     LOG_E("Failed (ResultCode %d) to SpawnTarget with last_error=%d, last_warning=%d",
           result, last_error, last_warning);
+
     return false;
   } else if (sandbox::SBOX_ALL_OK != last_warning) {
     // If there was a warning (but the result was still ok), log it and proceed.
     LOG_W("Warning on SpawnTarget with last_error=%d, last_warning=%d",
           last_error, last_warning);
   }
 
   // The sandboxed process is started in a suspended state, resume it now that
--- a/security/sandbox/win/src/sandboxbroker/sandboxBroker.h
+++ b/security/sandbox/win/src/sandboxbroker/sandboxBroker.h
@@ -6,16 +6,17 @@
 
 #ifndef __SECURITY_SANDBOX_SANDBOXBROKER_H__
 #define __SECURITY_SANDBOX_SANDBOXBROKER_H__
 
 #include <stdint.h>
 #include <windows.h>
 
 #include "base/child_privileges.h"
+#include "nsXULAppAPI.h"
 
 namespace sandbox {
   class BrokerServices;
   class TargetPolicy;
 }
 
 namespace mozilla {
 
@@ -29,16 +30,17 @@ public:
   /**
    * Cache directory paths for use in policy rules. Must be called on main
    * thread.
    */
   static void CacheRulesDirectories();
 
   bool LaunchApp(const wchar_t *aPath,
                  const wchar_t *aArguments,
+                 GeckoProcessType aProcessType,
                  const bool aEnableLogging,
                  void **aProcessHandle);
   virtual ~SandboxBroker();
 
   // Security levels for different types of processes
 #if defined(MOZ_CONTENT_SANDBOX)
   void SetSecurityLevelForContentProcess(int32_t aSandboxLevel,
                                          base::ChildPrivileges aPrivs);
--- a/taskcluster/ci/balrog/kind.yml
+++ b/taskcluster/ci/balrog/kind.yml
@@ -8,13 +8,12 @@ transforms:
    - taskgraph.transforms.name_sanity:transforms
    - taskgraph.transforms.balrog:transforms
    - taskgraph.transforms.task:transforms
 
 kind-dependencies:
   - beetmover
   - beetmover-l10n
   - beetmover-repackage
-  - beetmover-partials
 
 only-for-attributes:
   - nightly
   - signed
deleted file mode 100644
--- a/taskcluster/ci/beetmover-partials/kind.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-loader: taskgraph.loader.single_dep:loader
-
-transforms:
-   - taskgraph.transforms.name_sanity:transforms
-   - taskgraph.transforms.beetmover_repackage_l10n:transforms
-   - taskgraph.transforms.beetmover_repackage:transforms
-   - taskgraph.transforms.beetmover_partials:transforms
-   - taskgraph.transforms.task:transforms
-
-kind-dependencies:
-  - partials-signing
--- a/taskcluster/ci/docker-image/kind.yml
+++ b/taskcluster/ci/docker-image/kind.yml
@@ -21,10 +21,8 @@ jobs:
   valgrind-build:
     symbol: I(vb)
   lint:
     symbol: I(lnt)
   android-gradle-build:
     symbol: I(agb)
   index-task:
     symbol: I(idx)
-  partial-update-generator:
-    symbol: I(pg)
deleted file mode 100644
--- a/taskcluster/ci/partials-signing/kind.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-loader: taskgraph.loader.single_dep:loader
-
-transforms:
-  - taskgraph.transforms.name_sanity:transforms
-  - taskgraph.transforms.partials_signing:transforms
-  - taskgraph.transforms.task:transforms
-
-kind-dependencies:
-  - partials
deleted file mode 100644
--- a/taskcluster/ci/partials/kind.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-loader: taskgraph.loader.single_dep:loader
-
-transforms:
-  - taskgraph.transforms.name_sanity:transforms
-  - taskgraph.transforms.partials:transforms
-  - taskgraph.transforms.task:transforms
-
-kind-dependencies:
-  - repackage-signing
-
-only-for-attributes:
-  - nightly
-
-only-for-build-platforms:
-  - macosx64-nightly/opt
-  - win32-nightly/opt
-  - win64-nightly/opt
-  - linux-nightly/opt
-  - linux64-nightly/opt
--- a/taskcluster/ci/test/tests.yml
+++ b/taskcluster/ci/test/tests.yml
@@ -1142,32 +1142,41 @@ reftest:
             linux64-qr/.*: 1
             windows10-64-asan.*: 3
             default: default
 
 reftest-gpu:
     description: "Reftest GPU run"
     suite: reftest/reftest-gpu
     treeherder-symbol: tc-R(Rg)
+    chunks:
+        by-test-platform:
+            # Remove special casing when debug isn't using BBB anymore
+            windows7-32.*/debug: 1
+            default: 8
     run-on-projects:
         by-test-platform:
             windows10.*: []
             windows8-64.*: []
             default: built-projects
     worker-type:
         by-test-platform:
             windows7-32.*/debug: buildbot-bridge/buildbot-bridge
             default: null
     instance-size: default
     virtualization: virtual-with-gpu
     max-run-time: 3600
     mozharness:
         script: desktop_unittest.py
         no-read-buildbot-config: true
-        chunked: false
+        chunked:
+            # Remove special casing when debug isn't using BBB anymore
+            by-test-platform:
+                windows7-32.*/debug: false
+                default: true
         config:
             by-test-platform:
                 windows.*:
                     - unittests/win_taskcluster_unittest.py
                 macosx.*:
                     - unittests/mac_unittest.py
                 linux.*:
                     - unittests/linux_unittest.py
rename from taskcluster/docker/partial-update-generator/Dockerfile
rename to taskcluster/docker/funsize-update-generator/Dockerfile
--- a/taskcluster/docker/partial-update-generator/Dockerfile
+++ b/taskcluster/docker/funsize-update-generator/Dockerfile
@@ -20,20 +20,17 @@ RUN for i in 1 2 3 4 5; do freshclam --v
 # python-pip installs a lot of dependencies increasing the size of an image
 # drastically. Using easy_install saves us almost 200M.
 RUN easy_install pip
 RUN pip install -r /tmp/requirements.txt
 
 # scripts
 RUN mkdir /home/worker/bin
 COPY scripts/* /home/worker/bin/
-
 COPY runme.sh /runme.sh
 RUN chmod 755 /home/worker/bin/* /runme.sh
 RUN mkdir /home/worker/keys
 COPY *.pubkey /home/worker/keys/
 
 ENV           HOME          /home/worker
 ENV           SHELL         /bin/bash
 ENV           USER          worker
 ENV           LOGNAME       worker
-
-CMD ["/runme.sh"]
rename from taskcluster/docker/partial-update-generator/Makefile
rename to taskcluster/docker/funsize-update-generator/Makefile
rename from taskcluster/docker/partial-update-generator/README
rename to taskcluster/docker/funsize-update-generator/README
rename from taskcluster/docker/partial-update-generator/dep.pubkey
rename to taskcluster/docker/funsize-update-generator/dep.pubkey
rename from taskcluster/docker/partial-update-generator/nightly_sha1.pubkey
rename to taskcluster/docker/funsize-update-generator/nightly_sha1.pubkey
rename from taskcluster/docker/partial-update-generator/nightly_sha384.pubkey
rename to taskcluster/docker/funsize-update-generator/nightly_sha384.pubkey
rename from taskcluster/docker/partial-update-generator/release_sha1.pubkey
rename to taskcluster/docker/funsize-update-generator/release_sha1.pubkey
rename from taskcluster/docker/partial-update-generator/release_sha384.pubkey
rename to taskcluster/docker/funsize-update-generator/release_sha384.pubkey
rename from taskcluster/docker/partial-update-generator/requirements.txt
rename to taskcluster/docker/funsize-update-generator/requirements.txt
rename from taskcluster/docker/partial-update-generator/runme.sh
rename to taskcluster/docker/funsize-update-generator/runme.sh
rename from taskcluster/docker/partial-update-generator/scripts/funsize.py
rename to taskcluster/docker/funsize-update-generator/scripts/funsize.py
--- a/taskcluster/docker/partial-update-generator/scripts/funsize.py
+++ b/taskcluster/docker/funsize-update-generator/scripts/funsize.py
@@ -22,17 +22,16 @@ log = logging.getLogger(__name__)
 ALLOWED_URL_PREFIXES = [
     "http://download.cdn.mozilla.net/pub/mozilla.org/firefox/nightly/",
     "http://download.cdn.mozilla.net/pub/firefox/nightly/",
     "https://mozilla-nightly-updates.s3.amazonaws.com",
     "https://queue.taskcluster.net/",
     "http://ftp.mozilla.org/",
     "http://download.mozilla.org/",
     "https://archive.mozilla.org/",
-    "https://queue.taskcluster.net/v1/task/",
 ]
 
 DEFAULT_FILENAME_TEMPLATE = "{appName}-{branch}-{version}-{platform}-" \
                             "{locale}-{from_buildid}-{to_buildid}.partial.mar"
 
 
 def verify_signature(mar, certs):
     log.info("Checking %s signature", mar)
@@ -282,21 +281,17 @@ def main():
                       "previousBuildNumber", "toVersion",
                       "toBuildNumber"):
             if field in e:
                 mar_data[field] = e[field]
         mar_data.update(complete_mars)
         # if branch not set explicitly use repo-name
         mar_data["branch"] = e.get("branch",
                                    mar_data["repo"].rstrip("/").split("/")[-1])
-        if 'dest_mar' in e:
-            mar_name = e['dest_mar']
-        else:
-            # default to formatted name if not specified
-            mar_name = args.filename_template.format(**mar_data)
+        mar_name = args.filename_template.format(**mar_data)
         mar_data["mar"] = mar_name
         dest_mar = os.path.join(work_env.workdir, mar_name)
         # TODO: download these once
         work_env.download_buildsystem_bits(repo=mar_data["repo"],
                                            revision=mar_data["revision"])
         generate_partial(work_env, from_path, path, dest_mar,
                          mar_data["ACCEPTED_MAR_CHANNEL_IDS"],
                          mar_data["version"],
rename from taskcluster/docker/partial-update-generator/scripts/mbsdiff_hook.sh
rename to taskcluster/docker/funsize-update-generator/scripts/mbsdiff_hook.sh
--- a/taskcluster/docs/kinds.rst
+++ b/taskcluster/docs/kinds.rst
@@ -188,24 +188,16 @@ the language in the final artifact names
 
 beetmover-repackage
 -------------------
 
 Beetmover-repackage is beetmover but for tasks that need an intermediate step
 between signing and packaging, such as OSX. For more details see the definitions
 of the Beetmover kind above and the repackage kind below.
 
-beetmover-partials
-------------------
-
-Beetmover-partials is beetmover but for the partial updates that have been
-generated. Not every build produces partial updates, and so these are kept
-separate from the regular beetmover jobs to avoid situations where the completes
-are not uploaded.
-
 checksums-signing
 -----------------
 Checksums-signing take as input the checksums file generated by beetmover tasks
 and sign it via the signing scriptworkers. Returns the same file signed and
 additional detached signature.
 
 beetmover-checksums
 -------------------
@@ -236,18 +228,8 @@ repackage-l10n
 --------------
 Repackage-L10n is a ```Repackage``` task split up to be suitable for use after l10n repacks.
 
 
 repackage-signing
 -----------------
 Repackage-signing take the repackaged installers (windows) and update packaging (with
 the signed internal bits) and signs them.
-
-partials
---------
-Partials takes the complete.mar files produced in previous tasks and generates partial
-updates between previous nightly releases and the new one. Requires a release_history
-in the parameters. See ``mach release-history`` if doing this manually.
-
-partials-signing
-----------------
-Partials-signing takes the partial updates produced in Partials and signs them.
--- a/taskcluster/docs/parameters.rst
+++ b/taskcluster/docs/parameters.rst
@@ -102,22 +102,16 @@ syntax or reading a project-specific con
     one of the functions in ``taskcluster/taskgraph/target_tasks.py``.
 
 ``optimize_target_tasks``
     If true, then target tasks are eligible for optimization.
 
 ``include_nightly``
     If true, then nightly tasks are eligible for optimization.
 
-``release_history``
-   History of recent releases by platform and locale, used when generating
-   partial updates for nightly releases.
-   Suitable contents can be generated with ``mach release-history``,
-   which will print to the console by default.
-
 Morphed Set
 -----------
 
 ``morph_templates``
     Dict of JSON-e templates to apply to each task, keyed by template name.
     Values are extra context that will be available to the template under the
     ``input.<template>`` key. Available templates live in
     ``taskcluster/taskgraph/templates``. Enabled on try only.
--- a/taskcluster/mach_commands.py
+++ b/taskcluster/mach_commands.py
@@ -499,28 +499,8 @@ class TaskClusterImagesProvider(object):
         try:
             if context_only is None:
                 build_image(image_name)
             else:
                 build_context(image_name, context_only)
         except Exception:
             traceback.print_exc()
             sys.exit(1)
-
-
-@CommandProvider
-class TaskClusterPartialsData(object):
-    @Command('release-history', category="ci",
-             description="Query balrog for release history used by enable partials generation")
-    @CommandArgument('-b', '--branch',
-                     help="The gecko project branch used in balrog, such as "
-                          "mozilla-central, release, date")
-    @CommandArgument('--product', default='Firefox',
-                     help="The product identifier, such as 'Firefox'")
-    def generate_partials_builds(self, product, branch):
-        from taskgraph.util.partials import populate_release_history
-        try:
-            import yaml
-            release_history = {'release_history': populate_release_history(product, branch)}
-            print(yaml.safe_dump(release_history, allow_unicode=True, default_flow_style=False))
-        except Exception:
-            traceback.print_exc()
-            sys.exit(1)
--- a/taskcluster/taskgraph/decision.py
+++ b/taskcluster/taskgraph/decision.py
@@ -13,17 +13,16 @@ import re
 import time
 import yaml
 
 from .generator import TaskGraphGenerator
 from .create import create_tasks
 from .parameters import Parameters
 from .taskgraph import TaskGraph
 from .actions import render_actions_json
-from taskgraph.util.partials import populate_release_history
 from . import GECKO
 
 from taskgraph.util.templates import Templates
 from taskgraph.util.time import (
     json_time_from_now,
     current_json_time,
 )
 
@@ -103,17 +102,16 @@ def taskgraph_decision(options):
      * processing decision task command-line options into parameters
      * running task-graph generation exactly the same way the other `mach
        taskgraph` commands do
      * generating a set of artifacts to memorialize the graph
      * calling TaskCluster APIs to create the graph
     """
 
     parameters = get_decision_parameters(options)
-
     # create a TaskGraphGenerator instance
     tgg = TaskGraphGenerator(
         root_dir=options['root'],
         parameters=parameters)
 
     # write out the parameters used to generate this graph
     write_artifact('parameters.yml', dict(**parameters))
 
@@ -199,23 +197,16 @@ def get_decision_parameters(options):
             task_config = json.load(fh)
         parameters['morph_templates'] = task_config.get('templates', {})
         parameters['target_task_labels'] = task_config.get('tasks')
 
     # `target_tasks_method` has higher precedence than `project` parameters
     if options.get('target_tasks_method'):
         parameters['target_tasks_method'] = options['target_tasks_method']
 
-    # If the target method is nightly, we should build partials. This means
-    # knowing what has been released previously.
-    # An empty release_history is fine, it just means no partials will be built
-    parameters.setdefault('release_history', dict())
-    if 'nightly' in parameters.get('target_tasks_method', ''):
-        parameters['release_history'] = populate_release_history('Firefox', project)
-
     return Parameters(parameters)
 
 
 def write_artifact(filename, data):
     logger.info('writing artifact file `{}`'.format(filename))
     if not os.path.isdir(ARTIFACTS_DIR):
         os.mkdir(ARTIFACTS_DIR)
     path = os.path.join(ARTIFACTS_DIR, filename)
--- a/taskcluster/taskgraph/parameters.py
+++ b/taskcluster/taskgraph/parameters.py
@@ -23,17 +23,16 @@ PARAMETER_NAMES = set([
     'message',
     'morph_templates',
     'moz_build_date',
     'optimize_target_tasks',
     'owner',
     'project',
     'pushdate',
     'pushlog_id',
-    'release_history',
     'target_task_labels',
     'target_tasks_method',
 ])
 
 TRY_ONLY_PARAMETERS = set([
     'morph_templates',
     'target_task_labels',
 ])
--- a/taskcluster/taskgraph/transforms/balrog.py
+++ b/taskcluster/taskgraph/transforms/balrog.py
@@ -51,38 +51,31 @@ def validate(config, jobs):
 
 
 @transforms.add
 def make_task_description(config, jobs):
     for job in jobs:
         dep_job = job['dependent-task']
 
         treeherder = job.get('treeherder', {})
-        treeherder.setdefault('symbol', 'c-Up(N)')
+        treeherder.setdefault('symbol', 'tc-Up(N)')
         dep_th_platform = dep_job.task.get('extra', {}).get(
             'treeherder', {}).get('machine', {}).get('platform', '')
         treeherder.setdefault('platform',
                               "{}/opt".format(dep_th_platform))
         treeherder.setdefault('tier', 1)
         treeherder.setdefault('kind', 'build')
 
         attributes = copy_attributes_from_dependent_job(dep_job)
 
-        treeherder_job_symbol = dep_job.attributes.get('locale', 'N')
-
         if dep_job.attributes.get('locale'):
-            treeherder['symbol'] = 'c-Up({})'.format(treeherder_job_symbol)
+            treeherder['symbol'] = 'tc-Up({})'.format(dep_job.attributes.get('locale'))
             attributes['locale'] = dep_job.attributes.get('locale')
 
         label = job['label']
-        if 'partials' in dep_job.kind:
-            label = "partials-{}".format(label)
-            treeherder['symbol'] = 'cp-Up({})'.format(treeherder_job_symbol)
-            treeherder['tier'] = 3  # remove once proven stable
-
         description = (
             "Balrog submission for locale '{locale}' for build '"
             "{build_platform}/{build_type}'".format(
                 locale=attributes.get('locale', 'en-US'),
                 build_platform=attributes.get('build_platform'),
                 build_type=attributes.get('build_type')
             )
         )
@@ -96,16 +89,17 @@ def make_task_description(config, jobs):
         }]
 
         server_scope = get_balrog_server_scope(config)
         channel_scopes = get_balrog_channel_scopes(config)
 
         task = {
             'label': label,
             'description': description,
+            # do we have to define worker type somewhere?
             'worker-type': 'scriptworker-prov-v1/balrogworker-v1',
             'worker': {
                 'implementation': 'balrog',
                 'upstream-artifacts': upstream_artifacts,
             },
             'scopes': [server_scope] + channel_scopes,
             'dependencies': {'beetmover': dep_job.label},
             'attributes': attributes,
deleted file mode 100644
--- a/taskcluster/taskgraph/transforms/beetmover_partials.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-Add partial update artifacts to a beetmover task.
-"""
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-from taskgraph.transforms.base import TransformSequence
-from taskgraph.util.partials import (get_balrog_platform_name,
-                                     get_partials_artifacts,
-                                     get_partials_artifact_map)
-
-import logging
-logger = logging.getLogger(__name__)
-
-transforms = TransformSequence()
-
-
-def generate_upstream_artifacts(release_history, platform, locale=None):
-    if not locale or locale == 'en-US':
-        artifact_prefix = 'public/build'
-    else:
-        artifact_prefix = 'public/build/{}'.format(locale)
-
-    artifacts = get_partials_artifacts(release_history, platform, locale)
-
-    upstream_artifacts = [{
-        'taskId': {'task-reference': '<partials-signing>'},
-        'taskType': 'signing',
-        'paths': ["{}/{}".format(artifact_prefix, p)
-                  for p in artifacts],
-        'locale': locale or 'en-US',
-    }]
-
-    return upstream_artifacts
-
-
-@transforms.add
-def make_partials_artifacts(config, jobs):
-    for job in jobs:
-        locale = job["attributes"].get("locale")
-        if locale:
-            job['treeherder']['symbol'] = 'pBM({})'.format(locale)
-        else:
-            locale = 'en-US'
-            job['treeherder']['symbol'] = 'pBM(N)'
-
-        # Remove when proved reliable
-        job['treeherder']['tier'] = 3
-
-        platform = job["attributes"]["build_platform"]
-
-        platform = get_balrog_platform_name(platform)
-        upstream_artifacts = generate_upstream_artifacts(
-            config.params.get('release_history'), platform, locale
-        )
-
-        job['worker']['upstream-artifacts'].extend(upstream_artifacts)
-
-        extra = list()
-
-        artifact_map = get_partials_artifact_map(
-            config.params.get('release_history'), platform, locale)
-        for artifact in artifact_map:
-            extra.append({
-                'locale': locale,
-                'artifact_name': artifact,
-                'buildid': artifact_map[artifact],
-                'platform': platform,
-            })
-
-        job.setdefault('extra', {})
-        job['extra']['partials'] = extra
-
-        yield job
--- a/taskcluster/taskgraph/transforms/beetmover_repackage.py
+++ b/taskcluster/taskgraph/transforms/beetmover_repackage.py
@@ -210,24 +210,16 @@ def make_task_description(config, jobs):
 
         repackage_name = "repackage"
         # repackage-l10n actually uses the repackage depname here
         repackage_dependencies = {"repackage":
                                   dep_job.dependencies[repackage_name]
                                   }
         dependencies.update(repackage_dependencies)
 
-        # If this isn't a direct dependency, it won't be in there.
-        if 'repackage-signing' not in dependencies:
-            repackage_signing_name = "repackage-signing"
-            repackage_signing_deps = {"repackage-signing":
-                                      dep_job.dependencies[repackage_signing_name]
-                                      }
-            dependencies.update(repackage_signing_deps)
-
         attributes = copy_attributes_from_dependent_job(dep_job)
         if job.get('locale'):
             attributes['locale'] = job['locale']
 
         bucket_scope = get_beetmover_bucket_scope(config)
         action_scope = get_beetmover_action_scope(config)
 
         task = {
@@ -276,16 +268,17 @@ def generate_upstream_artifacts(build_ta
 
     for ref, tasktype, mapping in zip(task_refs, tasktypes, mapping):
         plarform_was_previously_matched_by_regex = None
         for platform_regex, paths in mapping.iteritems():
             if platform_regex.match(platform) is not None:
                 _check_platform_matched_only_one_regex(
                     tasktype, platform, plarform_was_previously_matched_by_regex, platform_regex
                 )
+
                 upstream_artifacts.append({
                     "taskId": {"task-reference": ref},
                     "taskType": tasktype,
                     "paths": ["{}/{}".format(artifact_prefix, path) for path in paths],
                     "locale": locale or "en-US",
                 })
                 plarform_was_previously_matched_by_regex = platform_regex
 
@@ -301,22 +294,18 @@ least 2 regular expressions. First match
 "{second_matched}"'.format(
             task_type=task_type, platform=platform,
             first_matched=plarform_was_previously_matched_by_regex.pattern,
             second_matched=platform_regex.pattern
         ))
 
 
 def is_valid_beetmover_job(job):
-    # beetmover after partials-signing should have six dependencies.
-    # windows builds w/o partials don't have docker-image, so fewer
-    # dependencies
-    if 'partials-signing' in job['dependencies'].keys():
-        expected_dep_count = 6
-    elif any(b in job['attributes']['build_platform'] for b in _WINDOWS_BUILD_PLATFORMS):
+    # windows builds don't have docker-image, so fewer dependencies
+    if any(b in job['attributes']['build_platform'] for b in _WINDOWS_BUILD_PLATFORMS):
         expected_dep_count = 4
     else:
         expected_dep_count = 5
 
     return (len(job["dependencies"]) == expected_dep_count and
             any(['repackage' in j for j in job['dependencies']]))
 
 
@@ -327,17 +316,16 @@ def make_task_worker(config, jobs):
             raise NotImplementedError("Beetmover_repackage must have five dependencies.")
 
         locale = job["attributes"].get("locale")
         platform = job["attributes"]["build_platform"]
         build_task = None
         build_signing_task = None
         repackage_task = None
         repackage_signing_task = None
-
         for dependency in job["dependencies"].keys():
             if 'repackage-signing' in dependency:
                 repackage_signing_task = dependency
             elif 'repackage' in dependency:
                 repackage_task = dependency
             elif 'signing' in dependency:
                 # catches build-signing and nightly-l10n-signing
                 build_signing_task = dependency
deleted file mode 100644
--- a/taskcluster/taskgraph/transforms/partials.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-Transform the partials task into an actual task description.
-"""
-from __future__ import absolute_import, print_function, unicode_literals
-
-from taskgraph.transforms.base import TransformSequence
-from taskgraph.util.attributes import copy_attributes_from_dependent_job
-from taskgraph.util.partials import get_balrog_platform_name, get_builds
-from taskgraph.util.taskcluster import get_taskcluster_artifact_prefix
-
-import logging
-logger = logging.getLogger(__name__)
-
-transforms = TransformSequence()
-
-
-def _generate_task_output_files(filenames, locale=None):
-    locale_output_path = '{}/'.format(locale) if locale else ''
-
-    data = list()
-    for filename in filenames:
-        data.append({
-            'type': 'file',
-            'path': '/home/worker/artifacts/{}'.format(filename),
-            'name': 'public/build/{}{}'.format(locale_output_path, filename)
-        })
-    data.append({
-        'type': 'file',
-        'path': '/home/worker/artifacts/manifest.json',
-        'name': 'public/build/{}manifest.json'.format(locale_output_path)
-    })
-    return data
-
-
-@transforms.add
-def make_task_description(config, jobs):
-    # If no balrog release history, then don't generate partials
-    if not config.params.get('release_history'):
-        return
-    for job in jobs:
-        dep_job = job['dependent-task']
-
-        treeherder = job.get('treeherder', {})
-        treeherder.setdefault('symbol', 'p(N)')
-
-        label = job.get('label', "partials-{}".format(dep_job.label))
-        dep_th_platform = dep_job.task.get('extra', {}).get(
-            'treeherder', {}).get('machine', {}).get('platform', '')
-
-        treeherder.setdefault('platform',
-                              "{}/opt".format(dep_th_platform))
-        treeherder.setdefault('kind', 'build')
-        treeherder.setdefault('tier', 3)
-
-        dependent_kind = str(dep_job.kind)
-        dependencies = {dependent_kind: dep_job.label}
-        signing_dependencies = dep_job.dependencies
-        # This is so we get the build task etc in our dependencies to
-        # have better beetmover support.
-        dependencies.update(signing_dependencies)
-
-        attributes = copy_attributes_from_dependent_job(dep_job)
-        locale = dep_job.attributes.get('locale')
-        if locale:
-            attributes['locale'] = locale
-            treeherder['symbol'] = "p({})".format(locale)
-
-        build_locale = locale or 'en-US'
-
-        builds = get_builds(config.params['release_history'], dep_th_platform,
-                            build_locale)
-
-        # If the list is empty there's no available history for this platform
-        # and locale combination, so we can't build any partials.
-        if not builds:
-            continue
-
-        signing_task = None
-        for dependency in dependencies.keys():
-            if 'repackage-signing' in dependency:
-                signing_task = dependency
-        signing_task_ref = '<{}>'.format(signing_task)
-
-        extra = {'funsize': {'partials': list()}}
-        update_number = 1
-        artifact_prefix = get_taskcluster_artifact_prefix(signing_task_ref, locale=locale)
-        artifact_path = "{}{}".format(artifact_prefix, 'target.complete.mar')
-        for build in builds:
-            extra['funsize']['partials'].append({
-                'locale': build_locale,
-                'from_mar': builds[build]['mar_url'],
-                'to_mar': {'task-reference': artifact_path},
-                'platform': get_balrog_platform_name(dep_th_platform),
-                'branch': config.params['project'],
-                'update_number': update_number,
-                'dest_mar': build,
-            })
-            update_number += 1
-
-        cot = extra.setdefault('chainOfTrust', {})
-        cot.setdefault('inputs', {})['docker-image'] = {"task-reference": "<docker-image>"}
-
-        worker = {
-            'artifacts': _generate_task_output_files(builds.keys(), locale),
-            'implementation': 'docker-worker',
-            'docker-image': {'in-tree': 'partial-update-generator'},
-            'os': 'linux',
-            'max-run-time': 3600,
-            'chain-of-trust': True,
-            'env': {
-                'SHA1_SIGNING_CERT': 'nightly_sha1',
-                'SHA384_SIGNING_CERT': 'nightly_sha384'
-            }
-        }
-
-        level = config.params['level']
-
-        task = {
-            'label': label,
-            'description': "{} Partials".format(
-                dep_job.task["metadata"]["description"]),
-            'worker-type': 'aws-provisioner-v1/gecko-%s-b-linux' % level,
-            'dependencies': dependencies,
-            'attributes': attributes,
-            'run-on-projects': dep_job.attributes.get('run_on_projects'),
-            'treeherder': treeherder,
-            'extra': extra,
-            'worker': worker,
-        }
-
-        yield task
deleted file mode 100644
--- a/taskcluster/taskgraph/transforms/partials_signing.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-Transform the partials task into an actual task description.
-"""
-from __future__ import absolute_import, print_function, unicode_literals
-
-from taskgraph.transforms.base import TransformSequence
-from taskgraph.util.attributes import copy_attributes_from_dependent_job
-from taskgraph.util.scriptworker import get_signing_cert_scope_per_platform
-from taskgraph.util.partials import get_balrog_platform_name, get_partials_artifacts
-
-import logging
-logger = logging.getLogger(__name__)
-
-transforms = TransformSequence()
-
-
-def generate_upstream_artifacts(release_history, platform, locale=None):
-    artifact_prefix = 'public/build'
-    if locale:
-        artifact_prefix = 'public/build/{}'.format(locale)
-    else:
-        locale = 'en-US'
-
-    artifacts = get_partials_artifacts(release_history, platform, locale)
-
-    upstream_artifacts = [{
-        "taskId": {"task-reference": '<partials>'},
-        "taskType": 'partials',
-        "paths": ["{}/{}".format(artifact_prefix, p)
-                  for p in artifacts],
-        "formats": ["mar_sha384"],
-    }]
-
-    return upstream_artifacts
-
-
-@transforms.add
-def make_task_description(config, jobs):
-    for job in jobs:
-        dep_job = job['dependent-task']
-
-        treeherder = job.get('treeherder', {})
-        treeherder.setdefault('symbol', 'ps(N)')
-
-        dep_th_platform = dep_job.task.get('extra', {}).get(
-            'treeherder', {}).get('machine', {}).get('platform', '')
-        label = job.get('label', "partials-signing-{}".format(dep_job.label))
-        dep_th_platform = dep_job.task.get('extra', {}).get(
-            'treeherder', {}).get('machine', {}).get('platform', '')
-        treeherder.setdefault('platform',
-                              "{}/opt".format(dep_th_platform))
-        treeherder.setdefault('kind', 'build')
-        treeherder.setdefault('tier', 3)
-
-        dependent_kind = str(dep_job.kind)
-        dependencies = {dependent_kind: dep_job.label}
-        signing_dependencies = dep_job.dependencies
-        # This is so we get the build task etc in our dependencies to
-        # have better beetmover support.
-        dependencies.update(signing_dependencies)
-
-        attributes = copy_attributes_from_dependent_job(dep_job)
-        locale = dep_job.attributes.get('locale')
-        if locale:
-            attributes['locale'] = locale
-            treeherder['symbol'] = 'ps({})'.format(locale)
-
-        balrog_platform = get_balrog_platform_name(dep_th_platform)
-        upstream_artifacts = generate_upstream_artifacts(config.params['release_history'],
-                                                         balrog_platform, locale)
-
-        build_platform = dep_job.attributes.get('build_platform')
-        is_nightly = dep_job.attributes.get('nightly')
-        signing_cert_scope = get_signing_cert_scope_per_platform(
-            build_platform, is_nightly, config
-        )
-        scopes = [signing_cert_scope, 'project:releng:signing:format:mar_sha384']
-        task = {
-            'label': label,
-            'description': "{} Partials".format(
-                dep_job.task["metadata"]["description"]),
-            'worker-type': 'scriptworker-prov-v1/signing-linux-v1',
-            'worker': {'implementation': 'scriptworker-signing',
-                           'upstream-artifacts': upstream_artifacts,
-                           'max-run-time': 3600},
-            'dependencies': dependencies,
-            'attributes': attributes,
-            'scopes': scopes,
-            'run-on-projects': dep_job.attributes.get('run_on_projects'),
-            'treeherder': treeherder,
-        }
-
-        yield task
--- a/taskcluster/taskgraph/transforms/repackage.py
+++ b/taskcluster/taskgraph/transforms/repackage.py
@@ -5,20 +5,22 @@
 Transform the repackage task into an actual task description.
 """
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.attributes import copy_attributes_from_dependent_job
 from taskgraph.util.schema import validate_schema, Schema
-from taskgraph.util.taskcluster import get_taskcluster_artifact_prefix
 from taskgraph.transforms.task import task_description_schema
 from voluptuous import Any, Required, Optional
 
+_TC_ARTIFACT_LOCATION = \
+        'https://queue.taskcluster.net/v1/task/{task_id}/artifacts/public/build/{postfix}'
+
 transforms = TransformSequence()
 
 # Voluptuous uses marker objects as dictionary *keys*, but they are not
 # comparable, so we cast all of the keys back to regular strings
 task_description_schema = {str(k): v for k, v in task_description_schema.schema.iteritems()}
 
 # shortcut for a string where task references are allowed
 taskref_or_string = Any(
@@ -196,18 +198,18 @@ def _generate_task_mozharness_config(bui
             return ['repackage/linux{}_signed.py'.format(bits)]
         elif build_platform.startswith('win'):
             return ['repackage/win{}_signed.py'.format(bits)]
 
     raise NotImplementedError('Unsupported build_platform: "{}"'.format(build_platform))
 
 
 def _generate_task_env(build_platform, build_task_ref, signing_task_ref, locale=None):
-    mar_prefix = get_taskcluster_artifact_prefix(build_task_ref, postfix='host/bin/', locale=None)
-    signed_prefix = get_taskcluster_artifact_prefix(signing_task_ref, locale=locale)
+    mar_prefix = _generate_taskcluster_prefix(build_task_ref, postfix='host/bin/', locale=None)
+    signed_prefix = _generate_taskcluster_prefix(signing_task_ref, locale=locale)
 
     if build_platform.startswith('linux') or build_platform.startswith('macosx'):
         tarball_extension = 'bz2' if build_platform.startswith('linux') else 'gz'
         return {
             'SIGNED_INPUT': {'task-reference': '{}target.tar.{}'.format(
                 signed_prefix, tarball_extension
             )},
             'UNSIGNED_MAR': {'task-reference': '{}mar'.format(mar_prefix)},
@@ -224,16 +226,23 @@ def _generate_task_env(build_platform, b
             task_env['SIGNED_SETUP_STUB'] = {
                 'task-reference': '{}setup-stub.exe'.format(signed_prefix),
             }
         return task_env
 
     raise NotImplementedError('Unsupported build_platform: "{}"'.format(build_platform))
 
 
+def _generate_taskcluster_prefix(task_id, postfix='', locale=None):
+    if locale:
+        postfix = '{}/{}'.format(locale, postfix)
+
+    return _TC_ARTIFACT_LOCATION.format(task_id=task_id, postfix=postfix)
+
+
 def _generate_task_output_files(build_platform, locale=None):
     locale_output_path = '{}/'.format(locale) if locale else ''
 
     if build_platform.startswith('linux') or build_platform.startswith('macosx'):
         output_files = [{
             'type': 'file',
             'path': '/builds/worker/workspace/build/artifacts/{}target.complete.mar'
                     .format(locale_output_path),
--- a/taskcluster/taskgraph/transforms/repackage_signing.py
+++ b/taskcluster/taskgraph/transforms/repackage_signing.py
@@ -124,14 +124,28 @@ def make_repackage_signing_description(c
                        'max-run-time': 3600},
             'scopes': scopes,
             'dependencies': dependencies,
             'attributes': attributes,
             'run-on-projects': dep_job.attributes.get('run_on_projects'),
             'treeherder': treeherder,
         }
 
+        funsize_platforms = [
+            'linux-nightly',
+            'linux64-nightly',
+            'macosx64-nightly',
+            'win32-nightly',
+            'win64-nightly'
+        ]
+        if build_platform in funsize_platforms and is_nightly:
+            route_template = "project.releng.funsize.level-{level}.{project}"
+            task['routes'] = [
+                route_template.format(project=config.params['project'],
+                                      level=config.params['level'])
+            ]
+
         yield task
 
 
 def _generate_worker_type(signing_cert_scope):
     worker_type = 'depsigning' if 'dep-signing' in signing_cert_scope else 'signing-linux-v1'
     return 'scriptworker-prov-v1/{}'.format(worker_type)
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -494,33 +494,29 @@ GROUP_NAMES = {
     'tc-W': 'Web platform tests executed by TaskCluster',
     'tc-W-e10s': 'Web platform tests executed by TaskCluster with e10s',
     'tc-X': 'Xpcshell tests executed by TaskCluster',
     'tc-X-e10s': 'Xpcshell tests executed by TaskCluster with e10s',
     'tc-L10n': 'Localised Repacks executed by Taskcluster',
     'tc-L10n-Rpk': 'Localized Repackaged Repacks executed by Taskcluster',
     'tc-BM-L10n': 'Beetmover for locales executed by Taskcluster',
     'tc-BMR-L10n': 'Beetmover repackages for locales executed by Taskcluster',
-    'c-Up': 'Balrog submission of complete updates',
+    'tc-Up': 'Balrog submission of updates, executed by Taskcluster',
     'tc-cs': 'Checksum signing executed by Taskcluster',
     'tc-rs': 'Repackage signing executed by Taskcluster',
     'tc-BMcs': 'Beetmover checksums, executed by Taskcluster',
     'Aries': 'Aries Device Image',
     'Nexus 5-L': 'Nexus 5-L Device Image',
     'I': 'Docker Image Builds',
     'TL': 'Toolchain builds for Linux 64-bits',
     'TM': 'Toolchain builds for OSX',
     'TW32': 'Toolchain builds for Windows 32-bits',
     'TW64': 'Toolchain builds for Windows 64-bits',
     'SM-tc': 'Spidermonkey builds',
     'pub': 'APK publishing',
-    'p': 'Partial generation',
-    'ps': 'Partials signing',
-    'pBM': 'Beetmover for partials',
-    'cp-Up': 'Balrog submission of updates, completes and partials',
 }
 UNKNOWN_GROUP_NAME = "Treeherder group {} has no name; add it to " + __file__
 
 V2_ROUTE_TEMPLATES = [
     "index.gecko.v2.{project}.latest.{product}.{job-name}",
     "index.gecko.v2.{project}.pushdate.{build_date_long}.{product}.{job-name}",
     "index.gecko.v2.{project}.pushlog-id.{pushlog_id}.{product}.{job-name}",
     "index.gecko.v2.{project}.revision.{head_rev}.{product}.{job-name}",
deleted file mode 100644
--- a/taskcluster/taskgraph/util/partials.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-import requests
-import redo
-
-import logging
-logger = logging.getLogger(__name__)
-
-BALROG_API_ROOT = 'https://aus5.mozilla.org/api/v1'
-
-PLATFORM_RENAMES = {
-    'windows2012-32': 'win32',
-    'windows2012-64': 'win64',
-    'osx-cross': 'macosx64',
-}
-
-BALROG_PLATFORM_MAP = {
-    "linux": [
-        "Linux_x86-gcc3"
-    ],
-    "linux64": [
-        "Linux_x86_64-gcc3"
-    ],
-    "macosx64": [
-        "Darwin_x86_64-gcc3-u-i386-x86_64",
-        "Darwin_x86-gcc3-u-i386-x86_64",
-        "Darwin_x86-gcc3",
-        "Darwin_x86_64-gcc3"
-    ],
-    "win32": [
-        "WINNT_x86-msvc",
-        "WINNT_x86-msvc-x86",
-        "WINNT_x86-msvc-x64"
-    ],
-    "win64": [
-        "WINNT_x86_64-msvc",
-        "WINNT_x86_64-msvc-x64"
-    ]
-}
-
-
-def get_balrog_platform_name(platform):
-    """Convert build platform names into balrog platform names"""
-    if '-nightly' in platform:
-        platform = platform.replace('-nightly', '')
-    if '-devedition' in platform:
-        platform = platform.replace('-devedition', '')
-    return PLATFORM_RENAMES.get(platform, platform)
-
-
-def _sanitize_platform(platform):
-    platform = get_balrog_platform_name(platform)
-    if platform not in BALROG_PLATFORM_MAP:
-        return platform
-    return BALROG_PLATFORM_MAP[platform][0]
-
-
-def get_builds(release_history, platform, locale):
-    """Examine cached balrog release history and return the list of
-    builds we need to generate diffs from"""
-    platform = _sanitize_platform(platform)
-    return release_history.get(platform, {}).get(locale, {})
-
-
-def get_partials_artifacts(release_history, platform, locale):
-    platform = _sanitize_platform(platform)
-    return release_history.get(platform, {}).get(locale, {}).keys()
-
-
-def get_partials_artifact_map(release_history, platform, locale):
-    platform = _sanitize_platform(platform)
-    return {k: release_history[platform][locale][k]['buildid']
-            for k in release_history.get(platform, {}).get(locale, {})}
-
-
-def _retry_on_http_errors(url, verify, params, errors):
-    if params:
-        params_str = "&".join("=".join([k, str(v)])
-                              for k, v in params.iteritems())
-    else:
-        params_str = ''
-    logger.info("Connecting to %s?%s", url, params_str)
-    for _ in redo.retrier(sleeptime=5, max_sleeptime=30, attempts=10):
-        try:
-            req = requests.get(url, verify=verify, params=params)
-            req.raise_for_status()
-            return req
-        except requests.HTTPError as e:
-            if e.response.status_code in errors:
-                logger.exception("Got HTTP %s trying to reach %s",
-                                 e.response.status_code, url)
-            else:
-                raise
-    else:
-        raise
-
-
-def get_sorted_releases(product, branch):
-    """Returns a list of release names from Balrog.
-    :param product: product name, AKA appName
-    :param branch: branch name, e.g. mozilla-central
-    :return: a sorted list of release names, most recent first.
-    """
-    url = "{}/releases".format(BALROG_API_ROOT)
-    params = {
-        "product": product,
-        # Adding -nightly-2 (2 stands for the beginning of build ID
-        # based on date) should filter out release and latest blobs.
-        # This should be changed to -nightly-3 in 3000 ;)
-        "name_prefix": "{}-{}-nightly-2".format(product, branch),
-        "names_only": True
-    }
-    req = _retry_on_http_errors(
-        url=url, verify=True, params=params,
-        errors=[500])
-    releases = req.json()["names"]
-    releases = sorted(releases, reverse=True)
-    return releases
-
-
-def get_release_builds(release):
-    url = "{}/releases/{}".format(BALROG_API_ROOT, release)
-    req = _retry_on_http_errors(
-        url=url, verify=True, params=None,
-        errors=[500])
-    return req.json()
-
-
-def populate_release_history(product, branch, maxbuilds=4, maxsearch=10):
-    """Find relevant releases in Balrog
-    Not all releases have all platforms and locales, due
-    to Taskcluster migration.
-
-        Args:
-            product (str): capitalized product name, AKA appName, e.g. Firefox
-            branch (str): branch name (mozilla-central)
-            maxbuilds (int): Maximum number of historical releases to populate
-            maxsearch(int): Traverse at most this many releases, to avoid
-                working through the entire history.
-        Returns:
-            json object based on data from balrog api
-
-            results = {
-                'platform1': {
-                    'locale1': {
-                        'buildid1': mar_url,
-                        'buildid2': mar_url,
-                        'buildid3': mar_url,
-                    },
-                    'locale2': {
-                        'target.partial-1.mar': ('buildid1': 'mar_url'),
-                    }
-                },
-                'platform2': {
-                }
-            }
-        """
-    last_releases = get_sorted_releases(product, branch)
-
-    partial_mar_tmpl = 'target.partial-{}.mar'
-
-    builds = dict()
-    for release in last_releases[:maxsearch]:
-        # maxbuilds in all categories, don't make any more queries
-        full = len(builds) > 0 and all(
-            len(builds[platform][locale]) >= maxbuilds
-            for platform in builds for locale in builds[platform])
-        if full:
-            break
-        history = get_release_builds(release)
-
-        for platform in history['platforms']:
-            if 'alias' in history['platforms'][platform]:
-                continue
-            if platform not in builds:
-                builds[platform] = dict()
-            for locale in history['platforms'][platform]['locales']:
-                if locale not in builds[platform]:
-                    builds[platform][locale] = dict()
-                if len(builds[platform][locale]) >= maxbuilds:
-                    continue
-                buildid = history['platforms'][platform]['locales'][locale]['buildID']
-                url = history['platforms'][platform]['locales'][locale]['completes'][0]['fileUrl']
-                nextkey = len(builds[platform][locale]) + 1
-                builds[platform][locale][partial_mar_tmpl.format(nextkey)] = {
-                    'buildid': buildid,
-                    'mar_url': url,
-                }
-    return builds
--- a/taskcluster/taskgraph/util/taskcluster.py
+++ b/taskcluster/taskgraph/util/taskcluster.py
@@ -8,19 +8,16 @@ from __future__ import absolute_import, 
 
 import functools
 import yaml
 import requests
 from mozbuild.util import memoize
 from requests.packages.urllib3.util.retry import Retry
 from requests.adapters import HTTPAdapter
 
-_TC_ARTIFACT_LOCATION = \
-        'https://queue.taskcluster.net/v1/task/{task_id}/artifacts/public/build/{postfix}'
-
 
 @memoize
 def get_session():
     session = requests.Session()
     retry = Retry(total=5, backoff_factor=0.1,
                   status_forcelist=[500, 502, 503, 504])
     session.mount('http://', HTTPAdapter(max_retries=retry))
     session.mount('https://', HTTPAdapter(max_retries=retry))
@@ -99,15 +96,8 @@ def get_task_url(task_id, use_proxy=Fals
     else:
         TASK_URL = 'https://queue.taskcluster.net/v1/task/{}'
     return TASK_URL.format(task_id)
 
 
 def get_task_definition(task_id, use_proxy=False):
     response = _do_request(get_task_url(task_id, use_proxy))
     return response.json()
-
-
-def get_taskcluster_artifact_prefix(task_id, postfix='', locale=None):
-    if locale:
-        postfix = '{}/{}'.format(locale, postfix)
-
-    return _TC_ARTIFACT_LOCATION.format(task_id=task_id, postfix=postfix)
--- a/toolkit/components/extensions/Extension.jsm
+++ b/toolkit/components/extensions/Extension.jsm
@@ -377,17 +377,17 @@ this.ExtensionData = class {
     if (!this.uuid) {
       this.uuid = UUIDMap.get(this.id);
     }
     return `moz-extension://${this.uuid}/${path}`;
   }
 
   async readDirectory(path) {
     if (this.rootURI instanceof Ci.nsIFileURL) {
-      let uri = Services.io.newURI(this.rootURI.resolve("./" + path));
+      let uri = Services.io.newURI("./" + path, null, this.rootURI);
       let fullPath = uri.QueryInterface(Ci.nsIFileURL).file.path;
 
       let iter = new OS.File.DirectoryIterator(fullPath);
       let results = [];
 
       try {
         await iter.forEach(entry => {
           results.push(entry);
@@ -1713,14 +1713,13 @@ this.Langpack = class extends ExtensionD
         } else {
           // If the path is not a string, it's an object with path per platform
           // where the keys are taken from AppConstants.platform
           const platform = AppConstants.platform;
           if (platform in path) {
             chromeEntries.push(["locale", alias, language, path[platform]]);
           }
         }
-
       }
     }
     return chromeEntries;
   }
 };
--- a/toolkit/components/extensions/ExtensionChild.jsm
+++ b/toolkit/components/extensions/ExtensionChild.jsm
@@ -39,16 +39,17 @@ Cu.import("resource://gre/modules/Extens
 
 const {
   DefaultMap,
   EventEmitter,
   LimitedSet,
   defineLazyGetter,
   getMessageManager,
   getUniqueId,
+  getWinUtils,
   withHandlingUserInput,
 } = ExtensionUtils;
 
 const {
   EventManager,
   LocalAPIImplementation,
   LocaleData,
   NoCloneSpreadArgs,
@@ -679,18 +680,17 @@ class ProxyAPIImplementation extends Sch
 
   callFunctionNoReturn(args) {
     this.childApiManager.callParentFunctionNoReturn(this.path, args);
   }
 
   callAsyncFunction(args, callback, requireUserInput) {
     if (requireUserInput) {
       let context = this.childApiManager.context;
-      let winUtils = context.contentWindow.getInterface(Ci.nsIDOMWindowUtils);
-      if (!winUtils.isHandlingUserInput) {
+      if (!getWinUtils(context.contentWindow).isHandlingUserInput) {
         let err = new context.cloneScope.Error(`${this.path} may only be called from a user input handler`);
         return context.wrapPromise(Promise.reject(err), callback);
       }
     }
     return this.childApiManager.callParentAsyncFunction(this.path, args, callback);
   }
 
   addListener(listener, args) {
--- a/toolkit/components/extensions/ExtensionCommon.jsm
+++ b/toolkit/components/extensions/ExtensionCommon.jsm
@@ -18,17 +18,16 @@ this.EXPORTED_SYMBOLS = ["ExtensionCommo
 Cu.importGlobalProperties(["fetch"]);
 
 Cu.import("resource://gre/modules/Services.jsm");
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 
 XPCOMUtils.defineLazyModuleGetters(this, {
   ConsoleAPI: "resource://gre/modules/Console.jsm",
   MessageChannel: "resource://gre/modules/MessageChannel.jsm",
-  Preferences: "resource://gre/modules/Preferences.jsm",
   PrivateBrowsingUtils: "resource://gre/modules/PrivateBrowsingUtils.jsm",
   Schemas: "resource://gre/modules/Schemas.jsm",
 });
 
 XPCOMUtils.defineLazyServiceGetter(this, "styleSheetService",
                                    "@mozilla.org/content/style-sheet-service;1",
                                    "nsIStyleSheetService");
 
@@ -41,17 +40,17 @@ var {
   DefaultWeakMap,
   EventEmitter,
   ExtensionError,
   defineLazyGetter,
   filterStack,
   getConsole,
   getInnerWindowID,
   getUniqueId,
-  instanceOf,
+  getWinUtils,
 } = ExtensionUtils;
 
 XPCOMUtils.defineLazyGetter(this, "console", getConsole);
 
 var ExtensionCommon;
 
 /**
  * A sentinel class to indicate that an array of values should be
@@ -191,18 +190,17 @@ class BaseContext {
     this.messageManager = null;
     this.docShell = null;
     this.contentWindow = null;
     this.innerWindowID = 0;
   }
 
   setContentWindow(contentWindow) {
     let {document} = contentWindow;
-    let docShell = contentWindow.QueryInterface(Ci.nsIInterfaceRequestor)
-                                .getInterface(Ci.nsIDocShell);
+    let {docShell} = document;
 
     this.innerWindowID = getInnerWindowID(contentWindow);
     this.messageManager = docShell.QueryInterface(Ci.nsIInterfaceRequestor)
                                   .getInterface(Ci.nsIContentFrameMessageManager);
 
     if (this.incognito == null) {
       this.incognito = PrivateBrowsingUtils.isContentWindowPrivate(contentWindow);
     }
@@ -372,18 +370,20 @@ class BaseContext {
    * @param {Error|object} error
    * @returns {Error}
    */
   normalizeError(error) {
     if (error instanceof this.cloneScope.Error) {
       return error;
     }
     let message, fileName;
-    if (instanceOf(error, "Object") || error instanceof ExtensionError ||
-        (typeof error == "object" && this.principal.subsumes(Cu.getObjectPrincipal(error)))) {
+    if (error && typeof error === "object" &&
+        (ChromeUtils.getClassName(error) === "Object" ||
+         error instanceof ExtensionError ||
+         this.principal.subsumes(Cu.getObjectPrincipal(error)))) {
       message = error.message;
       fileName = error.fileName;
     } else {
       Cu.reportError(error);
     }
     message = message || "An unexpected error occurred";
     return new this.cloneScope.Error(message, fileName);
   }
@@ -663,19 +663,17 @@ class LocalAPIImplementation extends Sch
   callFunctionNoReturn(args) {
     this.pathObj[this.name](...args);
   }
 
   callAsyncFunction(args, callback, requireUserInput) {
     let promise;
     try {
       if (requireUserInput) {
-        let winUtils = this.context.contentWindow
-                           .getInterface(Ci.nsIDOMWindowUtils);
-        if (!winUtils.isHandlingUserInput) {
+        if (!getWinUtils(this.context.contentWindow).isHandlingUserInput) {
           throw new ExtensionError(`${this.name} may only be called from a user input handler`);
         }
       }
       promise = this.pathObj[this.name](...args) || Promise.resolve();
     } catch (e) {
       promise = Promise.reject(e);
     }
     return this.context.wrapPromise(promise, callback);
@@ -1445,69 +1443,72 @@ LocaleData.prototype = {
     });
   },
 
   // Validates the contents of a locale JSON file, normalizes the
   // messages into a Map of message key -> localized string pairs.
   addLocale(locale, messages, extension) {
     let result = new Map();
 
+    let isPlainObject = obj => (obj && typeof obj === "object" &&
+                                ChromeUtils.getClassName(obj) === "Object");
+
     // Chrome does not document the semantics of its localization
     // system very well. It handles replacements by pre-processing
     // messages, replacing |$[a-zA-Z0-9@_]+$| tokens with the value of their
     // replacements. Later, it processes the resulting string for
     // |$[0-9]| replacements.
     //
     // Again, it does not document this, but it accepts any number
     // of sequential |$|s, and replaces them with that number minus
     // 1. It also accepts |$| followed by any number of sequential
     // digits, but refuses to process a localized string which
     // provides more than 9 substitutions.
-    if (!instanceOf(messages, "Object")) {
+    if (!isPlainObject(messages)) {
       extension.packagingError(`Invalid locale data for ${locale}`);
       return result;
     }
 
     for (let key of Object.keys(messages)) {
       let msg = messages[key];
 
-      if (!instanceOf(msg, "Object") || typeof(msg.message) != "string") {
+      if (!isPlainObject(msg) || typeof(msg.message) != "string") {
         extension.packagingError(`Invalid locale message data for ${locale}, message ${JSON.stringify(key)}`);
         continue;
       }
 
       // Substitutions are case-insensitive, so normalize all of their names
       // to lower-case.
       let placeholders = new Map();
-      if (instanceOf(msg.placeholders, "Object")) {
+      if (isPlainObject(msg.placeholders)) {
         for (let key of Object.keys(msg.placeholders)) {
           placeholders.set(key.toLowerCase(), msg.placeholders[key]);
         }
       }
 
       let replacer = (match, name) => {
         let replacement = placeholders.get(name.toLowerCase());
-        if (instanceOf(replacement, "Object") && "content" in replacement) {
+        if (isPlainObject(replacement) && "content" in replacement) {
           return replacement.content;
         }
         return "";
       };
 
       let value = msg.message.replace(/\$([A-Za-z0-9@_]+)\$/g, replacer);
 
       // Message names are also case-insensitive, so normalize them to lower-case.
       result.set(key.toLowerCase(), value);
     }
 
     this.messages.set(locale, result);
     return result;
   },
 
   get acceptLanguages() {
-    let result = Preferences.get("intl.accept_languages", "", Ci.nsIPrefLocalizedString);
+    let result = Services.prefs.getComplexValue("intl.accept_languages", Ci.nsIPrefLocalizedString).data;
     return result.split(/\s*,\s*/g);
   },
 
 
   get uiLocale() {
     return Services.locale.getAppLocaleAsBCP47();
   },
 };
--- a/toolkit/components/extensions/ExtensionPageChild.jsm
+++ b/toolkit/components/extensions/ExtensionPageChild.jsm
@@ -358,18 +358,17 @@ ExtensionPageChild = {
     let context = this.extensionContexts.get(windowId);
     if (context) {
       if (context.extension !== extension) {
         throw new Error("A different extension context already exists for this frame");
       }
       throw new Error("An extension context was already initialized for this frame");
     }
 
-    let mm = contentWindow.QueryInterface(Ci.nsIInterfaceRequestor)
-                          .getInterface(Ci.nsIDocShell)
+    let mm = contentWindow.document.docShell
                           .QueryInterface(Ci.nsIInterfaceRequestor)
                           .getInterface(Ci.nsIContentFrameMessageManager);
 
     let {viewType, tabId, devtoolsToolboxInfo} = getFrameData(mm) || {};
 
     let uri = contentWindow.document.documentURIObject;
 
     if (devtoolsToolboxInfo) {
--- a/toolkit/components/extensions/ExtensionParent.jsm
+++ b/toolkit/components/extensions/ExtensionParent.jsm
@@ -47,17 +47,16 @@ var {
 var {
   DefaultMap,
   DefaultWeakMap,
   ExtensionError,
   MessageManagerProxy,
   defineLazyGetter,
   promiseDocumentLoaded,
   promiseEvent,
-  promiseFileContents,
   promiseObserved,
 } = ExtensionUtils;
 
 const BASE_SCHEMA = "chrome://extensions/content/schemas/manifest.json";
 const CATEGORY_EXTENSION_MODULES = "webextension-modules";
 const CATEGORY_EXTENSION_SCHEMAS = "webextension-schemas";
 const CATEGORY_EXTENSION_SCRIPTS = "webextension-scripts";
 
@@ -480,18 +479,17 @@ class ExtensionPageContextParent extends
     this.extension.views.add(this);
 
     extension.emit("extension-proxy-context-load", this);
   }
 
   // The window that contains this context. This may change due to moving tabs.
   get xulWindow() {
     let win = this.xulBrowser.ownerGlobal;
-    return win.QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIDocShell)
-              .QueryInterface(Ci.nsIDocShellTreeItem).rootTreeItem
+    return win.document.docShell.rootTreeItem
               .QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIDOMWindow);
   }
 
   get currentWindow() {
     if (this.viewType !== "background") {
       return this.xulWindow;
     }
   }
@@ -1430,19 +1428,19 @@ StartupCache = {
   getBlob() {
     return new Uint8Array(aomStartup.encodeBlob(this._data));
   },
 
   _data: null,
   async _readData() {
     let result = new Map();
     try {
-      let data = await promiseFileContents(this.file);
+      let {buffer} = await OS.File.read(this.file);
 
-      result = aomStartup.decodeBlob(data);
+      result = aomStartup.decodeBlob(buffer);
     } catch (e) {
       if (!e.becauseNoSuchFile) {
         Cu.reportError(e);
       }
     }
 
     this._data = result;
     return result;
--- a/toolkit/components/extensions/ExtensionPermissions.jsm
+++ b/toolkit/components/extensions/ExtensionPermissions.jsm
@@ -22,18 +22,18 @@ let _initPromise;
 
 async function _lazyInit() {
   let path = OS.Path.join(OS.Constants.Path.profileDir, FILE_NAME);
 
   prefs = new JSONFile({path});
   prefs.data = {};
 
   try {
-    let blob = await ExtensionUtils.promiseFileContents(path);
-    prefs.data = JSON.parse(new TextDecoder().decode(blob));
+    let {buffer} = await OS.File.read(path);
+    prefs.data = JSON.parse(new TextDecoder().decode(buffer));
   } catch (e) {
     if (!e.becauseNoSuchFile) {
       Cu.reportError(e);
     }
   }
 }
 
 function lazyInit() {
--- a/toolkit/components/extensions/ExtensionUtils.jsm
+++ b/toolkit/components/extensions/ExtensionUtils.jsm
@@ -1,20 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 this.EXPORTED_SYMBOLS = ["ExtensionUtils"];
 
-const Ci = Components.interfaces;
-const Cc = Components.classes;
-const Cu = Components.utils;
-const Cr = Components.results;
+const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
 
 Cu.import("resource://gre/modules/Services.jsm");
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "ConsoleAPI",
                                   "resource://gre/modules/Console.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "OS",
                                   "resource://gre/modules/osfile.jsm");
@@ -32,21 +29,16 @@ const appinfo = Cc["@mozilla.org/xre/app
 
 let nextId = 0;
 const uniqueProcessID = String(appinfo.uniqueProcessID);
 
 function getUniqueId() {
   return `${nextId++}-${uniqueProcessID}`;
 }
 
-async function promiseFileContents(path) {
-  let res = await OS.File.read(path);
-  return res.buffer;
-}
-
 
 /**
  * An Error subclass for which complete error messages are always passed
  * to extensions, rather than being interpreted as an unknown error.
  */
 class ExtensionError extends Error {}
 
 function filterStack(error) {
@@ -58,96 +50,56 @@ function runSafeSyncWithoutClone(f, ...a
   try {
     return f(...args);
   } catch (e) {
     dump(`Extension error: ${e} ${e.fileName} ${e.lineNumber}\n[[Exception stack\n${filterStack(e)}Current stack\n${filterStack(Error())}]]\n`);
     Cu.reportError(e);
   }
 }
 
-// Run a function and report exceptions.
-function runSafeWithoutClone(f, ...args) {
-  if (typeof(f) != "function") {
-    dump(`Extension error: expected function\n${filterStack(Error())}`);
-    return;
-  }
-
-  Promise.resolve().then(() => {
-    runSafeSyncWithoutClone(f, ...args);
-  });
-}
-
-// Run a function, cloning arguments into context.cloneScope, and
-// report exceptions. |f| is expected to be in context.cloneScope.
-function runSafeSync(context, f, ...args) {
-  if (context.unloaded) {
-    Cu.reportError("runSafeSync called after context unloaded");
-    return;
-  }
-
-  try {
-    args = Cu.cloneInto(args, context.cloneScope);
-  } catch (e) {
-    Cu.reportError(e);
-    dump(`runSafe failure: cloning into ${context.cloneScope}: ${e}\n\n${filterStack(Error())}`);
-  }
-  return runSafeSyncWithoutClone(f, ...args);
-}
-
-// Run a function, cloning arguments into context.cloneScope, and
-// report exceptions. |f| is expected to be in context.cloneScope.
-function runSafe(context, f, ...args) {
-  try {
-    args = Cu.cloneInto(args, context.cloneScope);
-  } catch (e) {
-    Cu.reportError(e);
-    dump(`runSafe failure: cloning into ${context.cloneScope}: ${e}\n\n${filterStack(Error())}`);
-  }
-  if (context.unloaded) {
-    dump(`runSafe failure: context is already unloaded ${filterStack(new Error())}\n`);
-    return undefined;
-  }
-  return runSafeWithoutClone(f, ...args);
-}
-
 // Return true if the given value is an instance of the given
 // native type.
 function instanceOf(value, type) {
-  return {}.toString.call(value) == `[object ${type}]`;
+  return (value && typeof value === "object" &&
+          ChromeUtils.getClassName(value) === type);
 }
 
 /**
  * Similar to a WeakMap, but creates a new key with the given
  * constructor if one is not present.
  */
 class DefaultWeakMap extends WeakMap {
   constructor(defaultConstructor, init) {
     super(init);
     this.defaultConstructor = defaultConstructor;
   }
 
   get(key) {
-    if (!this.has(key)) {
-      this.set(key, this.defaultConstructor(key));
+    let value = super.get(key);
+    if (value === undefined && !this.has(key)) {
+      value = this.defaultConstructor(key);
+      this.set(key, value);
     }
-    return super.get(key);
+    return value;
   }
 }
 
 class DefaultMap extends Map {
   constructor(defaultConstructor, init) {
     super(init);
     this.defaultConstructor = defaultConstructor;
   }
 
   get(key) {
-    if (!this.has(key)) {
-      this.set(key, this.defaultConstructor(key));
+    let value = super.get(key);
+    if (value === undefined && !this.has(key)) {
+      value = this.defaultConstructor(key);
+      this.set(key, value);
     }
-    return super.get(key);
+    return value;
   }
 }
 
 const _winUtils = new DefaultWeakMap(win => {
   return win.QueryInterface(Ci.nsIInterfaceRequestor)
             .getInterface(Ci.nsIDOMWindowUtils);
 });
 const getWinUtils = win => _winUtils.get(win);
@@ -182,35 +134,36 @@ class EventEmitter {
    * dispatchers may need to block on.
    *
    * @param {string} event
    *       The name of the event to listen for.
    * @param {function(string, ...any)} listener
    *        The listener to call when events are emitted.
    */
   on(event, listener) {
-    if (!this[LISTENERS].has(event)) {
-      this[LISTENERS].set(event, new Set());
+    let listeners = this[LISTENERS].get(event);
+    if (!listeners) {
+      listeners = new Set();
+      this[LISTENERS].set(event, listeners);
     }
 
-    this[LISTENERS].get(event).add(listener);
+    listeners.add(listener);
   }
 
   /**
    * Removes the given function as a listener for the given event.
    *
    * @param {string} event
    *       The name of the event to stop listening for.
    * @param {function(string, ...any)} listener
    *        The listener function to remove.
    */
   off(event, listener) {
-    if (this[LISTENERS].has(event)) {
-      let set = this[LISTENERS].get(event);
-
+    let set = this[LISTENERS].get(event);
+    if (set) {
       set.delete(listener);
       set.delete(this[ONCE_MAP].get(listener));
       if (!set.size) {
         this[LISTENERS].delete(event);
       }
     }
   }
 
@@ -299,17 +252,17 @@ class LimitedSet extends Set {
       // the entire loop even after we're done truncating.
       if (this.size > limit) {
         this.delete(item);
       }
     }
   }
 
   add(item) {
-    if (!this.has(item) && this.size >= this.limit + this.slop) {
+    if (this.size >= this.limit + this.slop && !this.has(item)) {
       this.truncate(this.limit - 1);
     }
     super.add(item);
   }
 }
 
 /**
  * Returns a Promise which resolves when the given document's DOM has
@@ -341,19 +294,17 @@ function promiseDocumentReady(doc) {
  * @returns {Promise<Document>}
  */
 function promiseDocumentLoaded(doc) {
   if (doc.readyState == "complete") {
     return Promise.resolve(doc);
   }
 
   return new Promise(resolve => {
-    doc.defaultView.addEventListener("load", function(event) {
-      resolve(doc);
-    }, {once: true});
+    doc.defaultView.addEventListener("load", () => resolve(doc), {once: true});
   });
 }
 
 /**
  * Returns a Promise which resolves when the given event is dispatched to the
  * given element.
  *
  * @param {Element} element
@@ -679,22 +630,18 @@ this.ExtensionUtils = {
   getUniqueId,
   filterStack,
   getWinUtils,
   instanceOf,
   normalizeTime,
   promiseDocumentLoaded,
   promiseDocumentReady,
   promiseEvent,
-  promiseFileContents,
   promiseObserved,
-  runSafe,
-  runSafeSync,
   runSafeSyncWithoutClone,
-  runSafeWithoutClone,
   withHandlingUserInput,
   DefaultMap,
   DefaultWeakMap,
   EventEmitter,
   ExtensionError,
   LimitedSet,
   MessageManagerProxy,
 };
--- a/toolkit/components/extensions/MessageChannel.jsm
+++ b/toolkit/components/extensions/MessageChannel.jsm
@@ -95,20 +95,17 @@
  *  },
  *
  */
 
 this.EXPORTED_SYMBOLS = ["MessageChannel"];
 
 /* globals MessageChannel */
 
-const Ci = Components.interfaces;
-const Cc = Components.classes;
-const Cu = Components.utils;
-const Cr = Components.results;
+const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
 
 Cu.import("resource://gre/modules/AppConstants.jsm");
 Cu.import("resource://gre/modules/ExtensionUtils.jsm");
 Cu.import("resource://gre/modules/Services.jsm");
 
 const {
   MessageManagerProxy,
 } = ExtensionUtils;
--- a/toolkit/components/extensions/Schemas.jsm
+++ b/toolkit/components/extensions/Schemas.jsm
@@ -16,17 +16,16 @@ Cu.importGlobalProperties(["URL"]);
 Cu.import("resource://gre/modules/AppConstants.jsm");
 Cu.import("resource://gre/modules/Services.jsm");
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 
 Cu.import("resource://gre/modules/ExtensionUtils.jsm");
 var {
   DefaultMap,
   DefaultWeakMap,
-  instanceOf,
 } = ExtensionUtils;
 
 XPCOMUtils.defineLazyModuleGetter(this, "ExtensionParent",
                                   "resource://gre/modules/ExtensionParent.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "NetUtil",
                                   "resource://gre/modules/NetUtil.jsm");
 XPCOMUtils.defineLazyServiceGetter(this, "contentPolicyService",
                                    "@mozilla.org/addons/content-policy;1",
@@ -1622,17 +1621,17 @@ class ObjectType extends Type {
           if (Object.keys(this.properties).length ||
               this.patternProperties.length ||
               !(this.additionalProperties instanceof AnyType)) {
             throw new Error("InternalError: isInstanceOf can only be used " +
                             "with objects that are otherwise unrestricted");
           }
         }
 
-        if (!instanceOf(value, this.isInstanceOf)) {
+        if (ChromeUtils.getClassName(value) !== this.isInstanceOf) {
           return context.error(`Object must be an instance of ${this.isInstanceOf}`,
                                `be an instance of ${this.isInstanceOf}`);
         }
 
         // This is kind of a hack, but we can't normalize things that
         // aren't JSON, so we just return them.
         return this.postprocess({value}, context);
       }
--- a/toolkit/components/extensions/ext-cookies.js
+++ b/toolkit/components/extensions/ext-cookies.js
@@ -167,25 +167,25 @@ const query = function* (detailsIn, prop
   if (isPrivate) {
     storeId = PRIVATE_STORE;
   } else if ("storeId" in details) {
     storeId = details.storeId;
   }
 
   // We can use getCookiesFromHost for faster searching.
   let enumerator;
-  let uri;
+  let url;
   let originAttributes = {
     userContextId,
     privateBrowsingId: isPrivate ? 1 : 0,
   };
   if ("url" in details) {
     try {
-      uri = Services.io.newURI(details.url).QueryInterface(Ci.nsIURL);
-      enumerator = Services.cookies.getCookiesFromHost(uri.host, originAttributes);
+      url = new URL(details.url);
+      enumerator = Services.cookies.getCookiesFromHost(url.host, originAttributes);
     } catch (ex) {
       // This often happens for about: URLs
       return;
     }
   } else if ("domain" in details) {
     enumerator = Services.cookies.getCookiesFromHost(details.domain, originAttributes);
   } else {
     enumerator = Services.cookies.getCookiesWithOriginAttributes(JSON.stringify(originAttributes));
@@ -206,31 +206,30 @@ const query = function* (detailsIn, prop
 
       // path == cookiePath, but without the redundant string compare.
       if (path.length == cookiePath.length) {
         return true;
       }
 
       // URL path is a substring of the cookie path, so it matches if, and
       // only if, the next character is a path delimiter.
-      let pathDelimiters = ["/", "?", "#", ";"];
-      return pathDelimiters.includes(path[cookiePath.length]);
+      return path[cookiePath.length] === "/";
     }
 
     // "Restricts the retrieved cookies to those that would match the given URL."
-    if (uri) {
-      if (!domainMatches(uri.host)) {
+    if (url) {
+      if (!domainMatches(url.host)) {
         return false;
       }
 
-      if (cookie.isSecure && uri.scheme != "https") {
+      if (cookie.isSecure && url.protocol != "https:") {
         return false;
       }
 
-      if (!pathMatches(uri.pathQueryRef)) {
+      if (!pathMatches(url.pathname)) {
         return false;
       }
     }
 
     if ("name" in details && details.name != cookie.name) {
       return false;
     }
 
@@ -255,18 +254,17 @@ const query = function* (detailsIn, prop
     // Check that the extension has permissions for this host.
     if (!context.extension.whiteListedHosts.matchesCookie(cookie)) {
       return false;
     }
 
     return true;
   }
 
-  while (enumerator.hasMoreElements()) {
-    let cookie = enumerator.getNext().QueryInterface(Ci.nsICookie2);
+  for (const cookie of XPCOMUtils.IterSimpleEnumerator(enumerator, Ci.nsICookie2)) {
     if (matches(cookie)) {
       yield {cookie, isPrivate, storeId};
     }
   }
 };
 
 this.cookies = class extends ExtensionAPI {
   getAPI(context) {
@@ -286,27 +284,27 @@ this.cookies = class extends ExtensionAP
         getAll: function(details) {
           let allowed = ["url", "name", "domain", "path", "secure", "session", "storeId"];
           let result = Array.from(query(details, allowed, context), convertCookie);
 
           return Promise.resolve(result);
         },
 
         set: function(details) {
-          let uri = Services.io.newURI(details.url).QueryInterface(Ci.nsIURL);
+          let uri = Services.io.newURI(details.url);
 
           let path;
           if (details.path !== null) {
             path = details.path;
           } else {
             // This interface essentially emulates the behavior of the
             // Set-Cookie header. In the case of an omitted path, the cookie
             // service uses the directory path of the requesting URL, ignoring
             // any filename or query parameters.
-            path = uri.directory;
+            path = uri.QueryInterface(Ci.nsIURL).directory;
           }
 
           let name = details.name !== null ? details.name : "";
           let value = details.value !== null ? details.value : "";
           let secure = details.secure !== null ? details.secure : false;
           let httpOnly = details.httpOnly !== null ? details.httpOnly : false;
           let isSession = details.expirationDate === null;
           let expiry = isSession ? Number.MAX_SAFE_INTEGER : details.expirationDate;
--- a/toolkit/components/extensions/ext-runtime.js
+++ b/toolkit/components/extensions/ext-runtime.js
@@ -119,22 +119,22 @@ this.runtime = class extends ExtensionAP
 
         setUninstallURL: function(url) {
           if (url.length == 0) {
             return Promise.resolve();
           }
 
           let uri;
           try {
-            uri = Services.io.newURI(url);
+            uri = new URL(url);
           } catch (e) {
             return Promise.reject({message: `Invalid URL: ${JSON.stringify(url)}`});
           }
 
-          if (uri.scheme != "http" && uri.scheme != "https") {
+          if (uri.protocol != "http:" && uri.protocol != "https:") {
             return Promise.reject({message: "url must have the scheme http or https"});
           }
 
           extension.uninstallURL = url;
           return Promise.resolve();
         },
       },
     };
--- a/toolkit/components/extensions/ext-tabs-base.js
+++ b/toolkit/components/extensions/ext-tabs-base.js
@@ -689,19 +689,18 @@ class WindowBase {
   }
 
   /**
    * @property {nsIXULWindow} xulWindow
    *        The nsIXULWindow object for this browser window.
    *        @readonly
    */
   get xulWindow() {
-    return this.window.QueryInterface(Ci.nsIInterfaceRequestor)
-               .getInterface(Ci.nsIDocShell)
-               .treeOwner.QueryInterface(Ci.nsIInterfaceRequestor)
+    return this.window.document.docShell.treeOwner
+               .QueryInterface(Ci.nsIInterfaceRequestor)
                .getInterface(Ci.nsIXULWindow);
   }
 
   /**
    * Returns true if this window is the current window for the given extension
    * context, false otherwise.
    *
    * @param {BaseContext} context
@@ -1202,18 +1201,16 @@ class WindowTrackerBase extends EventEmi
 
     this._listeners = new DefaultMap(() => new Set());
 
     this._statusListeners = new DefaultWeakMap(listener => {
       return new StatusListener(listener);
     });
 
     this._windowIds = new DefaultWeakMap(window => {
-      window.QueryInterface(Ci.nsIInterfaceRequestor);
-
       return getWinUtils(window).outerWindowID;
     });
   }
 
   isBrowserWindow(window) {
     let {documentElement} = window.document;
 
     return documentElement.getAttribute("windowtype") === "navigator:browser";
--- a/toolkit/components/extensions/ext-theme.js
+++ b/toolkit/components/extensions/ext-theme.js
@@ -6,16 +6,20 @@ Cu.import("resource://gre/modules/Servic
 
 XPCOMUtils.defineLazyModuleGetter(this, "LightweightThemeManager",
                                   "resource://gre/modules/LightweightThemeManager.jsm");
 
 XPCOMUtils.defineLazyGetter(this, "gThemesEnabled", () => {
   return Services.prefs.getBoolPref("extensions.webextensions.themes.enabled");
 });
 
+var {
+  getWinUtils,
+} = ExtensionUtils;
+
 const ICONS = Services.prefs.getStringPref("extensions.webextensions.themes.icons.buttons", "").split(",");
 
 /** Class representing a theme. */
 class Theme {
   /**
    * Creates a theme instance.
    *
    * @param {string} baseURI The base URI of the extension, used to
@@ -38,19 +42,17 @@ class Theme {
    *
    * @param {Object} details Theme part of the manifest. Supported
    *   properties can be found in the schema under ThemeType.
    * @param {Object} targetWindow The window to apply the theme to. Omitting
    *   this parameter will apply the theme globally.
    */
   load(details, targetWindow) {
     if (targetWindow) {
-      this.lwtStyles.window = targetWindow
-        .QueryInterface(Ci.nsIInterfaceRequestor)
-        .getInterface(Ci.nsIDOMWindowUtils).outerWindowID;
+      this.lwtStyles.window = getWinUtils(targetWindow).outerWindowID;
     }
 
     if (details.colors) {
       this.loadColors(details.colors);
     }
 
     if (details.images) {
       this.loadImages(details.images);
--- a/toolkit/components/extensions/ext-toolkit.js
+++ b/toolkit/components/extensions/ext-toolkit.js
@@ -10,16 +10,18 @@
           getContainerForCookieStoreId: false,
           isValidCookieStoreId:false, isContainerCookieStoreId:false,
           isDefaultCookieStoreId: false, isPrivateCookieStoreId:false,
           EventManager: false, InputEventManager: false */
 
 XPCOMUtils.defineLazyModuleGetter(this, "ContextualIdentityService",
                                   "resource://gre/modules/ContextualIdentityService.jsm");
 
+Cu.importGlobalProperties(["URL"]);
+
 Cu.import("resource://gre/modules/ExtensionCommon.jsm");
 
 global.EventEmitter = ExtensionUtils.EventEmitter;
 global.EventManager = ExtensionCommon.EventManager;
 global.InputEventManager = class extends EventManager {
   constructor(...args) {
     super(...args);
     this.inputHandling = true;
--- a/toolkit/components/extensions/test/mochitest/chrome_cleanup_script.js
+++ b/toolkit/components/extensions/test/mochitest/chrome_cleanup_script.js
@@ -1,15 +1,22 @@
 "use strict";
 
 /* global addMessageListener, sendAsyncMessage */
 
 Components.utils.import("resource://gre/modules/AppConstants.jsm");
 Components.utils.import("resource://gre/modules/Services.jsm");
 
+let listener = msg => {
+  void (msg instanceof Components.interfaces.nsIConsoleMessage);
+  dump(`Console message: ${msg}\n`);
+};
+
+Services.console.registerListener(listener);
+
 let getBrowserApp, getTabBrowser;
 if (AppConstants.MOZ_BUILD_APP === "mobile/android") {
   getBrowserApp = win => win.BrowserApp;
   getTabBrowser = tab => tab.browser;
 } else {
   getBrowserApp = win => win.gBrowser;
   getTabBrowser = tab => tab.linkedBrowser;
 }
@@ -25,16 +32,18 @@ function* iterBrowserWindows() {
 }
 
 let initialTabs = new Map();
 for (let win of iterBrowserWindows()) {
   initialTabs.set(win, new Set(getBrowserApp(win).tabs));
 }
 
 addMessageListener("check-cleanup", extensionId => {
+  Services.console.unregisterListener(listener);
+
   let results = {
     extraWindows: [],
     extraTabs: [],
   };
 
   for (let win of iterBrowserWindows()) {
     if (initialTabs.has(win)) {
       let tabs = initialTabs.get(win);
--- a/toolkit/components/passwordmgr/content/passwordManager.js
+++ b/toolkit/components/passwordmgr/content/passwordManager.js
@@ -122,17 +122,17 @@ function setFilter(aFilterString) {
 let signonsTreeView = {
   // Keep track of which favicons we've fetched or started fetching.
   // Maps a login origin to a favicon URL.
   _faviconMap: new Map(),
   _filterSet: [],
   // Coalesce invalidations to avoid repeated flickering.
   _invalidateTask: new DeferredTask(() => {
     signonsTree.treeBoxObject.invalidateColumn(signonsTree.columns.siteCol);
-  }, 10),
+  }, 10, 0),
   _lastSelectedRanges: [],
   selection: null,
 
   rowCount: 0,
   setTree(tree) {},
   getImageSrc(row, column) {
     if (column.element.getAttribute("id") !== "siteCol") {
       return "";
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -12575,25 +12575,26 @@
     "bug_numbers": [1286865],
     "expires_in_version": "never",
     "releaseChannelCollection": "opt-out",
     "kind": "count",
     "keyed": true,
     "cpp_guard": "XP_LINUX",
     "description": "System calls blocked by a seccomp-bpf sandbox policy; limited to syscalls where we would crash on Nightly.  The key is generally the architecture and syscall ID but in some cases we include non-personally-identifying information from the syscall arguments; see the function SubmitToTelemetry in security/sandbox/linux/reporter/SandboxReporter.cpp for details."
   },
-  "SANDBOX_FAILED_LAUNCH": {
+  "SANDBOX_FAILED_LAUNCH_KEYED": {
     "record_in_processes": ["main"],
     "alert_emails": ["bowen@mozilla.com"],
-    "expires_in_version": "60",
-    "kind": "enumerated",
+    "expires_in_version": "never",
+    "kind": "enumerated",
+    "keyed": true,
     "n_values": 50,
     "bug_numbers": [1368600],
     "cpp_guard": "XP_WIN",
-    "description": "Error code when a Windows sandboxed process fails to launch. See https://dxr.mozilla.org/mozilla-central/search?q=ResultCode++path%3Asandbox_types.h&redirect=true for definitions of the error codes."
+    "description": "Error code when a Windows sandboxed process fails to launch, keyed by process type and Windows error code. See https://dxr.mozilla.org/mozilla-central/search?q=ResultCode++path%3Asandbox_types.h&redirect=true for definitions of the error codes."
   },
   "SYNC_WORKER_OPERATION": {
     "record_in_processes": ["main", "content"],
     "alert_emails": ["amarchesini@mozilla.com", "khuey@mozilla.com" ],
     "bug_numbers": [1267904],
     "expires_in_version": "never",
     "kind": "exponential",
     "high": 5000,
--- a/toolkit/components/telemetry/TelemetryController.jsm
+++ b/toolkit/components/telemetry/TelemetryController.jsm
@@ -730,17 +730,18 @@ var Impl = {
         TelemetryModules.start();
 
         this._delayedInitTaskDeferred.resolve();
       } catch (e) {
         this._delayedInitTaskDeferred.reject(e);
       } finally {
         this._delayedInitTask = null;
       }
-    }, this._testMode ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY);
+    }, this._testMode ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY,
+       this._testMode ? 0 : undefined);
 
     AsyncShutdown.sendTelemetry.addBlocker("TelemetryController: shutting down",
                                            () => this.shutdown(),
                                            () => this._getState());
 
     this._delayedInitTask.arm();
     return this._delayedInitTaskDeferred.promise;
   },
--- a/toolkit/components/telemetry/TelemetrySession.jsm
+++ b/toolkit/components/telemetry/TelemetrySession.jsm
@@ -1536,17 +1536,18 @@ var Impl = {
       this._initialized = true;
 
       this.attachObservers();
       this.gatherMemory();
 
       if (Telemetry.canRecordExtended) {
         GCTelemetry.init();
       }
-    }, testing ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY);
+    }, testing ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY,
+    testing ? 0 : undefined);
 
     delayedTask.arm();
   },
 
   getFlashVersion: function getFlashVersion() {
     let host = Cc["@mozilla.org/plugin/host;1"].getService(Ci.nsIPluginHost);
     let tags = host.getPluginTags();
 
--- a/toolkit/modules/DeferredTask.jsm
+++ b/toolkit/modules/DeferredTask.jsm
@@ -85,41 +85,48 @@ this.EXPORTED_SYMBOLS = [
 // Globals
 
 const { classes: Cc, interfaces: Ci, utils: Cu, results: Cr } = Components;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "PromiseUtils",
                                   "resource://gre/modules/PromiseUtils.jsm");
-XPCOMUtils.defineLazyModuleGetter(this, "Task",
-                                  "resource://gre/modules/Task.jsm");
 
 const Timer = Components.Constructor("@mozilla.org/timer;1", "nsITimer",
                                      "initWithCallback");
 
 // DeferredTask
 
 /**
  * Sets up a task whose execution can be triggered after a delay.
  *
  * @param aTaskFn
- *        Function or generator function to execute.  This argument is passed to
- *        the "Task.spawn" method every time the task should be executed.  This
+ *        Function to execute.  If the function returns a promise, the task is
+ *        not considered complete until that promise resolves.  This
  *        task is never re-entered while running.
  * @param aDelayMs
  *        Time between executions, in milliseconds.  Multiple attempts to run
  *        the task before the delay has passed are coalesced.  This time of
  *        inactivity is guaranteed to pass between multiple executions of the
  *        task, except on finalization, when the task may restart immediately
  *        after the previous execution finished.
+ * @param aIdleTimeoutMs
+ *        The maximum time to wait for an idle slot on the main thread after
+ *        aDelayMs has elapsed. If omitted, waits indefinitely for an idle
+ *        callback.
  */
-this.DeferredTask = function(aTaskFn, aDelayMs) {
+this.DeferredTask = function(aTaskFn, aDelayMs, aIdleTimeoutMs) {
   this._taskFn = aTaskFn;
   this._delayMs = aDelayMs;
+  this._timeoutMs = aIdleTimeoutMs;
+
+  if (aTaskFn.isGenerator()) {
+    Cu.reportError(new Error("Unexpected generator function passed to DeferredTask"));
+  }
 }
 
 this.DeferredTask.prototype = {
   /**
    * Function or generator function to execute.
    */
   _taskFn: null,
 
@@ -292,23 +299,24 @@ this.DeferredTask.prototype = {
 
       // Indicate that the execution of the task has finished.  This happens
       // synchronously with the previous state changes in the function.
       this._runningPromise = null;
     })().catch(Cu.reportError));
   },
 
   /**
-   * Executes the associated task and catches exceptions.
+   * Executes the associated task in an idle callback and catches exceptions.
    */
   async _runTask() {
     try {
-      let result = this._taskFn();
-      if (Object.prototype.toString.call(result) == "[object Generator]") {
-        await Task.spawn(result); // eslint-disable-line mozilla/no-task
+      // If we're being finalized, execute the task immediately, so we don't
+      // risk blocking async shutdown longer than necessary.
+      if (this._finalized || this._timeoutMs === 0) {
+        await this._taskFn();
       } else {
-        await result;
+        await PromiseUtils.idleDispatch(this._taskFn, this._timeoutMs);
       }
     } catch (ex) {
       Cu.reportError(ex);
     }
   },
 };
--- a/toolkit/modules/JSONFile.jsm
+++ b/toolkit/modules/JSONFile.jsm
@@ -32,17 +32,16 @@ this.EXPORTED_SYMBOLS = [
   "JSONFile",
 ];
 
 // Globals
 
 const { classes: Cc, interfaces: Ci, utils: Cu, results: Cr } = Components;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
-Cu.import("resource://gre/modules/Services.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "AsyncShutdown",
                                   "resource://gre/modules/AsyncShutdown.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "DeferredTask",
                                   "resource://gre/modules/DeferredTask.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "FileUtils",
                                   "resource://gre/modules/FileUtils.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "OS",
--- a/toolkit/modules/PromiseUtils.jsm
+++ b/toolkit/modules/PromiseUtils.jsm
@@ -1,28 +1,55 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict"
 
 this.EXPORTED_SYMBOLS = ["PromiseUtils"];
 
+Components.utils.import("resource://gre/modules/Services.jsm");
 Components.utils.import("resource://gre/modules/Timer.jsm");
 
 this.PromiseUtils = {
   /*
    * Creates a new pending Promise and provide methods to resolve and reject this Promise.
    *
    * @return {Deferred} an object consisting of a pending Promise "promise"
    * and methods "resolve" and "reject" to change its state.
    */
   defer() {
     return new Deferred();
   },
+
+  /**
+   * Requests idle dispatch to the main thread for the given callback,
+   * and returns a promise which resolves to the callback's return value
+   * when it has been executed.
+   *
+   * @param {function} callback
+   * @param {integer} [timeout]
+   *        An optional timeout, after which the callback will be
+   *        executed immediately if idle dispatch has not yet occurred.
+   *
+   * @returns {Promise}
+   */
+  idleDispatch(callback, timeout = 0) {
+    return new Promise((resolve, reject) => {
+      Services.tm.idleDispatchToMainThread(
+        () => {
+          try {
+            resolve(callback());
+          } catch (e) {
+            reject(e);
+          }
+        },
+        timeout);
+    });
+  },
 }
 
 /**
  * The definition of Deferred object which is returned by PromiseUtils.defer(),
  * It contains a Promise and methods to resolve/reject it.
  */
 function Deferred() {
   /* A method to resolve the associated Promise with the value passed.
--- a/toolkit/modules/addons/WebRequest.jsm
+++ b/toolkit/modules/addons/WebRequest.jsm
@@ -81,18 +81,17 @@ class HeaderChanger {
 
     this.originalHeaders = new Map();
     for (let [name, value] of this.iterHeaders()) {
       this.originalHeaders.set(name.toLowerCase(), {name, value});
     }
   }
 
   toArray() {
-    return Array.from(this.originalHeaders,
-                      ([key, {name, value}]) => ({name, value}));
+    return Array.from(this.originalHeaders.values());
   }
 
   validateHeaders(headers) {
     // We should probably use schema validation for this.
 
     if (!Array.isArray(headers)) {
       return false;
     }
@@ -668,39 +667,40 @@ HttpObserverManager = {
     if (policyType == "websocket" && ["http", "https"].includes(uri.scheme)) {
       uri = Services.io.newURI(`ws${uri.spec.substring(4)}`);
     }
 
     if (filter.types && !filter.types.includes(policyType)) {
       return false;
     }
 
-    return WebRequestCommon.urlMatches(uri, filter.urls);
+    return !filter.urls || filter.urls.matches(uri);
   },
 
   get resultsMap() {
     delete this.resultsMap;
     this.resultsMap = new Map(Object.keys(Cr).map(name => [Cr[name], name]));
     return this.resultsMap;
   },
 
-  maybeError({channel}, extraData = null) {
-    if (!(extraData && extraData.error) && channel.securityInfo) {
-      let securityInfo = channel.securityInfo.QueryInterface(Ci.nsITransportSecurityInfo);
+  maybeError({channel}) {
+    // FIXME: Move to ChannelWrapper.
+
+    let {securityInfo} = channel;
+    if (securityInfo instanceof Ci.nsITransportSecurityInfo) {
       if (NSSErrorsService.isNSSErrorCode(securityInfo.errorCode)) {
         let nsresult = NSSErrorsService.getXPCOMFromNSSError(securityInfo.errorCode);
-        extraData = {error: NSSErrorsService.getErrorMessage(nsresult)};
+        return {error: NSSErrorsService.getErrorMessage(nsresult)};
       }
     }
-    if (!(extraData && extraData.error)) {
-      if (!Components.isSuccessCode(channel.status)) {
-        extraData = {error: this.resultsMap.get(channel.status) || "NS_ERROR_NET_UNKNOWN"};
-      }
+
+    if (!Components.isSuccessCode(channel.status)) {
+      return {error: this.resultsMap.get(channel.status) || "NS_ERROR_NET_UNKNOWN"};
     }
-    return extraData;
+    return null;
   },
 
   errorCheck(channel) {
     let errorData = this.maybeError(channel);
     if (errorData) {
       this.runChannelListener(channel, "onError", errorData);
     }
     return errorData;
@@ -801,17 +801,17 @@ HttpObserverManager = {
           commonData = this.getRequestData(channel, extraData);
           if (includeStatus) {
             commonData.statusCode = channel.statusCode;
             commonData.statusLine = channel.statusLine;
           }
         }
         let data = Object.assign({}, commonData);
 
-        if (registerFilter) {
+        if (registerFilter && opts.blocking) {
           this.registerChannel(channel, opts);
         }
 
         if (opts.requestHeaders) {
           requestHeaders = requestHeaders || new RequestHeaderChanger(channel);
           data.requestHeaders = requestHeaders.toArray();
         }
 
@@ -838,38 +838,31 @@ HttpObserverManager = {
     } catch (e) {
       Cu.reportError(e);
     }
 
     return this.applyChanges(kind, channel, handlerResults, requestHeaders, responseHeaders);
   },
 
   async applyChanges(kind, channel, handlerResults, requestHeaders, responseHeaders) {
-    let asyncHandlers = handlerResults.filter(({result}) => isThenable(result));
-    let isAsync = asyncHandlers.length > 0;
-    let shouldResume = false;
+    let shouldResume = !channel.suspended;
 
     try {
-      if (isAsync) {
-        shouldResume = !channel.suspended;
-        channel.suspended = true;
-
-        for (let value of asyncHandlers) {
+      for (let {opts, result} of handlerResults) {
+        if (isThenable(result)) {
+          channel.suspended = true;
           try {
-            value.result = await value.result;
+            result = await result;
           } catch (e) {
             Cu.reportError(e);
-            value.result = {};
+            continue;
           }
-        }
-      }
-
-      for (let {opts, result} of handlerResults) {
-        if (!result || typeof result !== "object") {
-          continue;
+          if (!result || typeof result !== "object") {
+            continue;
+          }
         }
 
         if (result.cancel) {
           channel.suspended = false;
           channel.cancel(Cr.NS_ERROR_ABORT);
 
           this.errorCheck(channel);
           return;
@@ -888,32 +881,30 @@ HttpObserverManager = {
         if (opts.requestHeaders && result.requestHeaders && requestHeaders) {
           requestHeaders.applyChanges(result.requestHeaders);
         }
 
         if (opts.responseHeaders && result.responseHeaders && responseHeaders) {
           responseHeaders.applyChanges(result.responseHeaders);
         }
 
-        if (kind === "authRequired" && opts.blocking && result.authCredentials) {
-          if (channel.authPromptCallback) {
-            channel.authPromptCallback(result.authCredentials);
-          }
+        if (kind === "authRequired" && result.authCredentials && channel.authPromptCallback) {
+          channel.authPromptCallback(result.authCredentials);
         }
       }
       // If a listener did not cancel the request or provide credentials, we
       // forward the auth request to the base handler.
-      if (kind === "authRequired") {
-        if (channel.authPromptForward) {
-          channel.authPromptForward();
-        }
+      if (kind === "authRequired" && channel.authPromptForward) {
+        channel.authPromptForward();
       }
 
-      if (kind === "modify") {
+      if (kind === "modify" && this.listeners.afterModify.size) {
         await this.runChannelListener(channel, "afterModify");
+      } else if (kind !== "onError") {
+        this.errorCheck(channel);
       }
     } catch (e) {
       Cu.reportError(e);
     }
 
     // Only resume the channel if it was suspended by this call.
     if (shouldResume) {
       channel.suspended = false;
--- a/toolkit/modules/tests/xpcshell/test_DeferredTask.js
+++ b/toolkit/modules/tests/xpcshell/test_DeferredTask.js
@@ -187,28 +187,16 @@ add_test(function test_arm_async_functio
     await Promise.resolve();
     run_next_test();
   }, 50);
 
   deferredTask.arm();
 });
 
 /**
- * Checks that "arm" accepts a Task.jsm generator function.
- */
-add_test(function test_arm_async_generator() {
-  let deferredTask = new DeferredTask(function* () {
-    yield Promise.resolve();
-    run_next_test();
-  }, 50);
-
-  deferredTask.arm();
-});
-
-/**
  * Checks that an armed task can be disarmed.
  */
 add_test(function test_disarm() {
   // Create a task that will run later.
   let deferredTask = new DeferredTask(function() {
     do_throw("This task should not run.");
   }, 2 * T);
   deferredTask.arm();
--- a/toolkit/mozapps/extensions/DeferredSave.jsm
+++ b/toolkit/mozapps/extensions/DeferredSave.jsm
@@ -183,17 +183,17 @@ this.DeferredSave.prototype = {
   _startTimer() {
     if (!this._pending) {
       return;
     }
 
       this.logger.debug("Starting timer");
     if (!this._timer)
       this._timer = MakeTimer();
-    this._timer.initWithCallback(() => this._deferredSave(),
+    this._timer.initWithCallback(() => this._timerCallback(),
                                  this._delay, Ci.nsITimer.TYPE_ONE_SHOT);
   },
 
   /**
    * Mark the current stored data dirty, and schedule a flush to disk
    * @return A Promise<integer> that will be resolved after the data is written to disk;
    *         the promise is resolved with the number of bytes written.
    */
@@ -207,16 +207,20 @@ this.DeferredSave.prototype = {
       this._pending = PromiseUtils.defer();
       // Wait until the most recent write completes or fails (if it hasn't already)
       // and then restart our timer
       this._writing.then(count => this._startTimer(), error => this._startTimer());
     }
     return this._pending.promise;
   },
 
+  _timerCallback() {
+    Services.tm.idleDispatchToMainThread(() => this._deferredSave());
+  },
+
   _deferredSave() {
     let pending = this._pending;
     this._pending = null;
     let writing = this._writing;
     this._writing = pending.promise;
 
     // In either the success or the exception handling case, we don't need to handle
     // the error from _writing here; it's already being handled in another then()
--- a/xpcom/base/nsCycleCollector.cpp
+++ b/xpcom/base/nsCycleCollector.cpp
@@ -2285,17 +2285,17 @@ CCGraphBuilder::DoneAddingRoots()
   mGraph.mRootCount = mGraph.MapCount();
 
   mCurrNode = new NodePool::Enumerator(mGraph.mNodes);
 }
 
 MOZ_NEVER_INLINE bool
 CCGraphBuilder::BuildGraph(SliceBudget& aBudget)
 {
-  const intptr_t kNumNodesBetweenTimeChecks = 1000;
+  const intptr_t kNumNodesBetweenTimeChecks = 500;
   const intptr_t kStep = SliceBudget::CounterReset / kNumNodesBetweenTimeChecks;
 
   MOZ_ASSERT(mCurrNode);
 
   while (!aBudget.isOverBudget() && !mCurrNode->IsDone()) {
     PtrInfo* pi = mCurrNode->GetNext();
     if (!pi) {
       MOZ_CRASH();
--- a/xpcom/threads/LabeledEventQueue.cpp
+++ b/xpcom/threads/LabeledEventQueue.cpp
@@ -8,18 +8,38 @@
 #include "mozilla/dom/TabChild.h"
 #include "mozilla/dom/TabGroup.h"
 #include "mozilla/Scheduler.h"
 #include "mozilla/SchedulerGroup.h"
 #include "nsQueryObject.h"
 
 using namespace mozilla::dom;
 
+LinkedList<SchedulerGroup>* LabeledEventQueue::sSchedulerGroups;
+size_t LabeledEventQueue::sLabeledEventQueueCount;
+SchedulerGroup* LabeledEventQueue::sCurrentSchedulerGroup;
+
 LabeledEventQueue::LabeledEventQueue()
 {
+  // LabeledEventQueue should only be used by one consumer since it uses a
+  // single static sSchedulerGroups field. It's hard to assert this, though, so
+  // we assert NS_IsMainThread(), which is a reasonable proxy.
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (sLabeledEventQueueCount++ == 0) {
+    sSchedulerGroups = new LinkedList<SchedulerGroup>();
+  }
+}
+
+LabeledEventQueue::~LabeledEventQueue()
+{
+  if (--sLabeledEventQueueCount == 0) {
+    delete sSchedulerGroups;
+    sSchedulerGroups = nullptr;
+  }
 }
 
 static SchedulerGroup*
 GetSchedulerGroup(nsIRunnable* aEvent)
 {
   RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent);
   if (!groupRunnable) {
     // It's not labeled.
@@ -89,18 +109,24 @@ LabeledEventQueue::PutEvent(already_AddR
   }
 
   mNumEvents++;
   epoch->mNumEvents++;
 
   RunnableEpochQueue* queue = isLabeled ? mLabeled.LookupOrAdd(group) : &mUnlabeled;
   queue->Push(QueueEntry(event.forget(), epoch->mEpochNumber));
 
-  if (group && !group->isInList()) {
-    mSchedulerGroups.insertBack(group);
+  if (group && group->EnqueueEvent() == SchedulerGroup::NewlyQueued) {
+    // This group didn't have any events before. Add it to the
+    // sSchedulerGroups list.
+    MOZ_ASSERT(!group->isInList());
+    sSchedulerGroups->insertBack(group);
+    if (!sCurrentSchedulerGroup) {
+      sCurrentSchedulerGroup = group;
+    }
   }
 }
 
 void
 LabeledEventQueue::PopEpoch()
 {
   Epoch& epoch = mEpochs.FirstElement();
   MOZ_ASSERT(epoch.mNumEvents > 0);
@@ -108,85 +134,119 @@ LabeledEventQueue::PopEpoch()
     mEpochs.Pop();
   } else {
     epoch.mNumEvents--;
   }
 
   mNumEvents--;
 }
 
+// Returns the next SchedulerGroup after |aGroup| in sSchedulerGroups. Wraps
+// around to the beginning of the list when we hit the end.
+/* static */ SchedulerGroup*
+LabeledEventQueue::NextSchedulerGroup(SchedulerGroup* aGroup)
+{
+  SchedulerGroup* result = aGroup->getNext();
+  if (!result) {
+    result = sSchedulerGroups->getFirst();
+  }
+  return result;
+}
+
 already_AddRefed<nsIRunnable>
 LabeledEventQueue::GetEvent(EventPriority* aPriority,
                             const MutexAutoLock& aProofOfLock)
 {
   if (mEpochs.IsEmpty()) {
     return nullptr;
   }
 
   Epoch epoch = mEpochs.FirstElement();
   if (!epoch.IsLabeled()) {
     QueueEntry entry = mUnlabeled.FirstElement();
-    if (IsReadyToRun(entry.mRunnable, nullptr)) {
-      PopEpoch();
-      mUnlabeled.Pop();
-      MOZ_ASSERT(entry.mEpochNumber == epoch.mEpochNumber);
-      MOZ_ASSERT(entry.mRunnable.get());
-      return entry.mRunnable.forget();
+    if (!IsReadyToRun(entry.mRunnable, nullptr)) {
+      return nullptr;
     }
+
+    PopEpoch();
+    mUnlabeled.Pop();
+    MOZ_ASSERT(entry.mEpochNumber == epoch.mEpochNumber);
+    MOZ_ASSERT(entry.mRunnable.get());
+    return entry.mRunnable.forget();
+  }
+
+  if (!sCurrentSchedulerGroup) {
+    return nullptr;
   }
 
   // Move active tabs to the front of the queue. The mAvoidActiveTabCount field
   // prevents us from preferentially processing events from active tabs twice in
   // a row. This scheme is designed to prevent starvation.
   if (TabChild::HasActiveTabs() && mAvoidActiveTabCount <= 0) {
     for (TabChild* tabChild : TabChild::GetActiveTabs()) {
       SchedulerGroup* group = tabChild->TabGroup();
-      if (!group->isInList() || group == mSchedulerGroups.getFirst()) {
+      if (!group->isInList() || group == sCurrentSchedulerGroup) {
         continue;
       }
 
       // For each active tab we move to the front of the queue, we have to
       // process two SchedulerGroups (the active tab and another one, presumably
       // a background group) before we prioritize active tabs again.
       mAvoidActiveTabCount += 2;
 
-      group->removeFrom(mSchedulerGroups);
-      mSchedulerGroups.insertFront(group);
+      // We move |group| right before sCurrentSchedulerGroup and then set
+      // sCurrentSchedulerGroup to group.
+      MOZ_ASSERT(group != sCurrentSchedulerGroup);
+      group->removeFrom(*sSchedulerGroups);
+      sCurrentSchedulerGroup->setPrevious(group);
+      sCurrentSchedulerGroup = group;
     }
   }
 
-  // Iterate over SchedulerGroups in order. Each time we pass by a
-  // SchedulerGroup, we move it to the back of the list. This ensures that we
-  // process SchedulerGroups in a round-robin order (ignoring active tab
-  // prioritization).
-  SchedulerGroup* firstGroup = mSchedulerGroups.getFirst();
+  // Iterate over each SchedulerGroup once, starting at sCurrentSchedulerGroup.
+  SchedulerGroup* firstGroup = sCurrentSchedulerGroup;
   SchedulerGroup* group = firstGroup;
   do {
-    RunnableEpochQueue* queue = mLabeled.Get(group);
-    MOZ_ASSERT(queue);
-    MOZ_ASSERT(!queue->IsEmpty());
+    mAvoidActiveTabCount--;
 
-    mAvoidActiveTabCount--;
-    SchedulerGroup* next = group->removeAndGetNext();
-    mSchedulerGroups.insertBack(group);
+    RunnableEpochQueue* queue = mLabeled.Get(group);
+    if (!queue) {
+      // This can happen if |group| is in a different LabeledEventQueue than |this|.
+      group = NextSchedulerGroup(group);
+      continue;
+    }
+    MOZ_ASSERT(!queue->IsEmpty());
 
     QueueEntry entry = queue->FirstElement();
     if (entry.mEpochNumber == epoch.mEpochNumber &&
         IsReadyToRun(entry.mRunnable, group)) {
+      sCurrentSchedulerGroup = NextSchedulerGroup(group);
+
       PopEpoch();
 
+      if (group->DequeueEvent() == SchedulerGroup::NoLongerQueued) {
+        // Now we can take group out of sSchedulerGroups.
+        if (sCurrentSchedulerGroup == group) {
+          // Since we changed sCurrentSchedulerGroup above, we'll only get here
+          // if |group| was the only element in sSchedulerGroups. In that case
+          // set sCurrentSchedulerGroup to null.
+          MOZ_ASSERT(group->getNext() == nullptr);
+          MOZ_ASSERT(group->getPrevious() == nullptr);
+          sCurrentSchedulerGroup = nullptr;
+        }
+        group->removeFrom(*sSchedulerGroups);
+      }
       queue->Pop();
       if (queue->IsEmpty()) {
         mLabeled.Remove(group);
-        group->removeFrom(mSchedulerGroups);
       }
       return entry.mRunnable.forget();
     }
 
-    group = next;
+    group = NextSchedulerGroup(group);
   } while (group != firstGroup);
 
   return nullptr;
 }
 
 bool
 LabeledEventQueue::IsEmpty(const MutexAutoLock& aProofOfLock)
 {
--- a/xpcom/threads/LabeledEventQueue.h
+++ b/xpcom/threads/LabeledEventQueue.h
@@ -25,16 +25,17 @@ class SchedulerGroup;
 // from its queue. Ideally the heuristic should give precedence to
 // SchedulerGroups corresponding to the foreground tabs. The correctness of this
 // data structure relies on the invariant that events from different
 // SchedulerGroups cannot affect each other.
 class LabeledEventQueue final : public AbstractEventQueue
 {
 public:
   LabeledEventQueue();
+  ~LabeledEventQueue();
 
   void PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                 EventPriority aPriority,
                 const MutexAutoLock& aProofOfLock) final;
   already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority,
                                          const MutexAutoLock& aProofOfLock) final;
 
   bool IsEmpty(const MutexAutoLock& aProofOfLock) final;
@@ -122,23 +123,30 @@ private:
     Epoch NextEpoch(bool aIsLabeled) const
     {
       MOZ_ASSERT(aIsLabeled == !IsLabeled());
       return Epoch(mEpochNumber + 1, aIsLabeled);
     }
   };
 
   void PopEpoch();
+  static SchedulerGroup* NextSchedulerGroup(SchedulerGroup* aGroup);
 
   using RunnableEpochQueue = Queue<QueueEntry, 32>;
   using LabeledMap = nsClassHashtable<nsRefPtrHashKey<SchedulerGroup>, RunnableEpochQueue>;
   using EpochQueue = Queue<Epoch, 8>;
 
-  // List of SchedulerGroups that have events in the queue.
-  LinkedList<SchedulerGroup> mSchedulerGroups;
+  // List of SchedulerGroups that might have events. This is static, so it
+  // covers all LabeledEventQueues. If a SchedulerGroup is in this list, it may
+  // not have an event in *this* LabeledEventQueue (although it will have an
+  // event in *some* LabeledEventQueue). sCurrentSchedulerGroup cycles through
+  // the elements of sSchedulerGroups in order.
+  static LinkedList<SchedulerGroup>* sSchedulerGroups;
+  static size_t sLabeledEventQueueCount;
+  static SchedulerGroup* sCurrentSchedulerGroup;
 
   LabeledMap mLabeled;
   RunnableEpochQueue mUnlabeled;
   EpochQueue mEpochs;
   size_t mNumEvents = 0;
 
   // Number of SchedulerGroups that must be processed before we prioritize an
   // active tab. This field is designed to guarantee a 1:1 interleaving between
--- a/xpcom/threads/PrioritizedEventQueue.cpp
+++ b/xpcom/threads/PrioritizedEventQueue.cpp
@@ -117,17 +117,17 @@ template<class InnerQueueT>
 EventPriority
 PrioritizedEventQueue<InnerQueueT>::SelectQueue(bool aUpdateState,
                                                 const MutexAutoLock& aProofOfLock)
 {
   bool highPending = !mHighQueue->IsEmpty(aProofOfLock);
   bool normalPending = !mNormalQueue->IsEmpty(aProofOfLock);
   size_t inputCount = mInputQueue->Count(aProofOfLock);
 
-  if (aUpdateState && mInputQueueState == STATE_ENABLED &&
+  if (mInputQueueState == STATE_ENABLED &&
       mInputHandlingStartTime.IsNull() && inputCount > 0) {
     mInputHandlingStartTime =
       InputEventStatistics::Get()
       .GetInputHandlingStartTime(inputCount);
   }
 
   // We check the different queues in the following order. The conditions we use
   // are meant to avoid starvation and to ensure that we don't process an event
@@ -269,17 +269,17 @@ PrioritizedEventQueue<InnerQueueT>::HasR
 {
   mHasPendingEventsPromisedIdleEvent = false;
 
   EventPriority queue = SelectQueue(false, aProofOfLock);
 
   if (queue == EventPriority::High) {
     return mHighQueue->HasReadyEvent(aProofOfLock);
   } else if (queue == EventPriority::Input) {
-    return mIdleQueue->HasReadyEvent(aProofOfLock);
+    return mInputQueue->HasReadyEvent(aProofOfLock);
   } else if (queue == EventPriority::Normal) {
     return mNormalQueue->HasReadyEvent(aProofOfLock);
   }
 
   MOZ_ASSERT(queue == EventPriority::Idle);
 
   // If we get here, then both the high and normal queues are empty.
 
--- a/xpcom/threads/SchedulerGroup.h
+++ b/xpcom/threads/SchedulerGroup.h
@@ -70,16 +70,46 @@ public:
   }
 
   // Ensure that it's valid to access the TabGroup at this time.
   void ValidateAccess() const
   {
     MOZ_ASSERT(IsSafeToRun());
   }
 
+  enum EnqueueStatus
+  {
+    NewlyQueued,
+    AlreadyQueued,
+  };
+
+  // Records that this SchedulerGroup had an event enqueued in some
+  // queue. Returns whether the SchedulerGroup was already in a queue before
+  // EnqueueEvent() was called.
+  EnqueueStatus EnqueueEvent()
+  {
+    mEventCount++;
+    return mEventCount == 1 ? NewlyQueued : AlreadyQueued;
+  }
+
+  enum DequeueStatus
+  {
+    StillQueued,
+    NoLongerQueued,
+  };
+
+  // Records that this SchedulerGroup had an event dequeued from some
+  // queue. Returns whether the SchedulerGroup is still in a queue after
+  // DequeueEvent() returns.
+  DequeueStatus DequeueEvent()
+  {
+    mEventCount--;
+    return mEventCount == 0 ? NoLongerQueued : StillQueued;
+  }
+
   class Runnable final : public mozilla::Runnable
                        , public nsIRunnablePriority
                        , public nsILabelableRunnable
   {
   public:
     Runnable(already_AddRefed<nsIRunnable>&& aRunnable,
              SchedulerGroup* aGroup);
 
@@ -162,16 +192,20 @@ protected:
   // Shuts down this dispatcher. If aXPCOMShutdown is true, invalidates this
   // dispatcher.
   void Shutdown(bool aXPCOMShutdown);
 
   static MOZ_THREAD_LOCAL(bool) sTlsValidatingAccess;
 
   bool mIsRunning;
 
+  // Number of events that are currently enqueued for this SchedulerGroup
+  // (across all queues).
+  size_t mEventCount = 0;
+
   nsCOMPtr<nsISerialEventTarget> mEventTargets[size_t(TaskCategory::Count)];
   RefPtr<AbstractThread> mAbstractThreads[size_t(TaskCategory::Count)];
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(SchedulerGroup::Runnable, NS_SCHEDULERGROUPRUNNABLE_IID);
 
 } // namespace mozilla