Merge autoland to central, a=merge
author: Wes Kocher <wkocher@mozilla.com>
Thu, 21 Sep 2017 16:29:32 -0700
changeset 435519 d6d6fd889f7bd2ca57d92189e63441a58a356c39
parent 435424 ca7d18dbacbf103d74a3213d8d08a7c3e4def9a2 (current diff)
parent 435518 526fae334c8b3a26b468405185740137186a8314 (diff)
child 435543 68b1c083356d77c81cb6f929165f7553b2835120
push id: 1618
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 17:45:48 +0000
treeherder: mozilla-release@882ca853e05a [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 58.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge autoland to central, a=merge MozReview-Commit-ID: KNeXnxjnn5u
browser/themes/osx/menu-back.png
browser/themes/windows/menu-back.png
testing/talos/talos/tests/perf-reftest/bloom_basic.manifest
testing/talos/talos/tests/perf-reftest/bloom_basic_ref.manifest
--- a/browser/base/content/browser.js
+++ b/browser/base/content/browser.js
@@ -1392,16 +1392,18 @@ var gBrowserInit = {
 
       try {
         gBrowser.swapBrowsersAndCloseOther(gBrowser.selectedTab, tabToOpen);
       } catch (e) {
         Cu.reportError(e);
       }
     }
 
+    this._setInitialFocus();
+
     // Wait until chrome is painted before executing code not critical to making the window visible
     this._boundDelayedStartup = this._delayedStartup.bind(this);
     window.addEventListener("MozAfterPaint", this._boundDelayedStartup);
 
     this._loadHandled = true;
   },
 
   _cancelDelayedStartup() {
@@ -1599,17 +1601,17 @@ var gBrowserInit = {
       this._schedulePerWindowIdleTasks();
       document.documentElement.setAttribute("sessionrestored", "true");
     });
 
     Services.obs.notifyObservers(window, "browser-delayed-startup-finished");
     TelemetryTimestamps.add("delayedStartupFinished");
   },
 
-  _handleURIToLoad() {
+  _setInitialFocus() {
     let initiallyFocusedElement = document.commandDispatcher.focusedElement;
 
     let firstBrowserPaintDeferred = {};
     firstBrowserPaintDeferred.promise = new Promise(resolve => {
       firstBrowserPaintDeferred.resolve = resolve;
     });
 
     let mm = window.messageManager;
@@ -1621,16 +1623,41 @@ var gBrowserInit = {
     let initialBrowser = gBrowser.selectedBrowser;
     mm.addMessageListener("Browser:FirstNonBlankPaint",
                           function onFirstNonBlankPaint() {
       mm.removeMessageListener("Browser:FirstNonBlankPaint", onFirstNonBlankPaint);
       initialBrowser.removeAttribute("blank");
     });
 
     this._uriToLoadPromise.then(uriToLoad => {
+      if ((isBlankPageURL(uriToLoad) || uriToLoad == "about:privatebrowsing") &&
+          focusAndSelectUrlBar()) {
+        return;
+      }
+
+      if (gBrowser.selectedBrowser.isRemoteBrowser) {
+        // If the initial browser is remote, in order to optimize for first paint,
+        // we'll defer switching focus to that browser until it has painted.
+        firstBrowserPaintDeferred.promise.then(() => {
+          // If focus didn't move while we were waiting for first paint, we're okay
+          // to move to the browser.
+          if (document.commandDispatcher.focusedElement == initiallyFocusedElement) {
+            gBrowser.selectedBrowser.focus();
+          }
+        });
+      } else {
+        // If the initial browser is not remote, we can focus the browser
+        // immediately with no paint performance impact.
+        gBrowser.selectedBrowser.focus();
+      }
+    });
+  },
+
+  _handleURIToLoad() {
+    this._uriToLoadPromise.then(uriToLoad => {
       if (!uriToLoad || uriToLoad == "about:blank") {
         return;
       }
 
       // We don't check if uriToLoad is a XULElement because this case has
       // already been handled before first paint, and the argument cleared.
       if (uriToLoad instanceof Ci.nsIArray) {
         let count = uriToLoad.length;
@@ -1676,39 +1703,16 @@ var gBrowserInit = {
                 window.arguments[7], !!window.arguments[7], window.arguments[8]);
         window.focus();
       } else {
         // Note: loadOneOrMoreURIs *must not* be called if window.arguments.length >= 3.
         // Such callers expect that window.arguments[0] is handled as a single URI.
         loadOneOrMoreURIs(uriToLoad, Services.scriptSecurityManager.getSystemPrincipal());
       }
     });
-
-    this._uriToLoadPromise.then(uriToLoad => {
-      if ((isBlankPageURL(uriToLoad) || uriToLoad == "about:privatebrowsing") &&
-          focusAndSelectUrlBar()) {
-        return;
-      }
-
-      if (gBrowser.selectedBrowser.isRemoteBrowser) {
-        // If the initial browser is remote, in order to optimize for first paint,
-        // we'll defer switching focus to that browser until it has painted.
-        firstBrowserPaintDeferred.promise.then(() => {
-          // If focus didn't move while we were waiting for first paint, we're okay
-          // to move to the browser.
-          if (document.commandDispatcher.focusedElement == initiallyFocusedElement) {
-            gBrowser.selectedBrowser.focus();
-          }
-        });
-      } else {
-        // If the initial browser is not remote, we can focus the browser
-        // immediately with no paint performance impact.
-        gBrowser.selectedBrowser.focus();
-      }
-    });
   },
 
   /**
    * Use this function as an entry point to schedule tasks that
    * need to run once per window after startup, and can be scheduled
    * by using an idle callback.
    *
    * The functions scheduled here will fire from idle callbacks
--- a/browser/base/content/tabbrowser.xml
+++ b/browser/base/content/tabbrowser.xml
@@ -1847,22 +1847,18 @@
               postData: aPostDatas[i],
               userContextId: aUserContextId,
               triggeringPrincipal: aTriggeringPrincipal,
             });
             if (targetTabIndex !== -1)
               this.moveTabTo(tab, ++tabNum);
           }
 
-          if (!aLoadInBackground) {
-            if (firstTabAdded) {
-              // .selectedTab setter focuses the content area
-              this.selectedTab = firstTabAdded;
-            } else
-              this.selectedBrowser.focus();
+          if (firstTabAdded && !aLoadInBackground) {
+            this.selectedTab = firstTabAdded;
           }
         ]]></body>
       </method>
 
       <method name="updateBrowserRemoteness">
         <parameter name="aBrowser"/>
         <parameter name="aShouldBeRemote"/>
         <parameter name="aOptions"/>
--- a/browser/base/content/test/static/browser_all_files_referenced.js
+++ b/browser/base/content/test/static/browser_all_files_referenced.js
@@ -121,17 +121,17 @@ var whitelist = [
   // browser/extensions/pdfjs/content/web/viewer.js#7450
   {file: "resource://pdf.js/web/debugger.js"},
 
   // These are used in content processes. They are actually referenced.
   {file: "resource://shield-recipe-client-content/shield-content-frame.js"},
   {file: "resource://shield-recipe-client-content/shield-content-process.js"},
 
   // New L10n API that is not yet used in production
-  {file: "resource://gre/modules/DOMLocalization.jsm"},
+  {file: "chrome://global/content/l10n.js"},
 
   // Starting from here, files in the whitelist are bugs that need fixing.
   // Bug 1339424 (wontfix?)
   {file: "chrome://browser/locale/taskbar.properties",
    platforms: ["linux", "macosx"]},
   // Bug 1316187
   {file: "chrome://global/content/customizeToolbar.xul"},
   // Bug 1343837
--- a/browser/base/content/test/urlbar/browser.ini
+++ b/browser/base/content/test/urlbar/browser.ini
@@ -115,16 +115,17 @@ support-files =
   file_urlbar_edit_dos.html
 [browser_urlbar_searchsettings.js]
 [browser_urlbar_search_speculative_connect.js]
 [browser_urlbar_search_speculative_connect_engine.js]
 support-files =
   searchSuggestionEngine2.xml
   searchSuggestionEngine.sjs
 [browser_urlbar_search_speculative_connect_mousedown.js]
+[browser_urlbar_search_no_speculative_connect_with_client_cert.js]
 [browser_urlbar_stop_pending.js]
 support-files =
   slow-page.sjs
 [browser_urlbar_remoteness_switch.js]
 run-if = e10s
 [browser_urlHighlight.js]
 [browser_wyciwyg_urlbarCopying.js]
 subsuite = clipboard
copy from browser/base/content/test/urlbar/browser_urlbar_search_speculative_connect_mousedown.js
copy to browser/base/content/test/urlbar/browser_urlbar_search_no_speculative_connect_with_client_cert.js
--- a/browser/base/content/test/urlbar/browser_urlbar_search_speculative_connect_mousedown.js
+++ b/browser/base/content/test/urlbar/browser_urlbar_search_no_speculative_connect_with_client_cert.js
@@ -1,73 +1,181 @@
+/* eslint-disable mozilla/no-arbitrary-setTimeout */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
-// This test ensures that we setup a speculative network connection to
-// the site in mousedown event before the http request happens(in mouseup).
+// Tests that we don't speculatively connect when user certificates are installed
+
+const { MockRegistrar } =
+  Cu.import("resource://testing-common/MockRegistrar.jsm", {});
+
+const certService = Cc["@mozilla.org/security/local-cert-service;1"]
+                      .getService(Ci.nsILocalCertService);
+const certOverrideService = Cc["@mozilla.org/security/certoverride;1"]
+                              .getService(Ci.nsICertOverrideService);
+
+const host = "localhost";
+let uri;
+let handshakeDone = false;
+let expectingChooseCertificate = false;
+let chooseCertificateCalled = false;
+
+const clientAuthDialogs = {
+  chooseCertificate(ctx, hostname, port, organization, issuerOrg, certList,
+                    selectedIndex) {
+    ok(expectingChooseCertificate,
+       `${expectingChooseCertificate ? "" : "not "}expecting chooseCertificate to be called`);
+    is(certList.length, 1, "should have only one client certificate available");
+    selectedIndex.value = 0;
+    chooseCertificateCalled = true;
+    return true;
+  },
+
+  QueryInterface: XPCOMUtils.generateQI([Ci.nsIClientAuthDialogs]),
+};
+
+function startServer(cert) {
+  let tlsServer = Cc["@mozilla.org/network/tls-server-socket;1"]
+                    .createInstance(Ci.nsITLSServerSocket);
+  tlsServer.init(-1, true, -1);
+  tlsServer.serverCert = cert;
+
+  let input, output;
 
-let gHttpServer = null;
-let gScheme = "http";
-let gHost = "localhost"; // 'localhost' by default.
-let gPort = -1;
-let gIsSpeculativeConnected = false;
+  let listener = {
+    onSocketAccepted(socket, transport) {
+      info("Accepted TLS client connection");
+      let connectionInfo = transport.securityInfo
+                           .QueryInterface(Ci.nsITLSServerConnectionInfo);
+      connectionInfo.setSecurityObserver(listener);
+      input = transport.openInputStream(0, 0, 0);
+      output = transport.openOutputStream(0, 0, 0);
+    },
+
+    onHandshakeDone(socket, status) {
+      info("TLS handshake done");
+      handshakeDone = true;
+
+      input.asyncWait({
+        onInputStreamReady(readyInput) {
+          try {
+            let request = NetUtil.readInputStreamToString(readyInput,
+                                                          readyInput.available());
+            ok(request.startsWith("GET /") && request.includes("HTTP/1.1"),
+               "expecting an HTTP/1.1 GET request");
+            let response = "HTTP/1.1 200 OK\r\nContent-Type:text/plain\r\n" +
+                           "Connection:Close\r\nContent-Length:2\r\n\r\nOK";
+            output.write(response, response.length);
+          } catch (e) {
+            // This will fail when we close the speculative connection.
+          }
+        }
+      }, 0, 0, Services.tm.currentThread);
+    },
+
+    onStopListening() {
+      info("onStopListening");
+      input.close();
+      output.close();
+    }
+  };
+
+  tlsServer.setSessionCache(false);
+  tlsServer.setSessionTickets(false);
+  tlsServer.setRequestClientCertificate(Ci.nsITLSServerSocket.REQUEST_ALWAYS);
+
+  tlsServer.asyncListen(listener);
+
+  return tlsServer;
+}
+
+let server;
 
 add_task(async function setup() {
-  gHttpServer = runHttpServer(gScheme, gHost, gPort);
-  // The server will be run on a random port if the port number wasn't given.
-  gPort = gHttpServer.identity.primaryPort;
-
-  await PlacesTestUtils.addVisits([{
-    uri: `${gScheme}://${gHost}:${gPort}`,
-    title: "test visit for speculative connection",
-    transition: Ci.nsINavHistoryService.TRANSITION_TYPED,
-  }]);
-
   await SpecialPowers.pushPrefEnv({
     set: [["browser.urlbar.autoFill", true],
           // Turn off search suggestion so we won't speculative connect to the search engine.
           ["browser.search.suggest.enabled", false],
           ["browser.urlbar.speculativeConnect.enabled", true],
           // In mochitest this number is 0 by default but we have to turn it on.
           ["network.http.speculative-parallel-limit", 6],
           // The http server is using IPv4, so it's better to disable IPv6 to avoid weird
           // networking problem.
-          ["network.dns.disableIPv6", true]],
+          ["network.dns.disableIPv6", true],
+          ["security.default_personal_cert", "Ask Every Time"]],
   });
 
+  let clientAuthDialogsCID =
+    MockRegistrar.register("@mozilla.org/nsClientAuthDialogs;1",
+                           clientAuthDialogs);
+
+  let cert = await new Promise((resolve, reject) => {
+    certService.getOrCreateCert("speculative-connect", {
+      handleCert(c, rv) {
+        if (!Components.isSuccessCode(rv)) {
+          reject(rv);
+          return;
+        }
+        resolve(c);
+      }
+    });
+  });
+  server = startServer(cert);
+  uri = `https://${host}:${server.port}/`;
+  info(`running tls server at ${uri}`);
+  await PlacesTestUtils.addVisits([{
+    uri,
+    title: "test visit for speculative connection",
+    transition: Ci.nsINavHistoryService.TRANSITION_TYPED,
+  }]);
+
+  let overrideBits = Ci.nsICertOverrideService.ERROR_UNTRUSTED |
+                     Ci.nsICertOverrideService.ERROR_MISMATCH;
+  certOverrideService.rememberValidityOverride("localhost", server.port, cert,
+                                               overrideBits, true);
+
   registerCleanupFunction(async function() {
     await PlacesUtils.history.clear();
-    gHttpServer.identity.remove(gScheme, gHost, gPort);
-    gHttpServer.stop(() => {
-      gHttpServer = null;
-    });
+    MockRegistrar.unregister(clientAuthDialogsCID);
+    certOverrideService.clearValidityOverride("localhost", server.port);
   });
 });
 
-add_task(async function popup_mousedown_tests() {
+add_task(async function popup_mousedown_no_client_cert_dialog_until_navigate_test() {
   const test = {
     // To not trigger autofill, search keyword starts from the second character.
-    search: gHost.substr(1, 4),
-    completeValue: `${gScheme}://${gHost}:${gPort}/`
+    search: host.substr(1, 4),
+    completeValue: uri
   };
   info(`Searching for '${test.search}'`);
   await promiseAutocompleteResultPopup(test.search, window, true);
-  // Check if the first result is with type "searchengine"
   let controller = gURLBar.popup.input.controller;
-  // The first item should be 'Search with ...' thus we wan the second.
+  // The first item should be 'Search with ...' thus we want the second.
   let value = controller.getFinalCompleteValueAt(1);
   info(`The value of the second item is ${value}`);
   is(value, test.completeValue, "The second item has the url we visited.");
 
   await BrowserTestUtils.waitForCondition(() => {
     return !!gURLBar.popup.richlistbox.childNodes[1] &&
            is_visible(gURLBar.popup.richlistbox.childNodes[1]);
   }, "the node is there.");
 
+  expectingChooseCertificate = false;
   let listitem = gURLBar.popup.richlistbox.childNodes[1];
-  EventUtils.synthesizeMouse(listitem, 10, 10, {type: "mousedown"}, window);
+  EventUtils.synthesizeMouseAtCenter(listitem, {type: "mousedown"}, window);
   is(gURLBar.popup.richlistbox.selectedIndex, 1, "The second item is selected");
-  await promiseSpeculativeConnection(gHttpServer);
-  is(gHttpServer.connectionNumber, 1, `${gHttpServer.connectionNumber} speculative connection has been setup.`);
+
+  // We shouldn't have triggered a speculative connection, because a client
+  // certificate is installed.
+  SimpleTest.requestFlakyTimeout("Wait for UI");
+  await new Promise(resolve => setTimeout(resolve, 200));
+
+  // Now mouseup, expect that we choose a client certificate, and expect that
+  // we successfully load a page.
+  expectingChooseCertificate = true;
+  EventUtils.synthesizeMouseAtCenter(listitem, {type: "mouseup"}, window);
+  await BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser);
+  ok(chooseCertificateCalled, "chooseCertificate must have been called");
+  server.close();
 });
--- a/browser/components/customizableui/PanelMultiView.jsm
+++ b/browser/components/customizableui/PanelMultiView.jsm
@@ -427,17 +427,25 @@ this.PanelMultiView = class {
       } else {
         this._transitionHeight(() => {
           viewNode.removeAttribute("current");
           this._currentSubView = null;
           this.node.setAttribute("viewtype", "main");
         });
       }
     } else if (this.panelViews) {
-      this._mainView.setAttribute("current", "true");
+      // Make sure to hide all subviews, except for the mainView.
+      let mainView = this._mainView;
+      for (let panelview of this._panelViews) {
+        if (panelview == mainView)
+          panelview.setAttribute("current", true);
+        else
+          panelview.removeAttribute("current");
+      }
+      this.node.setAttribute("viewtype", "main");
     }
 
     if (!this.panelViews) {
       this._shiftMainView();
     }
   }
 
   showSubView(aViewId, aAnchor, aPreviousView) {
@@ -521,31 +529,27 @@ this.PanelMultiView = class {
 
       this._currentSubView = viewNode;
       if (this.panelViews) {
         if (viewNode.id == this._mainViewId) {
           this.node.setAttribute("viewtype", "main");
         } else {
           this.node.setAttribute("viewtype", "subview");
         }
+        // If we've got an older transition still running, make sure to clean it up.
+        await this._cleanupTransitionPhase();
         if (!playTransition) {
           viewNode.setAttribute("current", true);
           this.descriptionHeightWorkaround(viewNode);
         }
       }
 
       // Now we have to transition the panel.
       if (this.panelViews && playTransition) {
-        if (aAnchor)
-          aAnchor.setAttribute("open", true);
-
-        await this._transitionViews(previousViewNode, viewNode, reverse, previousRect);
-
-        if (aAnchor)
-          aAnchor.removeAttribute("open");
+        await this._transitionViews(previousViewNode, viewNode, reverse, previousRect, aAnchor);
 
         this._dispatchViewEvent(viewNode, "ViewShown");
         this._updateKeyboardFocus(viewNode);
       } else if (!this.panelViews) {
         this._transitionHeight(() => {
           viewNode.setAttribute("current", true);
           if (viewNode.id == this._mainViewId) {
             this.node.setAttribute("viewtype", "main");
@@ -572,37 +576,39 @@ this.PanelMultiView = class {
    * @param {panelview} previousViewNode Node that is currently shown as active,
    *                                     but is about to be transitioned away.
    * @param {panelview} viewNode         Node that will becode the active view,
    *                                     after the transition has finished.
    * @param {Boolean}   reverse          Whether we're navigation back to a
    *                                     previous view or forward to a next view.
    * @param {Object}    previousRect     Rect object, with the same structure as
    *                                     a DOMRect, of the `previousViewNode`.
-   * @param {Function}  callback         Function that will be invoked when the
-   *                                     transition is finished or when the
-   *                                     operation was canceled (early return).
+   * @param {Element}   anchor           the anchor for which we're opening
+   *                                     a new panelview, if any
    */
-  async _transitionViews(previousViewNode, viewNode, reverse, previousRect) {
+  async _transitionViews(previousViewNode, viewNode, reverse, previousRect, anchor) {
     // There's absolutely no need to show off our epic animation skillz when
     // the panel's not even open.
     if (this._panel.state != "open") {
       return;
     }
 
     const {window, document} = this;
 
     if (this._autoResizeWorkaroundTimer)
       window.clearTimeout(this._autoResizeWorkaroundTimer);
 
     this._transitionDetails = {
       phase: TRANSITION_PHASES.START,
-      previousViewNode, viewNode, reverse
+      previousViewNode, viewNode, reverse, anchor
     };
 
+    if (anchor)
+      anchor.setAttribute("open", "true");
+
     // Set the viewContainer dimensions to make sure only the current view is
     // visible.
     this._viewContainer.style.height = Math.max(previousRect.height, this._mainViewHeight) + "px";
     this._viewContainer.style.width = previousRect.width + "px";
     // Lock the dimensions of the window that hosts the popup panel.
     let rect = this._panel.popupBoxObject.getOuterScreenRect();
     this._panel.setAttribute("width", rect.width);
     this._panel.setAttribute("height", rect.height);
@@ -699,27 +705,30 @@ this.PanelMultiView = class {
    * Attempt to clean up the attributes and properties set by `_transitionViews`
    * above. Which attributes and properties depends on the phase the transition
    * was left from - normally that'd be `TRANSITION_PHASES.END`.
    */
   async _cleanupTransitionPhase() {
     if (!this._transitionDetails)
       return;
 
-    let {phase, previousViewNode, viewNode, reverse, resolve, listener} = this._transitionDetails;
+    let {phase, previousViewNode, viewNode, reverse, resolve, listener, anchor} = this._transitionDetails;
     this._transitionDetails = null;
 
     // Do the things we _always_ need to do whenever the transition ends or is
     // interrupted.
     this._dispatchViewEvent(previousViewNode, "ViewHiding");
     previousViewNode.removeAttribute("current");
     if (reverse)
       this._resetKeyNavigation(previousViewNode);
     this.descriptionHeightWorkaround(viewNode);
 
+    if (anchor)
+      anchor.removeAttribute("open");
+
     if (phase >= TRANSITION_PHASES.START) {
       this._panel.removeAttribute("width");
       this._panel.removeAttribute("height");
       // Myeah, panel layout auto-resizing is a funky thing. We'll wait
       // another few milliseconds to remove the width and height 'fixtures',
       // to be sure we don't flicker annoyingly.
       // NB: HACK! Bug 1363756 is there to fix this.
       this._autoResizeWorkaroundTimer = this.window.setTimeout(() => {
@@ -992,17 +1001,16 @@ this.PanelMultiView = class {
       case "popuphidden":
         // WebExtensions consumers can hide the popup from viewshowing, or
         // mid-transition, which disrupts our state:
         this._viewShowing = null;
         this._transitioning = false;
         this.node.removeAttribute("panelopen");
         this.showMainView();
         if (this.panelViews) {
-          this._cleanupTransitionPhase();
           for (let panelView of this._viewStack.children) {
             if (panelView.nodeName != "children") {
               panelView.__lastKnownBoundingRect = null;
               panelView.style.removeProperty("min-width");
               panelView.style.removeProperty("max-width");
             }
           }
           this.window.removeEventListener("keydown", this);
--- a/browser/components/customizableui/content/panelUI.inc.xul
+++ b/browser/components/customizableui/content/panelUI.inc.xul
@@ -416,38 +416,36 @@
                      closebuttonhidden="true"
                      secondarybuttonlabel="&updateAvailable.cancelButton.label;"
                      secondarybuttonaccesskey="&updateAvailable.cancelButton.accesskey;"
                      dropmarkerhidden="true"
                      checkboxhidden="true"
                      buttonhighlight="true"
                      hidden="true">
     <popupnotificationcontent id="update-available-notification-content" orient="vertical">
-      <description id="update-available-description">&updateAvailable.message;
-        <label id="update-available-whats-new" class="text-link" value="&updateAvailable.whatsnew.label;" />
-      </description>
+      <description id="update-available-description">&updateAvailable.message;</description>
+      <label id="update-available-whats-new" class="text-link" value="&updateAvailable.whatsnew.label;" />
     </popupnotificationcontent>
   </popupnotification>
 
   <popupnotification id="appMenu-update-manual-notification"
                      popupid="update-manual"
                      label="&updateManual.header.message;"
                      buttonlabel="&updateManual.acceptButton.label;"
                      buttonaccesskey="&updateManual.acceptButton.accesskey;"
                      closebuttonhidden="true"
                      secondarybuttonlabel="&updateManual.cancelButton.label;"
                      secondarybuttonaccesskey="&updateManual.cancelButton.accesskey;"
                      dropmarkerhidden="true"
                      checkboxhidden="true"
                      buttonhighlight="true"
                      hidden="true">
     <popupnotificationcontent id="update-manual-notification-content" orient="vertical">
-      <description id="update-manual-description">&updateManual.message;
-        <label id="update-manual-whats-new" class="text-link" value="&updateManual.whatsnew.label;" />
-      </description>
+      <description id="update-manual-description">&updateManual.message;</description>
+      <label id="update-manual-whats-new" class="text-link" value="&updateManual.whatsnew.label;" />
     </popupnotificationcontent>
   </popupnotification>
 
   <popupnotification id="appMenu-update-restart-notification"
                      popupid="update-restart"
                      label="&updateRestart.header.message2;"
                      buttonlabel="&updateRestart.acceptButton.label;"
                      buttonaccesskey="&updateRestart.acceptButton.accesskey;"
--- a/browser/components/customizableui/content/panelUI.js
+++ b/browser/components/customizableui/content/panelUI.js
@@ -527,17 +527,22 @@ const PanelUI = {
     // Since the library is the first view shown, we don't want to add a blocker
     // to the event, which would make PanelMultiView wait to show it.
     let container = this.clearLibraryRecentHighlights();
     if (!this.libraryRecentHighlightsEnabled) {
       this._loadingRecentHighlights = false;
       return;
     }
 
-    let highlights = await NewTabUtils.activityStreamLinks.getHighlights({ withFavicons: true });
+    let highlights = await NewTabUtils.activityStreamLinks.getHighlights({
+      // As per bug 1402023, hard-coded limit, until Activity Stream develops a
+      // richer list.
+      numItems: 6,
+      withFavicons: true
+    });
     // If there's nothing to display, or the panel is already hidden, get out.
     if (!highlights.length || viewNode.panelMultiView.getAttribute("panelopen") != "true") {
       this._loadingRecentHighlights = false;
       return;
     }
 
     container.hidden = container.previousSibling.hidden =
       container.previousSibling.previousSibling.hidden = false;
--- a/browser/components/downloads/DownloadsSubview.jsm
+++ b/browser/components/downloads/DownloadsSubview.jsm
@@ -55,19 +55,21 @@ class DownloadsSubview extends Downloads
     if (!contextMenu) {
       contextMenu = this.document.getElementById("downloadsContextMenu").cloneNode(true);
       contextMenu.setAttribute("closemenu", "none");
       contextMenu.setAttribute("id", this.context);
       contextMenu.removeAttribute("onpopupshown");
       contextMenu.setAttribute("onpopupshowing",
         "DownloadsSubview.updateContextMenu(document.popupNode, this);");
       contextMenu.setAttribute("onpopuphidden", "DownloadsSubview.onContextMenuHidden(this);")
-      let clearButton = contextMenu.querySelector("menuitem[command='downloadsCmd_clearDownloads'");
+      let clearButton = contextMenu.querySelector("menuitem[command='downloadsCmd_clearDownloads']");
       clearButton.hidden = false;
       clearButton.previousSibling.hidden = true;
+      contextMenu.querySelector("menuitem[command='cmd_delete']")
+        .setAttribute("command", "downloadsCmd_delete");
     }
     this.panelview.appendChild(contextMenu);
     this.container.setAttribute("context", this.context);
 
     this._downloadsData = DownloadsCommon.getData(this.window, true, true, true);
     this._downloadsData.addView(this);
   }
 
@@ -225,18 +227,27 @@ class DownloadsSubview extends Downloads
       button = button.parentNode;
     }
     menu.setAttribute("state", button.getAttribute("state"));
     if (button.hasAttribute("exists"))
       menu.setAttribute("exists", button.getAttribute("exists"));
     else
       menu.removeAttribute("exists");
     menu.classList.toggle("temporary-block", button.classList.contains("temporary-block"));
-    menu.querySelector("menuitem[command='downloadsCmd_clearDownloads'").disabled =
-      !DownloadsSubview.canClearDownloads(button);
+    for (let menuitem of menu.getElementsByTagName("menuitem")) {
+      let command = menuitem.getAttribute("command");
+      if (!command)
+        continue;
+      if (command == "downloadsCmd_clearDownloads") {
+        menuitem.disabled = !DownloadsSubview.canClearDownloads(button);
+      } else {
+        menuitem.disabled = !button._shell.isCommandEnabled(command);
+      }
+    }
+
     // The menu anchorNode property is not available long enough to be used elsewhere,
     // so tack it another property name.
     menu._anchorNode = button;
   }
 
   /**
    * Right after the context menu was hidden, perform a bit of cleanup.
    *
--- a/browser/components/downloads/DownloadsViewUI.jsm
+++ b/browser/components/downloads/DownloadsViewUI.jsm
@@ -402,16 +402,17 @@ this.DownloadsViewUI.DownloadElementShel
           let partFile = new FileUtils.File(this.download.target.partFilePath);
           if (partFile.exists()) {
             return true;
           }
         }
 
         // This property is false if the download did not succeed.
         return this.download.target.exists;
+      case "downloadsCmd_delete":
       case "cmd_delete":
         // We don't want in-progress downloads to be removed accidentally.
         return this.download.stopped;
     }
     return DownloadsViewUI.isCommandName(aCommand) && !!this[aCommand];
   },
 
   doCommand(aCommand) {
@@ -463,16 +464,23 @@ this.DownloadsViewUI.DownloadElementShel
     let document = window.document;
 
     // Do not suggest a file name if we don't know the original target.
     let targetPath = this.download.target.path ?
                      OS.Path.basename(this.download.target.path) : null;
     window.DownloadURL(this.download.source.url, targetPath, document);
   },
 
+  downloadsCmd_delete() {
+    // Alias for the 'cmd_delete' command, because it may clash with another
+    // controller which causes unexpected behavior as different codepaths claim
+    // ownership.
+    this.cmd_delete();
+  },
+
   cmd_delete() {
     (async () => {
       // Remove the associated history element first, if any, so that the views
       // that combine history and session downloads won't resurrect the history
       // download into the view just before it is deleted permanently.
       try {
         await PlacesUtils.history.remove(this.download.source.url);
       } catch (ex) {
--- a/browser/components/extensions/ext-browser.js
+++ b/browser/components/extensions/ext-browser.js
@@ -660,17 +660,20 @@ class Tab extends TabBase {
       active: false,
       pinned: false,
       incognito: Boolean(tabData.state && tabData.state.isPrivate),
       lastAccessed: tabData.state ? tabData.state.lastAccessed : tabData.lastAccessed,
     };
 
     if (extension.tabManager.hasTabPermission(tabData)) {
       let entries = tabData.state ? tabData.state.entries : tabData.entries;
-      let entry = entries[entries.length - 1];
+      let lastTabIndex = tabData.state ? tabData.state.index : tabData.index;
+      // We need to take lastTabIndex - 1 because the index in the tab data is
+      // 1-based rather than 0-based.
+      let entry = entries[lastTabIndex - 1];
       result.url = entry.url;
       result.title = entry.title;
       if (tabData.image) {
         result.favIconUrl = tabData.image;
       }
     }
 
     return result;
--- a/browser/components/extensions/ext-browserAction.js
+++ b/browser/components/extensions/ext-browserAction.js
@@ -190,22 +190,16 @@ this.browserAction = class extends Exten
         let popupURL = this.getProperty(tab, "popup");
         this.tabManager.addActiveTabPermission(tab);
 
         // Popups are shown only if a popup URL is defined; otherwise
         // a "click" event is dispatched. This is done for compatibility with the
         // Google Chrome onClicked extension API.
         if (popupURL) {
           try {
-            if (event.target.closest("panelmultiview")) {
-              // FIXME: The line below needs to change eventually, but for now:
-              // ensure the view is _always_ visible _before_ `popup.attach()` is
-              // called. PanelMultiView.jsm dictates different behavior.
-              event.target.setAttribute("current", true);
-            }
             let popup = this.getPopup(document.defaultView, popupURL);
             let attachPromise = popup.attach(event.target);
             event.detail.addBlocker(attachPromise);
             await attachPromise;
             TelemetryStopwatch.finish(POPUP_OPEN_MS_HISTOGRAM, this);
             if (this.eventQueue.length) {
               let histogram = Services.telemetry.getHistogramById(POPUP_RESULT_HISTOGRAM);
               histogram.add("popupShown");
--- a/browser/components/extensions/test/browser/browser-common.ini
+++ b/browser/components/extensions/test/browser/browser-common.ini
@@ -40,17 +40,16 @@ skip-if = os == 'linux'
 [browser_ext_browserAction_disabled.js]
 [browser_ext_browserAction_pageAction_icon.js]
 [browser_ext_browserAction_pageAction_icon_permissions.js]
 [browser_ext_browserAction_popup.js]
 skip-if = debug && (os == 'linux' && bits == 32) # Bug 1313372
 [browser_ext_browserAction_popup_preload.js]
 skip-if = (os == 'win' && !debug) # bug 1352668
 [browser_ext_browserAction_popup_resize.js]
-skip-if = os == 'mac' # Bug 1374749 will re-enable this test again.
 [browser_ext_browserAction_simple.js]
 [browser_ext_browserAction_telemetry.js]
 [browser_ext_browserAction_theme_icons.js]
 [browser_ext_browsingData_formData.js]
 [browser_ext_browsingData_history.js]
 [browser_ext_browsingData_localStorage.js]
 [browser_ext_browsingData_pluginData.js]
 [browser_ext_browsingData_serviceWorkers.js]
--- a/browser/components/extensions/test/browser/browser_ext_browserAction_popup_resize.js
+++ b/browser/components/extensions/test/browser/browser_ext_browserAction_popup_resize.js
@@ -151,19 +151,24 @@ async function testPopupSize(standardsMo
     await closeBrowserAction(extension, browserWin);
   }
 
 
   // Test the PanelUI panel for a menu panel button.
   let widget = getBrowserActionWidget(extension);
   CustomizableUI.addWidgetToArea(widget.id, getCustomizableUIPanelID());
 
+  let panel = browserWin.PanelUI.overflowPanel;
+  let panelMultiView = panel.firstChild;
+  let widgetId = makeWidgetId(extension.id);
+  // The 'ViewShown' event is the only way to correctly determine when the extensions'
+  // panelview has finished transitioning and is fully in view.
+  let shownPromise = BrowserTestUtils.waitForEvent(panelMultiView, "ViewShown",
+    e => (e.originalTarget.id || "").includes(widgetId));
   let browser = await openPanel(extension, browserWin);
-
-  let panel = browserWin.PanelUI.overflowPanel;
   let origPanelRect = panel.getBoundingClientRect();
 
   // Check that the panel is still positioned as expected.
   let checkPanelPosition = () => {
     is(panel.getAttribute("side"), arrowSide, "Panel arrow is positioned as expected");
 
     let panelRect = panel.getBoundingClientRect();
     if (arrowSide == "top") {
@@ -178,26 +183,20 @@ async function testPopupSize(standardsMo
       ok(panelRect.top <= origPanelRect.top, `Panel has not shrunk from original size (${panelRect.top} <= ${origPanelRect.top})`);
 
       let panelTop = browserWin.mozInnerScreenY + panelRect.top;
       ok(panelTop >= browserWin.screen.availTop, `Top of popup should be on-screen. (${panelTop} >= ${browserWin.screen.availTop})`);
     }
   };
 
   await awaitBrowserLoaded(browser);
-
-  let panelview = browser.closest("panelview");
-  // Need to wait first for the forced panel width and for the panelview's border to be gone,
-  // then for layout to happen again. Otherwise the removal of the border between views in the
-  // panelmultiview trips up our width checking causing it to be off-by-one.
-  await BrowserTestUtils.waitForCondition(() => (!panel.hasAttribute("width") && (!panelview || !panelview.style.borderInlineStart)));
-  await promiseAnimationFrame(browserWin);
+  await shownPromise;
   // Wait long enough to make sure the initial resize debouncing timer has
   // expired.
-  await delay(100);
+  await delay(500);
 
   let dims = await promiseContentDimensions(browser);
 
   is(dims.isStandards, standardsMode, "Document has the expected compat mode");
 
   // If the browser's preferred height is smaller than the initial height of the
   // panel, then it will still take up the full available vertical space. Even
   // so, we need to check that we've gotten the preferred height calculation
--- a/browser/components/extensions/test/browser/browser_ext_sessions_getRecentlyClosed_tabs.js
+++ b/browser/components/extensions/test/browser/browser_ext_sessions_getRecentlyClosed_tabs.js
@@ -33,18 +33,29 @@ add_task(async function test_sessions_ge
   let extension = ExtensionTestUtils.loadExtension({
     manifest: {
       permissions: ["sessions", "tabs"],
     },
     background,
   });
 
   let win = await BrowserTestUtils.openNewBrowserWindow();
-  await BrowserTestUtils.loadURI(win.gBrowser.selectedBrowser, "about:mozilla");
-  await BrowserTestUtils.browserLoaded(win.gBrowser.selectedBrowser);
+  let tabBrowser = win.gBrowser.selectedBrowser;
+  for (let url of ["about:robots", "about:mozilla", "about:config"]) {
+    await BrowserTestUtils.loadURI(tabBrowser, url);
+    await BrowserTestUtils.browserLoaded(tabBrowser, false, url);
+  }
+
+  // Ensure that getRecentlyClosed returns correct results after the back
+  // button has been used.
+  let goBackPromise = BrowserTestUtils.waitForLocationChange(
+    win.gBrowser, "about:mozilla");
+  tabBrowser.goBack();
+  await goBackPromise;
+
   let expectedTabs = [];
   let tab = win.gBrowser.selectedTab;
   // Because there is debounce logic in ContentLinkHandler.jsm to reduce the
   // favicon loads, we have to wait some time before checking that icon was
   // stored properly. If that page doesn't have favicon links, let it timeout.
   try {
     await BrowserTestUtils.waitForCondition(() => {
       return gBrowser.getIcon(tab) != null;
--- a/browser/components/nsBrowserGlue.js
+++ b/browser/components/nsBrowserGlue.js
@@ -32,18 +32,20 @@ XPCOMUtils.defineLazyModuleGetters(this,
   BrowserUsageTelemetry: "resource:///modules/BrowserUsageTelemetry.jsm",
   ContentClick: "resource:///modules/ContentClick.jsm",
   ContextualIdentityService: "resource://gre/modules/ContextualIdentityService.jsm",
   DateTimePickerHelper: "resource://gre/modules/DateTimePickerHelper.jsm",
   DirectoryLinksProvider: "resource:///modules/DirectoryLinksProvider.jsm",
   ExtensionsUI: "resource:///modules/ExtensionsUI.jsm",
   Feeds: "resource:///modules/Feeds.jsm",
   FileUtils: "resource://gre/modules/FileUtils.jsm",
+  FileSource: "resource://gre/modules/L10nRegistry.jsm",
   FormValidationHandler: "resource:///modules/FormValidationHandler.jsm",
   Integration: "resource://gre/modules/Integration.jsm",
+  L10nRegistry: "resource://gre/modules/L10nRegistry.jsm",
   LightweightThemeManager: "resource://gre/modules/LightweightThemeManager.jsm",
   LoginHelper: "resource://gre/modules/LoginHelper.jsm",
   LoginManagerParent: "resource://gre/modules/LoginManagerParent.jsm",
   NetUtil: "resource://gre/modules/NetUtil.jsm",
   NewTabUtils: "resource://gre/modules/NewTabUtils.jsm",
   OS: "resource://gre/modules/osfile.jsm",
   PageActions: "resource:///modules/PageActions.jsm",
   PageThumbs: "resource://gre/modules/PageThumbs.jsm",
@@ -625,16 +627,24 @@ BrowserGlue.prototype = {
         headerURL: "resource:///chrome/browser/content/browser/defaultthemes/compact.header.png",
         iconURL: "resource:///chrome/browser/content/browser/defaultthemes/dark.icon.svg",
         textcolor: "white",
         accentcolor: "black",
         author: vendorShortName,
       });
     }
 
+
+    // Initialize the default l10n resource sources for L10nRegistry.
+    const locales = [AppConstants.INSTALL_LOCALE];
+    const toolkitSource = new FileSource("toolkit", locales, "resource://gre/localization/{locale}/");
+    L10nRegistry.registerSource(toolkitSource);
+    const appSource = new FileSource("app", locales, "resource://app/localization/{locale}/");
+    L10nRegistry.registerSource(appSource);
+
     Services.obs.notifyObservers(null, "browser-ui-startup-complete");
   },
 
   _checkForOldBuildUpdates() {
     // check for update if our build is old
     if (AppConstants.MOZ_UPDATER &&
         Services.prefs.getBoolPref("app.update.enabled") &&
         Services.prefs.getBoolPref("app.update.checkInstallTime")) {
--- a/browser/components/places/content/controller.js
+++ b/browser/components/places/content/controller.js
@@ -1629,38 +1629,49 @@ var PlacesControllerDragHelper = {
                    title: data.label,
                    type: PlacesUtils.TYPE_X_MOZ_URL}];
       } else
         throw new Error("bogus data was passed as a tab");
 
       for (let unwrapped of nodes) {
         let index = await insertionPoint.getIndex();
 
-        // Adjust insertion index to prevent reversal of dragged items. When you
-        // drag multiple elts upward: need to increment index or each successive
-        // elt will be inserted at the same index, each above the previous.
         if (index != -1 && unwrapped.itemGuid) {
           // Note: we use the parent from the existing bookmark as the sidebar
           // gives us an unwrapped.parent that is actually a query and not the real
           // parent.
           let existingBookmark = await PlacesUtils.bookmarks.fetch(unwrapped.itemGuid);
-          let dragginUp = parentGuid == existingBookmark.parentGuid &&
-                          index < existingBookmark.index;
 
-          if (dragginUp) {
-            index += movedCount++;
-          } else if (PlacesUIUtils.useAsyncTransactions) {
-            if (index == existingBookmark.index) {
-              // We're moving to the same index, so there's nothing for us to do.
-              continue;
+          // If we're dropping on the same folder, then we may need to adjust
+          // the index to insert at the correct place.
+          if (existingBookmark && parentGuid == existingBookmark.parentGuid) {
+            if (PlacesUIUtils.useAsyncTransactions) {
+              if (index < existingBookmark.index) {
+                // When you drag multiple elts upward: need to increment index or
+                // each successive elt will be inserted at the same index, each
+                // above the previous.
+                index += movedCount++;
+              } else if (index > existingBookmark.index) {
+                // If we're dragging down, we need to go one lower to insert at
+                // the real point as moving the element changes the index of
+                // everything below by 1.
+                index--;
+              } else {
+                // This isn't moving so we skip it.
+                continue;
+              }
+            } else {
+              // Sync Transactions. Adjust insertion index to prevent reversal
+              // of dragged items. When you drag multiple elts upward: need to
+              // increment index or each successive elt will be inserted at the
+              // same index, each above the previous.
+              if (index < existingBookmark.index) { // eslint-disable-line no-lonely-if
+                index += movedCount++;
+              }
             }
-            // If we're dragging down, we need to go one lower to insert at
-            // the real point as moving the element changes the index of
-            // everything below by 1.
-            index--;
           }
         }
 
         // If dragging over a tag container we should tag the item.
         if (insertionPoint.isTag) {
           let uri = NetUtil.newURI(unwrapped.uri);
           let tagItemId = insertionPoint.itemId;
           if (PlacesUIUtils.useAsyncTransactions)
--- a/browser/components/places/tests/browser/browser_controller_onDrop.js
+++ b/browser/components/places/tests/browser/browser_controller_onDrop.js
@@ -29,49 +29,66 @@ add_task(async function setup() {
       title: "bm1",
       url: "http://example1.com"
     }, {
       title: "bm2",
       url: "http://example2.com"
     }, {
       title: "bm3",
       url: "http://example3.com"
+    }, {
+      title: "folder1",
+      type: PlacesUtils.bookmarks.TYPE_FOLDER,
+      children: [{
+        title: "bm4",
+        url: "http://example1.com"
+      }, {
+        title: "bm5",
+        url: "http://example2.com"
+      }, {
+        title: "bm6",
+        url: "http://example3.com"
+      }]
     }]
   });
 
   bookmarkIds = await PlacesUtils.promiseManyItemIds([
     bookmarks[0].guid,
     bookmarks[1].guid,
     bookmarks[2].guid,
   ]);
 });
 
-async function run_drag_test(startBookmarkIndex, insertionIndex,
-                             realInsertionIndex, expectTransactionCreated = true) {
+async function run_drag_test(startBookmarkIndex, insertionIndex, newParentGuid,
+                             expectedInsertionIndex, expectTransactionCreated = true) {
   if (!PlacesUIUtils.useAsyncTransactions) {
     Assert.ok(true, "Skipping test as async transactions are turned off");
     return;
   }
 
+  if (!newParentGuid) {
+    newParentGuid = PlacesUtils.bookmarks.unfiledGuid;
+  }
+
   // Reset the stubs so that previous test runs don't count against us.
   PlacesUIUtils.getTransactionForData.reset();
   PlacesTransactions.batch.reset();
 
   let dragBookmark = bookmarks[startBookmarkIndex];
 
   await withSidebarTree("bookmarks", async (tree) => {
     tree.selectItems([PlacesUtils.unfiledBookmarksFolderId]);
     PlacesUtils.asContainer(tree.selectedNode).containerOpen = true;
 
     // Simulating a drag-drop with a tree view turns out to be really difficult
     // as you can't get a node for the source/target. Hence, we fake the
     // insertion point and drag data and call the function direct.
     let ip = new InsertionPoint({
       parentId: await PlacesUtils.promiseItemId(PlacesUtils.bookmarks.unfiledGuid),
-      parentGuid: PlacesUtils.bookmarks.unfiledGuid,
+      parentGuid: newParentGuid,
       index: insertionIndex,
       orientation: Ci.nsITreeView.DROP_ON
     });
 
     let bookmarkWithId = JSON.stringify(Object.assign({
       id: bookmarkIds.get(dragBookmark.guid),
       itemGuid: dragBookmark.guid,
       parent: PlacesUtils.unfiledBookmarksFolderId,
@@ -102,36 +119,42 @@ async function run_drag_test(startBookma
       "Should have called getTransactionForData at least once.");
 
     let args = PlacesUIUtils.getTransactionForData.args[0];
 
     Assert.deepEqual(args[0], JSON.parse(bookmarkWithId),
       "Should have called getTransactionForData with the correct unwrapped bookmark");
     Assert.equal(args[1], PlacesUtils.TYPE_X_MOZ_PLACE,
       "Should have called getTransactionForData with the correct flavor");
-    Assert.equal(args[2], PlacesUtils.bookmarks.unfiledGuid,
+    Assert.equal(args[2], newParentGuid,
       "Should have called getTransactionForData with the correct parent guid");
-    Assert.equal(args[3], realInsertionIndex,
+    Assert.equal(args[3], expectedInsertionIndex,
       "Should have called getTransactionForData with the correct index");
     Assert.equal(args[4], false,
       "Should have called getTransactionForData with a move");
   });
 }
 
 add_task(async function test_simple_move_down() {
   // When we move items down the list, we'll get a drag index that is one higher
   // than where we actually want to insert to - as the item is being moved up,
   // everything shifts down one. Hence the index to pass to the transaction should
   // be one less than the supplied index.
-  await run_drag_test(0, 2, 1);
+  await run_drag_test(0, 2, null, 1);
 });
 
 add_task(async function test_simple_move_up() {
   // When we move items up the list, we want the matching index to be passed to
   // the transaction as there's no changes below the item in the list.
-  await run_drag_test(2, 0, 0);
+  await run_drag_test(2, 0, null, 0);
 });
 
-add_task(async function test_simple_move_to_same() {
+add_task(async function test_simple_move_to_same_index() {
   // If we move to the same index, then we don't expect any transactions to be
   // created.
-  await run_drag_test(1, 1, 1, false);
+  await run_drag_test(1, 1, null, 1, false);
 });
+
+add_task(async function test_simple_move_different_folder() {
+  // When we move items to a different folder, the index should never change.
+  await run_drag_test(0, 2, bookmarks[3].guid, 2);
+  await run_drag_test(2, 0, bookmarks[3].guid, 0);
+});
--- a/browser/themes/linux/browser.css
+++ b/browser/themes/linux/browser.css
@@ -8,16 +8,17 @@
 
 @namespace url("http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul");
 @namespace html url("http://www.w3.org/1999/xhtml");
 
 %include ../shared/browser.inc.css
 
 :root {
   --tabs-border: rgba(0,0,0,.3);
+  --tab-line-color: highlight;
 
   --toolbar-non-lwt-bgcolor: -moz-dialog;
   --toolbar-non-lwt-textcolor: -moz-dialogtext;
   --toolbar-non-lwt-bgimage: linear-gradient(rgba(255,255,255,.15), rgba(255,255,255,.15));
   --toolbar-bgcolor: var(--toolbar-non-lwt-bgcolor);
   --toolbar-bgimage: var(--toolbar-non-lwt-bgimage);
 
   --toolbarbutton-border-radius: 4px;
@@ -171,30 +172,16 @@ menuitem.bookmark-item {
 }
 
 .bookmark-item[cutting] > .toolbarbutton-text,
 .bookmark-item[cutting] > .menu-iconic-left > .menu-iconic-text {
   opacity: 0.7;
 }
 
 %include ../shared/bookmarked-notification.inc.css
-
-.unified-nav-back[_moz-menuactive] {
-  list-style-image: url("moz-icon://stock/gtk-go-back-ltr?size=menu") !important;
-}
-.unified-nav-back[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("moz-icon://stock/gtk-go-back-rtl?size=menu") !important;
-}
-.unified-nav-forward[_moz-menuactive] {
-  list-style-image: url("moz-icon://stock/gtk-go-forward-ltr?size=menu") !important;
-}
-.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("moz-icon://stock/gtk-go-forward-rtl?size=menu") !important;
-}
-
 %include ../shared/toolbarbuttons.inc.css
 %include ../shared/toolbarbutton-icons.inc.css
 %include ../shared/menupanel.inc.css
 
 /* Fullscreen window controls */
 #window-controls {
   -moz-box-align: start;
   margin-inline-start: 10px;
--- a/browser/themes/moz.build
+++ b/browser/themes/moz.build
@@ -10,8 +10,17 @@ with Files("**"):
 toolkit = CONFIG['MOZ_WIDGET_TOOLKIT']
 
 if toolkit == 'cocoa':
     DIRS += ['osx']
 elif toolkit in ('gtk2', 'gtk3'):
     DIRS += ['linux']
 else:
     DIRS += ['windows']
+
+with Files('osx/**'):
+    SCHEDULES.exclusive = ['macosx']
+
+with Files('linux/**'):
+    SCHEDULES.exclusive = ['linux']
+
+with Files('windows/**'):
+    SCHEDULES.exclusive = ['windows']
--- a/browser/themes/osx/browser.css
+++ b/browser/themes/osx/browser.css
@@ -9,16 +9,17 @@
 
 @namespace url("http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul");
 @namespace html url("http://www.w3.org/1999/xhtml");
 
 %include ../shared/browser.inc.css
 
 :root {
   --tabs-border: rgba(0,0,0,.3);
+  --tab-line-color: #0a84ff;
 
   --toolbar-non-lwt-bgcolor: #f9f9fa;
   --toolbar-non-lwt-textcolor: #0c0c0d;
   --toolbar-non-lwt-bgimage: none;
   --toolbar-bgcolor: var(--toolbar-non-lwt-bgcolor);
   --toolbar-bgimage: var(--toolbar-non-lwt-bgimage);
 
   --toolbarbutton-vertical-text-padding: calc(var(--toolbarbutton-inner-padding) + 1px);
@@ -315,38 +316,32 @@
 .findbar-button {
   background: none;
   box-shadow: none;
   border: none;
 }
 
 /* On Mac, native buttons keep their full opacity when they become disabled
  * and only the glyph or text on top of them becomes less opaque. */
-#back-button[disabled="true"] > .toolbarbutton-icon {
+#main-window:not([customizing]) #back-button[disabled="true"] > .toolbarbutton-icon {
   opacity: 1 !important;
   -moz-context-properties: fill, fill-opacity;
-  fill-opacity: 0.4;
+  /* Disabled toolbar buttons get an opacity of 0.4 which multiplies
+   * their fill-opacity of 0.7. calc() doesn't work here - we'd need
+   * to multiply two unitless numbers and that's invalid in CSS, so
+   * we need to hard code the value for now. */
+  fill-opacity: 0.28;
 }
 
 /* Inactive elements are faded out on OSX */
 .toolbarbutton-1:not(:hover):-moz-window-inactive,
 #main-window:not([customizing]) .toolbarbutton-1:-moz-window-inactive[disabled="true"] {
   opacity: 0.5;
 }
 
-.unified-nav-back[_moz-menuactive]:-moz-locale-dir(ltr),
-.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("chrome://browser/skin/menu-back.png") !important;
-}
-
-.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(ltr),
-.unified-nav-back[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("chrome://browser/skin/menu-forward.png") !important;
-}
-
 /* ----- FULLSCREEN WINDOW CONTROLS ----- */
 
 #minimize-button,
 #close-button,
 #fullscreen-button ~ #window-controls > #restore-button {
   display: none;
 }
 
--- a/browser/themes/osx/jar.mn
+++ b/browser/themes/osx/jar.mn
@@ -6,18 +6,16 @@ browser.jar:
 % skin browser classic/1.0 %skin/classic/browser/
 #include ../shared/jar.inc.mn
   skin/classic/browser/sanitizeDialog.css
   skin/classic/browser/aboutSessionRestore-window-icon.png
 * skin/classic/browser/syncedtabs/sidebar.css          (syncedtabs/sidebar.css)
 * skin/classic/browser/browser.css
 * skin/classic/browser/compacttheme.css
   skin/classic/browser/subtle-pattern.png
-  skin/classic/browser/menu-back.png
-  skin/classic/browser/menu-forward.png
   skin/classic/browser/menuPanel-customize.png
   skin/classic/browser/menuPanel-customize@2x.png
   skin/classic/browser/menuPanel-exit.png
   skin/classic/browser/menuPanel-exit@2x.png
   skin/classic/browser/menuPanel-help.png
   skin/classic/browser/menuPanel-help@2x.png
   skin/classic/browser/panel-expander-closed.png
   skin/classic/browser/panel-expander-closed@2x.png
deleted file mode 100644
index 82a74c7b811e16f90329edaafdf220d9cb138330..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
literal 0
Hc$@<O00001
--- a/browser/themes/shared/browser.inc.css
+++ b/browser/themes/shared/browser.inc.css
@@ -74,16 +74,32 @@
 #library-animatable-box {
   display: none;
 }
 
 #library-animatable-box[animate] {
   display: -moz-box;
 }
 
+/* Back / Forward context menu */
+
+.unified-nav-back[_moz-menuactive]:-moz-locale-dir(ltr),
+.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(rtl) {
+  list-style-image: url("chrome://browser/skin/back.svg") !important;
+  -moz-context-properties: fill;
+  fill: currentColor;
+}
+
+.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(ltr),
+.unified-nav-back[_moz-menuactive]:-moz-locale-dir(rtl) {
+  list-style-image: url("chrome://browser/skin/forward.svg") !important;
+  -moz-context-properties: fill;
+  fill: currentColor;
+}
+
 /* Private browsing and accessibility indicators */
 
 .accessibility-indicator,
 .private-browsing-indicator {
   background-repeat: no-repeat;
   background-size: 100% auto;
   background-position: center;
   width: 24px;
--- a/browser/themes/shared/compacttheme.inc.css
+++ b/browser/themes/shared/compacttheme.inc.css
@@ -12,17 +12,17 @@
   --toolbar-bgcolor: var(--chrome-secondary-background-color);
   --toolbar-gbimage: none;
   --toolbar-non-lwt-bgcolor: var(--toolbar-bgcolor);
   --toolbar-non-lwt-textcolor: var(--chrome-color);
   --toolbar-non-lwt-bgimage: none;
 
   --toolbarbutton-icon-fill-opacity: .7;
 
-  --tab-line-color: highlight;
+  --tab-line-color: #0a84ff;
 }
 
 :root:-moz-lwtheme-brighttext {
   /* Chrome */
   --chrome-background-color: hsl(240, 5%, 5%);
   --chrome-color: rgb(249, 249, 250);
   --chrome-secondary-background-color: hsl(240, 1%, 20%);
   --toolbox-border-bottom-color: hsla(240, 5%, 5%, .1);
--- a/browser/themes/shared/customizableui/customizeMode.inc.css
+++ b/browser/themes/shared/customizableui/customizeMode.inc.css
@@ -366,16 +366,20 @@ toolbarpaletteitem[place=toolbar] > tool
   background-color: var(--arrowpanel-dimmed-further);
 }
 
 .customization-uidensity-menuitem > .menu-iconic-text,
 .customization-lwtheme-menu-theme > .toolbarbutton-text {
   text-align: start;
 }
 
+.customization-uidensity-menuitem > .menu-iconic-text {
+  font: menu;
+}
+
 #customization-lwtheme-menu-header,
 #customization-lwtheme-menu-recommended {
   padding: 10px;
   margin-bottom: 5px;
   text-align: center;
   font-weight: 500;
   border-bottom: 1px solid var(--panel-separator-color);
 }
--- a/browser/themes/shared/tabs.inc.css
+++ b/browser/themes/shared/tabs.inc.css
@@ -3,17 +3,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 %endif
 %filter substitution
 %define horizontalTabPadding 9px
 
 :root {
   --tab-toolbar-navbar-overlap: 1px;
-  --tab-line-color: highlight;
   --tab-min-height: 33px;
   --tab-loading-fill: #0A84FF;
 }
 
 :root[uidensity=compact] {
   --tab-min-height: 29px;
 }
 
@@ -561,17 +560,17 @@ tabbrowser {
 
 /* Tab bar scroll arrows */
 
 .tabbrowser-arrowscrollbox > .scrollbutton-up,
 .tabbrowser-arrowscrollbox > .scrollbutton-down {
   list-style-image: url(chrome://browser/skin/arrow-left.svg);
   -moz-context-properties: fill, fill-opacity;
   fill: currentColor;
-  fill-opacity: .8;
+  fill-opacity: var(--toolbarbutton-icon-fill-opacity);
 }
 
 .tabbrowser-arrowscrollbox > .scrollbutton-up:-moz-locale-dir(rtl),
 .tabbrowser-arrowscrollbox > .scrollbutton-down:-moz-locale-dir(ltr) {
   transform: scaleX(-1);
 }
 
 /* New tab button */
--- a/browser/themes/shared/toolbarbuttons.inc.css
+++ b/browser/themes/shared/toolbarbuttons.inc.css
@@ -303,16 +303,26 @@ toolbarbutton.bookmark-item {
 .bookmark-item > .toolbarbutton-text {
   display: -moz-box !important;
 }
 
 #PlacesToolbarItems > .bookmark-item > .toolbarbutton-icon[label]:not([label=""]) {
   margin-inline-end: 4px;
 }
 
+/* The bookmarks toolbar is smaller than the other toolbars, so we
+ * need to override the badge position to not be cut off. */
+#PersonalToolbar .toolbarbutton-badge {
+  margin-top: -1px !important;
+}
+
+:root[uidensity=touch] #PersonalToolbar .toolbarbutton-badge {
+  margin-top: -4px !important;
+}
+
 /* Remove a pixel of margin on the end so that the badge doesn't
  * overflow the toolbar and push the button into the overflow menu. */
 :root[uidensity=compact] .toolbarbutton-badge {
   margin-inline-end: -7px !important;
 }
 
 /* To allow toolbarbuttons in the bookmarks toolbar to grow in
  * height with the toolbar (like bookmark items), we apply background
--- a/browser/themes/shared/urlbar-searchbar.inc.css
+++ b/browser/themes/shared/urlbar-searchbar.inc.css
@@ -196,22 +196,21 @@
 .urlbar-icon-wrapper[open] > .urlbar-icon,
 .urlbar-icon-wrapper > .urlbar-icon:hover,
 .urlbar-icon-wrapper > .urlbar-icon:hover:active {
   background-color: transparent;
 }
 
 .urlbar-go-button,
 .search-go-button {
-  list-style-image: url("chrome://browser/skin/back.svg");
-  width: 26px;
+  list-style-image: url("chrome://browser/skin/forward.svg");
 }
 
-.urlbar-go-button:-moz-locale-dir(ltr),
-.search-go-button:-moz-locale-dir(ltr) {
+.urlbar-go-button:-moz-locale-dir(rtl),
+.search-go-button:-moz-locale-dir(rtl) {
   transform: scaleX(-1);
 }
 
 .urlbar-history-dropmarker {
   -moz-appearance: none;
   list-style-image: url(chrome://global/skin/icons/arrow-dropdown-16.svg);
   transition: opacity 0.15s ease;
 }
--- a/browser/themes/windows/browser.css
+++ b/browser/themes/windows/browser.css
@@ -8,16 +8,17 @@
 @namespace html url("http://www.w3.org/1999/xhtml");
 
 %include ../shared/browser.inc.css
 
 :root {
   --titlebar-text-color: currentColor;
 
   --tabs-border: threedshadow;
+  --tab-line-color: highlight;
 
   --toolbar-non-lwt-bgcolor: -moz-dialog;
   --toolbar-non-lwt-textcolor: -moz-dialogtext;
   --toolbar-non-lwt-bgimage: linear-gradient(rgba(255,255,255,.15), rgba(255,255,255,.15));
   --toolbar-bgcolor: var(--toolbar-non-lwt-bgcolor);
   --toolbar-bgimage: var(--toolbar-non-lwt-bgimage);
 
   --toolbarbutton-vertical-text-padding: calc(var(--toolbarbutton-inner-padding) - 1px);
@@ -32,16 +33,17 @@
   --urlbar-separator-color: ThreeDLightShadow;
 
   --toolbox-border-bottom-color: ThreeDShadow;
 }
 
 @media (-moz-windows-default-theme) {
   :root {
     --tabs-border: rgba(0,0,0,.3);
+    --tab-line-color: #0a84ff;
 
     --toolbar-non-lwt-bgcolor: #f9f9fa;
     --toolbar-non-lwt-textcolor: #0c0c0d;
     --toolbar-non-lwt-bgimage: none;
 
     --toolbarbutton-icon-fill-opacity: .7;
 
     --panel-separator-color: hsla(210,4%,10%,.14);
@@ -403,26 +405,16 @@ menuitem.bookmark-item {
 }
 
 
 %include ../shared/bookmarked-notification.inc.css
 %include ../shared/toolbarbuttons.inc.css
 %include ../shared/toolbarbutton-icons.inc.css
 %include ../shared/menupanel.inc.css
 
-.unified-nav-back[_moz-menuactive]:-moz-locale-dir(ltr),
-.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("chrome://browser/skin/menu-back.png") !important;
-}
-
-.unified-nav-forward[_moz-menuactive]:-moz-locale-dir(ltr),
-.unified-nav-back[_moz-menuactive]:-moz-locale-dir(rtl) {
-  list-style-image: url("chrome://browser/skin/menu-forward.png") !important;
-}
-
 /* ::::: fullscreen window controls ::::: */
 
 #minimize-button,
 #restore-button,
 #close-button {
   -moz-appearance: none;
   border: none;
   margin: 0 !important;
--- a/browser/themes/windows/jar.mn
+++ b/browser/themes/windows/jar.mn
@@ -6,18 +6,16 @@ browser.jar:
 % skin browser classic/1.0 %skin/classic/browser/
 #include ../shared/jar.inc.mn
   skin/classic/browser/sanitizeDialog.css
   skin/classic/browser/aboutSessionRestore-window-icon.png
 * skin/classic/browser/syncedtabs/sidebar.css     (syncedtabs/sidebar.css)
 * skin/classic/browser/browser.css
 * skin/classic/browser/compacttheme.css
   skin/classic/browser/livemark-folder.png
-  skin/classic/browser/menu-back.png
-  skin/classic/browser/menu-forward.png
   skin/classic/browser/menuPanel-customize.png
   skin/classic/browser/menuPanel-customize@2x.png
   skin/classic/browser/menuPanel-exit.png
   skin/classic/browser/menuPanel-exit@2x.png
   skin/classic/browser/menuPanel-help.png
   skin/classic/browser/menuPanel-help@2x.png
   skin/classic/browser/monitor.png
   skin/classic/browser/monitor_16-10.png
deleted file mode 100644
index 7c1d4620a5c4ff0a64ad5a9071772b05b3e1ef53..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
literal 0
Hc$@<O00001
--- a/build/sparse-profiles/taskgraph
+++ b/build/sparse-profiles/taskgraph
@@ -17,11 +17,15 @@ path:taskcluster/
 # them all in.
 path:testing/config/tooltool-manifests/
 path:testing/mozharness/
 path:tools/lint/
 
 # for new-style try pushes
 path:try_task_config.json
 
+# Moz.build files are read in filesystem mode
+glob:**/moz.build
+glob:**/*.mozbuild
+
 # Tooltool manifests also need to be opened. Assume they
 # are all somewhere in "tooltool-manifests" directories.
 glob:**/tooltool-manifests/**
new file mode 100644
--- /dev/null
+++ b/build/sparse-profiles/toolchain-build
@@ -0,0 +1,9 @@
+# Profile needed to build toolchain tasks.
+
+# This is probably a little wider than we need it to be. But it is
+# still relatively small and it keeps this profile simple.
+%include build/sparse-profiles/taskgraph
+
+[include]
+# Needed by build-clang.py.
+path:tools/rewriting/
\ No newline at end of file
--- a/build/unix/elfhack/elfhack.cpp
+++ b/build/unix/elfhack/elfhack.cpp
@@ -619,17 +619,31 @@ int do_relocation_section(Elf *elf, unsi
             }
             new_rels.push_back(*i);
             init_array_reloc = new_rels.size();
         } else if (!(loc.getSection()->getFlags() & SHF_WRITE) || (ELF32_R_TYPE(i->r_info) != rel_type)) {
             // Don't pack relocations happening in non writable sections.
             // Our injected code is likely not to be allowed to write there.
             new_rels.push_back(*i);
         } else {
-            // TODO: check that i->r_addend == *i->r_offset
+            // With Elf_Rel, the value pointed by the relocation offset is the addend.
+            // With Elf_Rela, the addend is in the relocation entry, but the elfhacked
+            // relocation info doesn't contain it. Elfhack relies on the value pointed
+            // by the relocation offset to also contain the addend. Which is true with
+            // BFD ld and gold, but not lld, which leaves that nulled out. So if that
+            // value is nulled out, we update it to the addend.
+            Elf_Addr addr(loc.getBuffer(), entry_sz, elf->getClass(), elf->getData());
+            unsigned int addend = get_addend(&*i, elf);
+            if (addr.value == 0) {
+                addr.value = addend;
+                addr.serialize(const_cast<char*>(loc.getBuffer()), entry_sz, elf->getClass(), elf->getData());
+            } else if (addr.value != addend) {
+                fprintf(stderr, "Relocation addend inconsistent with content. Skipping\n");
+                return -1;
+            }
             if (i->r_offset == relhack_entry.r_offset + relhack_entry.r_info * entry_sz) {
                 relhack_entry.r_info++;
             } else {
                 if (relhack_entry.r_offset)
                     relhack->push_back(relhack_entry);
                 relhack_entry.r_offset = i->r_offset;
                 relhack_entry.r_info = 1;
             }
--- a/devtools/client/debugger/new/test/mochitest/browser.ini
+++ b/devtools/client/debugger/new/test/mochitest/browser.ini
@@ -66,16 +66,17 @@ skip-if = debug # bug 1374187
 [browser_dbg-iframes.js]
 [browser_dbg_keyboard_navigation.js]
 [browser_dbg_keyboard-shortcuts.js]
 skip-if = os == "linux" # bug 1351952
 [browser_dbg-pause-exceptions.js]
 skip-if = true # Bug 1393121
 [browser_dbg-navigation.js]
 [browser_dbg-pretty-print.js]
+[browser_dbg-pretty-print-console.js]
 [browser_dbg-pretty-print-paused.js]
 [browser_dbg-scopes-mutations.js]
 [browser_dbg-search-file.js]
 skip-if = os == "win" # Bug 1393121
 [browser_dbg-search-sources.js]
 skip-if = os == "win" # Bug 1393121
 [browser_dbg-search-symbols.js]
 skip-if = os == "win" # Bug 1393121
new file mode 100644
--- /dev/null
+++ b/devtools/client/debugger/new/test/mochitest/browser_dbg-pretty-print-console.js
@@ -0,0 +1,47 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+// Tests that pretty-printing updates console messages.
+
+async function waitFor(condition) {
+  await BrowserTestUtils.waitForCondition(condition, "waitFor", 10, 500);
+  return condition();
+}
+
+add_task(async function () {
+  const dbg = await initDebugger("doc-minified.html");
+  invokeInTab("arithmetic");
+
+  info("Switch to console and check message");
+  const toolbox = dbg.toolbox;
+  const console = await toolbox.selectTool("webconsole");
+  const hud = console.hud;
+
+  let node = await waitFor(() => hud.ui.outputNode.querySelector(".frame-link-source"));
+  const initialLocation = "math.min.js:3:65";
+  is(node.textContent, initialLocation, "location is correct in minified code");
+
+  info("Switch back to debugger and pretty-print");
+  await toolbox.selectTool("jsdebugger");
+  await selectSource(dbg, "math.min.js", 2);
+  clickElement(dbg, "prettyPrintButton");
+
+  await waitForSource(dbg, "math.min.js:formatted");
+  const ppSrc = findSource(dbg, "math.min.js:formatted");
+
+  ok(ppSrc, "Pretty-printed source exists");
+
+  info("Switch back to console and check message");
+  node = await waitFor(() => {
+    // Wait until the message updates.
+    const found = hud.ui.outputNode.querySelector(".frame-link-source");
+    if (found.textContent == initialLocation) {
+      return null;
+    }
+    return found;
+  });
+
+  is(node.textContent, "math.min.js:formatted:22", "location is correct in minified code");
+});
--- a/devtools/client/framework/source-map-url-service.js
+++ b/devtools/client/framework/source-map-url-service.js
@@ -3,31 +3,39 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 "use strict";
 
 const Services = require("Services");
 const SOURCE_MAP_PREF = "devtools.source-map.client-service.enabled";
 
 /**
  * A simple service to track source actors and keep a mapping between
- * original URLs and objects holding the source actor's ID (which is
- * used as a cookie by the devtools-source-map service) and the source
- * map URL.
+ * original URLs and objects holding the source or style actor's ID
+ * (which is used as a cookie by the devtools-source-map service) and
+ * the source map URL.
  *
  * @param {object} toolbox
  *        The toolbox.
  * @param {SourceMapService} sourceMapService
  *        The devtools-source-map functions
  */
 function SourceMapURLService(toolbox, sourceMapService) {
   this._toolbox = toolbox;
   this._target = toolbox.target;
   this._sourceMapService = sourceMapService;
+  // Map from content URLs to descriptors.  Descriptors are later
+  // passed to the source map worker.
   this._urls = new Map();
+  // Map from (stringified) locations to callbacks that are called
+  // when the service decides a location should change (say, a source
+  // map is available or the user changes the pref).
   this._subscriptions = new Map();
+  // A backward map from actor IDs to the original URL.  This is used
+  // to support pretty-printing.
+  this._idMap = new Map();
 
   this._onSourceUpdated = this._onSourceUpdated.bind(this);
   this.reset = this.reset.bind(this);
   this._prefValue = Services.prefs.getBoolPref(SOURCE_MAP_PREF);
   this._onPrefChanged = this._onPrefChanged.bind(this);
   this._onNewStyleSheet = this._onNewStyleSheet.bind(this);
 
   this._target.on("source-updated", this._onSourceUpdated);
@@ -75,32 +83,33 @@ SourceMapURLService.prototype._getLoadin
 
 /**
  * Reset the service.  This flushes the internal cache.
  */
 SourceMapURLService.prototype.reset = function () {
   this._sourceMapService.clearSourceMaps();
   this._urls.clear();
   this._subscriptions.clear();
+  this._idMap.clear();
 };
 
 /**
  * Shut down the service, unregistering its event listeners and
  * flushing the cache.  After this call the service will no longer
  * function.
  */
 SourceMapURLService.prototype.destroy = function () {
   this.reset();
   this._target.off("source-updated", this._onSourceUpdated);
   this._target.off("will-navigate", this.reset);
   if (this._stylesheetsFront) {
     this._stylesheetsFront.off("stylesheet-added", this._onNewStyleSheet);
   }
   Services.prefs.removeObserver(SOURCE_MAP_PREF, this._onPrefChanged);
-  this._target = this._urls = this._subscriptions = null;
+  this._target = this._urls = this._subscriptions = this._idMap = null;
 };
 
 /**
  * A helper function that is called when a new source is available.
  */
 SourceMapURLService.prototype._onSourceUpdated = function (_, sourceEvent) {
   // Maybe we were shut down while waiting.
   if (!this._urls) {
@@ -109,32 +118,70 @@ SourceMapURLService.prototype._onSourceU
 
   let { source } = sourceEvent;
   let { generatedUrl, url, actor: id, sourceMapURL } = source;
 
   // |generatedUrl| comes from the actor and is extracted from the
   // source code by SpiderMonkey.
   let seenUrl = generatedUrl || url;
   this._urls.set(seenUrl, { id, url: seenUrl, sourceMapURL });
+  this._idMap.set(id, seenUrl);
 };
 
 /**
  * A helper function that is called when a new style sheet is
  * available.
  * @param {StyleSheetActor} sheet
  *        The new style sheet's actor.
  */
 SourceMapURLService.prototype._onNewStyleSheet = function (sheet) {
   // Maybe we were shut down while waiting.
   if (!this._urls) {
     return;
   }
 
   let {href: url, sourceMapURL, actor: id} = sheet._form;
   this._urls.set(url, { id, url, sourceMapURL});
+  this._idMap.set(id, url);
+};
+
+/**
+ * A callback that is called from the lower-level source map service
+ * proxy (see toolbox.js) when some tool has installed a new source
+ * map.  This happens when pretty-printing a source.
+ *
+ * @param {String} id
+ *        The actor ID (used as a cookie here as elsewhere in this file)
+ * @param {String} newUrl
+ *        The URL of the pretty-printed source
+ */
+SourceMapURLService.prototype.sourceMapChanged = function (id, newUrl) {
+  if (!this._urls) {
+    return;
+  }
+
+  let urlKey = this._idMap.get(id);
+  if (urlKey) {
+    // The source map URL here doesn't actually matter.
+    this._urls.set(urlKey, { id, url: newUrl, sourceMapURL: "" });
+
+    // Walk over all the location subscribers, looking for any that
+    // are subscribed to a location coming from |urlKey|.  Then,
+    // re-notify any such subscriber by clearing the stored promise
+    // and forcing a re-evaluation.
+    for (let [, subscriptionEntry] of this._subscriptions) {
+      if (subscriptionEntry.url === urlKey) {
+        // Force an update.
+        subscriptionEntry.promise = null;
+        for (let callback of subscriptionEntry.callbacks) {
+          this._callOneCallback(subscriptionEntry, callback);
+        }
+      }
+    }
+  }
 };
 
 /**
  * Look up the original position for a given location.  This returns a
  * promise resolving to either the original location, or null if the
  * given location is not source-mapped.  If a location is returned, it
  * is of the same form as devtools-source-map's |getOriginalLocation|.
  *
--- a/devtools/client/framework/toolbox.js
+++ b/devtools/client/framework/toolbox.js
@@ -598,16 +598,32 @@ Toolbox.prototype = {
                   // returns.
                   return {
                     text: message,
                     contentType: "text/plain",
                   };
                 });
             };
 
+          case "applySourceMap":
+            return (generatedId, url, code, mappings) => {
+              return target.applySourceMap(generatedId, url, code, mappings)
+                .then(result => {
+                  // If a tool has changed or introduced a source map
+                  // (e.g, by pretty-printing a source), tell the
+                  // source map URL service about the change, so that
+                  // subscribers to that service can be updated as
+                  // well.
+                  if (this._sourceMapURLService) {
+                    this._sourceMapURLService.sourceMapChanged(generatedId, url);
+                  }
+                  return result;
+                });
+            };
+
           default:
             return target[name];
         }
       },
     });
 
     this._sourceMapService.startSourceMapWorker(SOURCE_MAP_WORKER);
     return this._sourceMapService;
--- a/devtools/client/themes/components-frame.css
+++ b/devtools/client/themes/components-frame.css
@@ -7,25 +7,30 @@
  * Frame Component
  * Styles for React component at `devtools/client/shared/components/frame.js`
  */
 
 .frame-link {
   display: flex;
   justify-content: space-between;
   --frame-link-line-color: var(--theme-highlight-blue);
+  --frame-link-source: var(--theme-highlight-purple);
+}
+
+.theme-dark .frame-link {
+  --frame-link-source: #6B89FF;
 }
 
 .frame-link-async-cause {
   color: var(--theme-comment);
 }
 
 .frame-link .frame-link-source {
   flex: initial;
-  color: var(--theme-highlight-purple);
+  color: var(--frame-link-source);
 }
 
 .frame-link a.frame-link-source {
   cursor: pointer;
   text-decoration: none;
   font-style: normal;
 }
 
--- a/devtools/client/themes/webconsole.css
+++ b/devtools/client/themes/webconsole.css
@@ -716,17 +716,17 @@ a.learn-more-link.webconsole-learn-more-
 .webconsole-output-wrapper {
   display: flex;
   flex-direction: column;
   height: 100%;
   -moz-user-focus: normal;
   color: var(--console-output-color);
   --console-output-indent-width: 1rem;
   --console-output-indent-border-color: var(--theme-selection-background);
-  --icon-top-margin: 4px;
+  --icon-top-margin: 3px;
   --object-inspector-hover-background: transparent;
   --attachment-margin-block-end: 3px;
 }
 
 /* Webconsole specific theme variables */
 .theme-light .webconsole-output-wrapper,
 .theme-firebug .webconsole-output-wrapper {
   --error-color: var(--red-70);
@@ -831,40 +831,42 @@ a.learn-more-link.webconsole-learn-more-
 
 .webconsole-filterbar-filtered-messages .reset-filters-button {
   margin-inline-start: 0.5em;
 }
 
 .webconsole-output {
   flex: 1;
   overflow: auto;
-  font-size: 13px;
 }
 
 .webconsole-output-wrapper .message {
   --border-size: 3px;
   border-inline-start: var(--border-size) solid transparent;
 }
 
 .webconsole-output-wrapper .message:hover {
   border-inline-start-color: var(--theme-highlight-blue);
 }
 
 .webconsole-output-wrapper .message.warn.warn {
   background-color: var(--warning-background-color);
+}
+
+.webconsole-output-wrapper .message.error .message-body {
+  color: var(--error-color);
+}
+
+.webconsole-output-wrapper .message.warn .message-body {
   color: var(--warning-color);
 }
 
-/* Special casing error and warning String reps so they are legible */
-.webconsole-output-wrapper .message.error .message-body,
-.webconsole-output-wrapper .message.error .message-body > .objectBox-string {
-  color: var(--error-color);
-}
-.webconsole-output-wrapper .message.warn .message-body > .objectBox-string {
-  color: var(--warning-color);
+/* Special casing String reps so they are legible */
+.webconsole-output-wrapper .message .message-body > .objectBox-string {
+  color: currentColor;
 }
 
 /* Special casing dark-theme error and warning ObjectInspector colors */
 .theme-dark .webconsole-output-wrapper .message.error .tree.object-inspector .object-label,
 .theme-dark .webconsole-output-wrapper .message.error .tree.object-inspector .object-label *,
 .theme-dark .webconsole-output-wrapper .message.warn .tree.object-inspector .object-label,
 .theme-dark .webconsole-output-wrapper .message.warn .tree.object-inspector .object-label *,
 .theme-dark .webconsole-output-wrapper .message.error .objectLeftBrace,
@@ -909,17 +911,17 @@ a.learn-more-link.webconsole-learn-more-
 }
 
 .webconsole-output-wrapper .message.warn > .icon::before {
   /* Yellow warning icon */
   background-position: -24px -24px;
 }
 
 .webconsole-output-wrapper .message .theme-twisty {
-  margin: var(--icon-top-margin) 0 0 0;
+  margin: calc(var(--icon-top-margin) - 1px) 0 0 0;
 }
 
 .message.error > .icon::before {
   background-position: -12px -36px;
 }
 
 .message.warn > .icon::before {
   background-position: -24px -36px;
--- a/devtools/client/webconsole/new-console-output/actions/messages.js
+++ b/devtools/client/webconsole/new-console-output/actions/messages.js
@@ -9,27 +9,53 @@
 const {
   prepareMessage
 } = require("devtools/client/webconsole/new-console-output/utils/messages");
 const { IdGenerator } = require("devtools/client/webconsole/new-console-output/utils/id-generator");
 const { batchActions } = require("devtools/client/shared/redux/middleware/debounce");
 
 const {
   MESSAGE_ADD,
+  MESSAGES_ADD,
   NETWORK_MESSAGE_UPDATE,
   NETWORK_UPDATE_REQUEST,
   MESSAGES_CLEAR,
   MESSAGE_OPEN,
   MESSAGE_CLOSE,
   MESSAGE_TYPE,
   MESSAGE_TABLE_RECEIVE,
 } = require("../constants");
 
 const defaultIdGenerator = new IdGenerator();
 
+function messagesAdd(packets, idGenerator = null) {
+  if (idGenerator == null) {
+    idGenerator = defaultIdGenerator;
+  }
+  let messages = packets.map(packet => prepareMessage(packet, idGenerator));
+  for (let i = messages.length - 1; i >= 0; i--) {
+    if (messages[i].type === MESSAGE_TYPE.CLEAR) {
+      return batchActions([
+        messagesClear(),
+        {
+          type: MESSAGES_ADD,
+          messages: messages.slice(i),
+        }
+      ]);
+    }
+  }
+
+  // When this is used for non-cached messages then handle clear message and
+  // split up into batches
+  return {
+    type: MESSAGES_ADD,
+    messages
+  };
+}
+
 function messageAdd(packet, idGenerator = null) {
   if (idGenerator == null) {
     idGenerator = defaultIdGenerator;
   }
   let message = prepareMessage(packet, idGenerator);
   const addMessageAction = {
     type: MESSAGE_ADD,
     message
@@ -112,16 +138,17 @@ function networkUpdateRequest(id, data) 
     type: NETWORK_UPDATE_REQUEST,
     id,
     data,
   };
 }
 
 module.exports = {
   messageAdd,
+  messagesAdd,
   messagesClear,
   messageOpen,
   messageClose,
   messageTableDataGet,
   networkMessageUpdate,
   networkUpdateRequest,
   // for test purpose only.
   messageTableDataReceive,
--- a/devtools/client/webconsole/new-console-output/components/message.js
+++ b/devtools/client/webconsole/new-console-output/components/message.js
@@ -73,17 +73,17 @@ const Message = createClass({
     if (this.messageNode) {
       if (this.props.scrollToMessage) {
         this.messageNode.scrollIntoView();
       }
       // Event used in tests. Some message types don't pass it in because existing tests
       // did not emit for them.
       if (this.props.serviceContainer) {
         this.props.serviceContainer.emitNewMessage(
-          this.messageNode, this.props.messageId);
+          this.messageNode, this.props.messageId, this.props.timeStamp);
       }
     }
   },
 
   onLearnMoreClick: function () {
     let {exceptionDocURL} = this.props;
     this.props.serviceContainer.openLink(exceptionDocURL);
   },
--- a/devtools/client/webconsole/new-console-output/constants.js
+++ b/devtools/client/webconsole/new-console-output/constants.js
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 "use strict";
 
 const actionTypes = {
   BATCH_ACTIONS: "BATCH_ACTIONS",
   MESSAGE_ADD: "MESSAGE_ADD",
+  MESSAGES_ADD: "MESSAGES_ADD",
   MESSAGES_CLEAR: "MESSAGES_CLEAR",
   MESSAGE_OPEN: "MESSAGE_OPEN",
   MESSAGE_CLOSE: "MESSAGE_CLOSE",
   NETWORK_MESSAGE_UPDATE: "NETWORK_MESSAGE_UPDATE",
   NETWORK_UPDATE_REQUEST: "NETWORK_UPDATE_REQUEST",
   MESSAGE_TABLE_RECEIVE: "MESSAGE_TABLE_RECEIVE",
   REMOVED_ACTORS_CLEAR: "REMOVED_ACTORS_CLEAR",
   TIMESTAMPS_TOGGLE: "TIMESTAMPS_TOGGLE",
--- a/devtools/client/webconsole/new-console-output/new-console-output-wrapper.js
+++ b/devtools/client/webconsole/new-console-output/new-console-output-wrapper.js
@@ -4,39 +4,41 @@
 "use strict";
 
 // React & Redux
 const React = require("devtools/client/shared/vendor/react");
 const ReactDOM = require("devtools/client/shared/vendor/react-dom");
 const { Provider } = require("devtools/client/shared/vendor/react-redux");
 
 const actions = require("devtools/client/webconsole/new-console-output/actions/index");
-const { batchActions } = require("devtools/client/shared/redux/middleware/debounce");
 const { createContextMenu } = require("devtools/client/webconsole/new-console-output/utils/context-menu");
 const { configureStore } = require("devtools/client/webconsole/new-console-output/store");
 
 const EventEmitter = require("devtools/shared/old-event-emitter");
 const ConsoleOutput = React.createFactory(require("devtools/client/webconsole/new-console-output/components/console-output"));
 const FilterBar = React.createFactory(require("devtools/client/webconsole/new-console-output/components/filter-bar"));
 
 let store = null;
-let queuedActions = [];
-let throttledDispatchTimeout = false;
 
 function NewConsoleOutputWrapper(parentNode, jsterm, toolbox, owner, document) {
   EventEmitter.decorate(this);
 
   this.parentNode = parentNode;
   this.jsterm = jsterm;
   this.toolbox = toolbox;
   this.owner = owner;
   this.document = document;
 
   this.init = this.init.bind(this);
 
+  this.queuedMessageAdds = [];
+  this.queuedMessageUpdates = [];
+  this.queuedRequestUpdates = [];
+  this.throttledDispatchTimeout = false;
+
   store = configureStore(this.jsterm.hud);
 }
 NewConsoleOutputWrapper.prototype = {
   init: function () {
     const attachRefToHud = (id, node) => {
       this.jsterm.hud[id] = node;
     };
     // Focus the input line whenever the output area is clicked.
@@ -68,20 +70,21 @@ NewConsoleOutputWrapper.prototype = {
         return;
       }
 
       this.jsterm.focus();
     });
 
     const serviceContainer = {
       attachRefToHud,
-      emitNewMessage: (node, messageId) => {
+      emitNewMessage: (node, messageId, timeStamp) => {
         this.jsterm.hud.emit("new-messages", new Set([{
           node,
           messageId,
+          timeStamp,
         }]));
       },
       hudProxy: this.jsterm.hud.proxy,
       openLink: url => {
         this.jsterm.hud.owner.openLink(url);
       },
       createElement: nodename => {
         return this.document.createElementNS("http://www.w3.org/1999/xhtml", nodename);
@@ -175,90 +178,120 @@ NewConsoleOutputWrapper.prototype = {
         {className: "webconsole-output-wrapper"},
         filterBar,
         childComponent
     ));
     this.body = ReactDOM.render(provider, this.parentNode);
 
     this.jsterm.focus();
   },
+
   dispatchMessageAdd: function (message, waitForResponse) {
-    let action = actions.messageAdd(message);
-    batchedMessageAdd(action);
     // Wait for the message to render to resolve with the DOM node.
     // This is just for backwards compatibility with old tests, and should
     // be removed once it's not needed anymore.
     // Can only wait for response if the action contains a valid message.
-    if (waitForResponse && action.message) {
-      let messageId = action.message.id;
-      return new Promise(resolve => {
+    let promise;
+    if (waitForResponse) {
+      promise = new Promise(resolve => {
         let jsterm = this.jsterm;
         jsterm.hud.on("new-messages", function onThisMessage(e, messages) {
           for (let m of messages) {
-            if (m.messageId === messageId) {
+            if (m.timeStamp === message.timestamp) {
               resolve(m.node);
               jsterm.hud.off("new-messages", onThisMessage);
               return;
             }
           }
         });
       });
+    } else {
+      promise = Promise.resolve();
     }
 
-    return Promise.resolve();
+    this.batchedMessagesAdd(message);
+    return promise;
   },
 
   dispatchMessagesAdd: function (messages) {
-    const batchedActions = messages.map(message => actions.messageAdd(message));
-    store.dispatch(batchActions(batchedActions));
+    store.dispatch(actions.messagesAdd(messages));
   },
 
   dispatchMessagesClear: function () {
     store.dispatch(actions.messagesClear());
   },
 
   dispatchTimestampsToggle: function (enabled) {
     store.dispatch(actions.timestampsToggle(enabled));
   },
 
   dispatchMessageUpdate: function (message, res) {
     // network-message-updated will emit when all the update message arrives.
     // Since we can't ensure the order of the network update, we check
     // that networkInfo.updates has all we need.
     const NUMBER_OF_NETWORK_UPDATE = 8;
     if (res.networkInfo.updates.length === NUMBER_OF_NETWORK_UPDATE) {
-      batchedMessageAdd(actions.networkMessageUpdate(message));
-      this.jsterm.hud.emit("network-message-updated", res);
+      this.batchedMessageUpdates({ res, message });
     }
   },
 
   dispatchRequestUpdate: function (id, data) {
-    batchedMessageAdd(actions.networkUpdateRequest(id, data));
+    this.batchedRequestUpdates({ id, data });
+  },
+
+  batchedMessageUpdates: function (info) {
+    this.queuedMessageUpdates.push(info);
+    this.setTimeoutIfNeeded();
+  },
+
+  batchedRequestUpdates: function (message) {
+    this.queuedRequestUpdates.push(message);
+    this.setTimeoutIfNeeded();
+  },
+
+  batchedMessagesAdd: function (message) {
+    this.queuedMessageAdds.push(message);
+    this.setTimeoutIfNeeded();
+  },
+
+  setTimeoutIfNeeded: function () {
+    if (this.throttledDispatchTimeout) {
+      return;
+    }
+
+    this.throttledDispatchTimeout = setTimeout(() => {
+      this.throttledDispatchTimeout = null;
 
-    // Fire an event indicating that all data fetched from
-    // the backend has been received. This is based on
-    // 'FirefoxDataProvider.isQueuePayloadReady', see more
-    // comments in that method.
-    // (netmonitor/src/connector/firefox-data-provider).
-    // This event might be utilized in tests to find the right
-    // time when to finish.
-    this.jsterm.hud.emit("network-request-payload-ready", {id, data});
+      store.dispatch(actions.messagesAdd(this.queuedMessageAdds));
+      this.queuedMessageAdds = [];
+
+      if (this.queuedMessageUpdates.length > 0) {
+        this.queuedMessageUpdates.forEach(({ message, res }) => {
+          actions.networkMessageUpdate(message);
+          this.jsterm.hud.emit("network-message-updated", res);
+        });
+        this.queuedMessageUpdates = [];
+      }
+      if (this.queuedRequestUpdates.length > 0) {
+        this.queuedRequestUpdates.forEach(({ id, data}) => {
+          actions.networkUpdateRequest(id, data);
+          // Fire an event indicating that all data fetched from
+          // the backend has been received. This is based on
+          // 'FirefoxDataProvider.isQueuePayloadReady', see more
+          // comments in that method.
+          // (netmonitor/src/connector/firefox-data-provider).
+          // This event might be utilized in tests to find the right
+          // time when to finish.
+          this.jsterm.hud.emit("network-request-payload-ready", {id, data});
+        });
+        this.queuedRequestUpdates = [];
+      }
+    }, 50);
   },
 
   // Should be used for test purpose only.
   getStore: function () {
     return store;
   }
 };
 
-function batchedMessageAdd(action) {
-  queuedActions.push(action);
-  if (!throttledDispatchTimeout) {
-    throttledDispatchTimeout = setTimeout(() => {
-      store.dispatch(batchActions(queuedActions));
-      queuedActions = [];
-      throttledDispatchTimeout = null;
-    }, 50);
-  }
-}
-
 // Exports from this module
 module.exports = NewConsoleOutputWrapper;
--- a/devtools/client/webconsole/new-console-output/reducers/messages.js
+++ b/devtools/client/webconsole/new-console-output/reducers/messages.js
@@ -49,105 +49,141 @@ const MessageState = Immutable.Record({
   removedActors: [],
   // Map of the form {messageId : numberOfRepeat}
   repeatById: {},
   // Map of the form {messageId : networkInformation}
   // `networkInformation` holds request, response, totalTime, ...
   networkMessagesUpdateById: {},
 });
 
-function messages(state = new MessageState(), action, filtersState, prefsState) {
+function addMessage(state, filtersState, prefsState, newMessage) {
   const {
     messagesById,
     messagesUiById,
-    messagesTableDataById,
-    networkMessagesUpdateById,
     groupsById,
     currentGroup,
     repeatById,
     visibleMessages,
     filteredMessagesCount,
   } = state;
 
+  if (newMessage.type === constants.MESSAGE_TYPE.NULL_MESSAGE) {
+    // When the message has a NULL type, we don't add it.
+    return state;
+  }
+
+  if (newMessage.type === constants.MESSAGE_TYPE.END_GROUP) {
+    // Compute the new current group.
+    return state.set("currentGroup", getNewCurrentGroup(currentGroup, groupsById));
+  }
+
+  if (newMessage.allowRepeating && messagesById.size > 0) {
+    let lastMessage = messagesById.last();
+    if (
+      lastMessage.repeatId === newMessage.repeatId
+      && lastMessage.groupId === currentGroup
+    ) {
+      return state.set(
+        "repeatById",
+        Object.assign({}, repeatById, {
+          [lastMessage.id]: (repeatById[lastMessage.id] || 1) + 1
+        })
+      );
+    }
+  }
+
+  return state.withMutations(function (record) {
+    // Add the new message with a reference to the parent group.
+    let parentGroups = getParentGroups(currentGroup, groupsById);
+    newMessage.groupId = currentGroup;
+    newMessage.indent = parentGroups.length;
+
+    const addedMessage = Object.freeze(newMessage);
+    record.set(
+      "messagesById",
+      messagesById.set(newMessage.id, addedMessage)
+    );
+
+    if (newMessage.type === "trace") {
+      // We want the stacktrace to be open by default.
+      record.set("messagesUiById", messagesUiById.push(newMessage.id));
+    } else if (isGroupType(newMessage.type)) {
+      record.set("currentGroup", newMessage.id);
+      record.set("groupsById", groupsById.set(newMessage.id, parentGroups));
+
+      if (newMessage.type === constants.MESSAGE_TYPE.START_GROUP) {
+        // We want the group to be open by default.
+        record.set("messagesUiById", messagesUiById.push(newMessage.id));
+      }
+    }
+
+    const {
+      visible,
+      cause
+    } = getMessageVisibility(addedMessage, record, filtersState);
+
+    if (visible) {
+      record.set("visibleMessages", [...visibleMessages, newMessage.id]);
+    } else if (DEFAULT_FILTERS.includes(cause)) {
+      record.set("filteredMessagesCount", Object.assign({}, filteredMessagesCount, {
+        global: filteredMessagesCount.global + 1,
+        [cause]: filteredMessagesCount[cause] + 1
+      }));
+    }
+  });
+}
+
+function messages(state = new MessageState(), action, filtersState, prefsState) {
+  const {
+    messagesById,
+    messagesUiById,
+    messagesTableDataById,
+    networkMessagesUpdateById,
+    groupsById,
+    visibleMessages,
+  } = state;
+
   const {logLimit} = prefsState;
 
+  let newState;
   switch (action.type) {
-    case constants.MESSAGE_ADD:
-      let newMessage = action.message;
-
-      if (newMessage.type === constants.MESSAGE_TYPE.NULL_MESSAGE) {
-        // When the message has a NULL type, we don't add it.
-        return state;
-      }
-
-      if (newMessage.type === constants.MESSAGE_TYPE.END_GROUP) {
-        // Compute the new current group.
-        return state.set("currentGroup", getNewCurrentGroup(currentGroup, groupsById));
-      }
+    case constants.MESSAGES_ADD:
+      newState = state;
 
-      if (newMessage.allowRepeating && messagesById.size > 0) {
-        let lastMessage = messagesById.last();
-        if (
-          lastMessage.repeatId === newMessage.repeatId
-          && lastMessage.groupId === currentGroup
-        ) {
-          return state.set(
-            "repeatById",
-            Object.assign({}, repeatById, {
-              [lastMessage.id]: (repeatById[lastMessage.id] || 1) + 1
-            })
-          );
+      // Preemptively remove messages that will never be rendered
+      let list = [];
+      let prunableCount = 0;
+      let lastMessageRepeatId = -1;
+      for (let i = action.messages.length - 1; i >= 0; i--) {
+        let message = action.messages[i];
+        if (!message.groupId && !isGroupType(message.type) &&
+            message.type !== MESSAGE_TYPE.END_GROUP) {
+          prunableCount++;
+          // Once we've added the max number of messages that can be added, stop.
+          // Except for repeated messages, where we keep adding over the limit.
+          if (prunableCount <= logLimit || message.repeatId == lastMessageRepeatId) {
+            list.unshift(action.messages[i]);
+          } else {
+            break;
+          }
+        } else {
+          list.unshift(message);
         }
+        lastMessageRepeatId = message.repeatId;
       }
 
-      return state.withMutations(function (record) {
-        // Add the new message with a reference to the parent group.
-        let parentGroups = getParentGroups(currentGroup, groupsById);
-        newMessage.groupId = currentGroup;
-        newMessage.indent = parentGroups.length;
-
-        const addedMessage = Object.freeze(newMessage);
-        record.set(
-          "messagesById",
-          messagesById.set(newMessage.id, addedMessage)
-        );
-
-        if (newMessage.type === "trace") {
-          // We want the stacktrace to be open by default.
-          record.set("messagesUiById", messagesUiById.push(newMessage.id));
-        } else if (isGroupType(newMessage.type)) {
-          record.set("currentGroup", newMessage.id);
-          record.set("groupsById", groupsById.set(newMessage.id, parentGroups));
+      list.forEach(message => {
+        newState = addMessage(newState, filtersState, prefsState, message);
+      });
 
-          if (newMessage.type === constants.MESSAGE_TYPE.START_GROUP) {
-            // We want the group to be open by default.
-            record.set("messagesUiById", messagesUiById.push(newMessage.id));
-          }
-        }
-
-        const {
-          visible,
-          cause
-        } = getMessageVisibility(addedMessage, record, filtersState);
+      return limitTopLevelMessageCount(newState, logLimit);
 
-        if (visible) {
-          record.set("visibleMessages", [...visibleMessages, newMessage.id]);
-        } else if (DEFAULT_FILTERS.includes(cause)) {
-          record.set("filteredMessagesCount", Object.assign({}, filteredMessagesCount, {
-            global: filteredMessagesCount.global + 1,
-            [cause]: filteredMessagesCount[cause] + 1
-          }));
-        }
-
-        // Remove top level message if the total count of top level messages
-        // exceeds the current limit.
-        if (record.messagesById.size > logLimit) {
-          limitTopLevelMessageCount(state, record, logLimit);
-        }
-      });
+    case constants.MESSAGE_ADD:
+      newState = addMessage(state, filtersState, prefsState, action.message);
+      return limitTopLevelMessageCount(newState, logLimit);
 
     case constants.MESSAGES_CLEAR:
       return new MessageState({
         // Store all actors from removed messages. This array is used by
         // `releaseActorsEnhancer` to release all of those backend actors.
         "removedActors": [...state.messagesById].reduce((res, [id, msg]) => {
           res.push(...getAllActorsInMessage(msg, state));
           return res;
@@ -261,17 +297,17 @@ function messages(state = new MessageSta
                 headers: [],
                 headersSize: 0,
               };
               break;
           }
         }
       }
 
-      let newState = state.set(
+      newState = state.set(
         "networkMessagesUpdateById",
         Object.assign({}, networkMessagesUpdateById, {
           [action.id]: Object.assign({}, request, values)
         })
       );
 
       return newState;
     }
@@ -336,109 +372,109 @@ function getParentGroups(currentGroup, g
   return groups;
 }
 
 /**
  * Remove all top level messages that exceeds message limit.
  * Also populate an array of all backend actors associated with these
  * messages so they can be released.
  */
-function limitTopLevelMessageCount(state, record, logLimit) {
-  let topLevelCount = record.groupsById.size === 0
-    ? record.messagesById.size
-    : getToplevelMessageCount(record);
-
-  if (topLevelCount <= logLimit) {
-    return record;
-  }
-
-  const removedMessagesId = [];
-  const removedActors = [];
-  let visibleMessages = [...record.visibleMessages];
-
-  let cleaningGroup = false;
-  record.messagesById.forEach((message, id) => {
-    // If we were cleaning a group and the current message does not have
-    // a groupId, we're done cleaning.
-    if (cleaningGroup === true && !message.groupId) {
-      cleaningGroup = false;
-    }
+function limitTopLevelMessageCount(state, logLimit) {
+  return state.withMutations(function (record) {
+    let topLevelCount = record.groupsById.size === 0
+      ? record.messagesById.size
+      : getToplevelMessageCount(record);
 
-    // If we're not cleaning a group and the message count is below the logLimit,
-    // we exit the forEach iteration.
-    if (cleaningGroup === false && topLevelCount <= logLimit) {
-      return false;
-    }
-
-    // If we're not currently cleaning a group, and the current message is identified
-    // as a group, set the cleaning flag to true.
-    if (cleaningGroup === false && record.groupsById.has(id)) {
-      cleaningGroup = true;
-    }
-
-    if (!message.groupId) {
-      topLevelCount--;
-    }
-
-    removedMessagesId.push(id);
-    removedActors.push(...getAllActorsInMessage(message, record));
-
-    const index = visibleMessages.indexOf(id);
-    if (index > -1) {
-      visibleMessages.splice(index, 1);
+    if (topLevelCount <= logLimit) {
+      return;
     }
 
-    return true;
-  });
+    const removedMessagesId = [];
+    const removedActors = [];
+    let visibleMessages = [...record.visibleMessages];
 
-  if (removedActors.length > 0) {
-    record.set("removedActors", record.removedActors.concat(removedActors));
-  }
-
-  if (record.visibleMessages.length > visibleMessages.length) {
-    record.set("visibleMessages", visibleMessages);
-  }
+    let cleaningGroup = false;
+    record.messagesById.forEach((message, id) => {
+      // If we were cleaning a group and the current message does not have
+      // a groupId, we're done cleaning.
+      if (cleaningGroup === true && !message.groupId) {
+        cleaningGroup = false;
+      }
 
-  const isInRemovedId = id => removedMessagesId.includes(id);
-  const mapHasRemovedIdKey = map => map.findKey((value, id) => isInRemovedId(id));
-  const objectHasRemovedIdKey = obj => Object.keys(obj).findIndex(isInRemovedId) !== -1;
-  const cleanUpCollection = map => removedMessagesId.forEach(id => map.remove(id));
-  const cleanUpList = list => list.filter(id => {
-    return isInRemovedId(id) === false;
-  });
-  const cleanUpObject = object => [...Object.entries(object)]
-    .reduce((res, [id, value]) => {
-      if (!isInRemovedId(id)) {
-        res[id] = value;
+      // If we're not cleaning a group and the message count is below the logLimit,
+      // we exit the forEach iteration.
+      if (cleaningGroup === false && topLevelCount <= logLimit) {
+        return false;
+      }
+
+      // If we're not currently cleaning a group, and the current message is identified
+      // as a group, set the cleaning flag to true.
+      if (cleaningGroup === false && record.groupsById.has(id)) {
+        cleaningGroup = true;
+      }
+
+      if (!message.groupId) {
+        topLevelCount--;
+      }
+
+      removedMessagesId.push(id);
+      removedActors.push(...getAllActorsInMessage(message, record));
+
+      const index = visibleMessages.indexOf(id);
+      if (index > -1) {
+        visibleMessages.splice(index, 1);
       }
-      return res;
-    }, {});
+
+      return true;
+    });
 
-  record.set("messagesById", record.messagesById.withMutations(cleanUpCollection));
+    if (removedActors.length > 0) {
+      record.set("removedActors", record.removedActors.concat(removedActors));
+    }
+
+    if (record.visibleMessages.length > visibleMessages.length) {
+      record.set("visibleMessages", visibleMessages);
+    }
 
-  if (record.messagesUiById.find(isInRemovedId)) {
-    record.set("messagesUiById", cleanUpList(record.messagesUiById));
-  }
-  if (mapHasRemovedIdKey(record.messagesTableDataById)) {
-    record.set("messagesTableDataById",
-      record.messagesTableDataById.withMutations(cleanUpCollection));
-  }
-  if (mapHasRemovedIdKey(record.groupsById)) {
-    record.set("groupsById", record.groupsById.withMutations(cleanUpCollection));
-  }
-  if (objectHasRemovedIdKey(record.repeatById)) {
-    record.set("repeatById", cleanUpObject(record.repeatById));
-  }
+    const isInRemovedId = id => removedMessagesId.includes(id);
+    const mapHasRemovedIdKey = map => map.findKey((value, id) => isInRemovedId(id));
+    const objectHasRemovedIdKey = obj => Object.keys(obj).findIndex(isInRemovedId) !== -1;
+    const cleanUpCollection = map => removedMessagesId.forEach(id => map.remove(id));
+    const cleanUpList = list => list.filter(id => {
+      return isInRemovedId(id) === false;
+    });
+    const cleanUpObject = object => [...Object.entries(object)]
+      .reduce((res, [id, value]) => {
+        if (!isInRemovedId(id)) {
+          res[id] = value;
+        }
+        return res;
+      }, {});
+
+    record.set("messagesById", record.messagesById.withMutations(cleanUpCollection));
 
-  if (objectHasRemovedIdKey(record.networkMessagesUpdateById)) {
-    record.set("networkMessagesUpdateById",
-      cleanUpObject(record.networkMessagesUpdateById));
-  }
+    if (record.messagesUiById.find(isInRemovedId)) {
+      record.set("messagesUiById", cleanUpList(record.messagesUiById));
+    }
+    if (mapHasRemovedIdKey(record.messagesTableDataById)) {
+      record.set("messagesTableDataById",
+        record.messagesTableDataById.withMutations(cleanUpCollection));
+    }
+    if (mapHasRemovedIdKey(record.groupsById)) {
+      record.set("groupsById", record.groupsById.withMutations(cleanUpCollection));
+    }
+    if (objectHasRemovedIdKey(record.repeatById)) {
+      record.set("repeatById", cleanUpObject(record.repeatById));
+    }
 
-  return record;
+    if (objectHasRemovedIdKey(record.networkMessagesUpdateById)) {
+      record.set("networkMessagesUpdateById",
+        cleanUpObject(record.networkMessagesUpdateById));
+    }
+  });
 }
 
 /**
  * Get an array of all the actors logged in a specific message.
  *
  * @param {Message} message: The message to get actors from.
  * @param {Record} state: The redux state.
  * @return {Array} An array containing all the actors logged in a message.
--- a/devtools/client/webconsole/new-console-output/test/store/messages.test.js
+++ b/devtools/client/webconsole/new-console-output/test/store/messages.test.js
@@ -752,9 +752,40 @@ describe("Message reducer:", () => {
       expect(table.get(id2)).toBe(tableData2);
 
       // This addition will remove the second table message.
       dispatch(actions.messageAdd(stubPackets.get("console.log('foobar', 'test')")));
 
       expect(getAllMessagesTableDataById(getState()).size).toBe(0);
     });
   });
+
+  describe("messagesAdd", () => {
+    it("still log repeated message over logLimit, but only repeated ones", () => {
+      // Log two distinct messages
+      const key1 = "console.log('foobar', 'test')";
+      const key2 = "console.log(undefined)";
+      const { dispatch, getState } = setupStore([key1, key2], null, {
+        logLimit: 2
+      });
+
+      // Then repeat the last one two times and log the first one again
+      const packet1 = clonePacket(stubPackets.get(key2));
+      const packet2 = clonePacket(stubPackets.get(key2));
+      const packet3 = clonePacket(stubPackets.get(key1));
+
+      // Repeat ID must be the same even if the timestamp is different.
+      packet1.message.timeStamp = 1;
+      packet2.message.timeStamp = 2;
+      packet3.message.timeStamp = 3;
+      dispatch(actions.messagesAdd([packet1, packet2, packet3]));
+
+      // There is still only two messages being logged,
+      const messages = getAllMessagesById(getState());
+      expect(messages.size).toBe(2);
+
+      // the second one being repeated 3 times
+      const repeat = getAllRepeatById(getState());
+      expect(repeat[messages.first().id]).toBe(3);
+      expect(repeat[messages.last().id]).toBe(undefined);
+    });
+  });
 });
--- a/dom/base/nsDOMClassInfo.cpp
+++ b/dom/base/nsDOMClassInfo.cpp
@@ -235,24 +235,16 @@ SetParentToWindow(nsGlobalWindow *win, J
   if (MOZ_UNLIKELY(!*parent)) {
     // The inner window has been torn down. The scope is dying, so don't create
     // any new wrappers.
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
-// static
-
-nsISupports *
-nsDOMClassInfo::GetNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj)
-{
-  return wrapper ? wrapper->Native() : static_cast<nsISupports*>(js::GetObjectPrivate(obj));
-}
-
 nsresult
 nsDOMClassInfo::DefineStaticJSVals()
 {
   AutoJSAPI jsapi;
   if (!jsapi.Init(xpc::UnprivilegedJunkScope())) {
     return NS_ERROR_UNEXPECTED;
   }
   JSContext* cx = jsapi.cx();
--- a/dom/base/nsDOMClassInfo.h
+++ b/dom/base/nsDOMClassInfo.h
@@ -87,23 +87,16 @@ public:
    * helper with a wrapper, even though we should be treating the lookup as a
    * transparent one.
    *
    * Note: So ObjectIsNativeWrapper(cx, obj) check usually means "through xray
    * wrapper this part is not visible".
    */
   static bool ObjectIsNativeWrapper(JSContext* cx, JSObject* obj);
 
-  static nsISupports *GetNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj);
-
-  static nsIXPConnect *XPConnect()
-  {
-    return sXPConnect;
-  }
-
 protected:
   friend nsIClassInfo* NS_GetDOMClassInfoInstance(nsDOMClassInfoID aID);
 
   const nsDOMClassInfoData* mData;
 
   virtual void PreserveWrapper(nsISupports *aNative) override
   {
   }
@@ -117,37 +110,16 @@ protected:
 
   static bool sIsInitialized;
 
 public:
   static jsid sConstructor_id;
   static jsid sWrappedJSObject_id;
 };
 
-// THIS ONE ISN'T SAFE!! It assumes that the private of the JSObject is
-// an nsISupports.
-inline
-const nsQueryInterface
-do_QueryWrappedNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj)
-{
-  return nsQueryInterface(nsDOMClassInfo::GetNative(wrapper, obj));
-}
-
-// THIS ONE ISN'T SAFE!! It assumes that the private of the JSObject is
-// an nsISupports.
-inline
-const nsQueryInterfaceWithError
-do_QueryWrappedNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj,
-                      nsresult *aError)
-
-{
-  return nsQueryInterfaceWithError(nsDOMClassInfo::GetNative(wrapper, obj),
-                                   aError);
-}
-
 typedef nsDOMClassInfo nsDOMGenericSH;
 
 // Makes sure that the wrapper is preserved if new properties are added.
 class nsEventTargetSH : public nsDOMGenericSH
 {
 protected:
   explicit nsEventTargetSH(nsDOMClassInfoData* aData) : nsDOMGenericSH(aData)
   {
--- a/gfx/layers/AnimationInfo.cpp
+++ b/gfx/layers/AnimationInfo.cpp
@@ -38,16 +38,21 @@ AnimationInfo::AddAnimation()
   // Here generates a new id when the first animation is added and
   // this id is used to represent the animations in this layer.
   EnsureAnimationsId();
 
   MOZ_ASSERT(!mPendingAnimations, "should have called ClearAnimations first");
 
   Animation* anim = mAnimations.AppendElement();
 
+  if (mManager->AsWebRenderLayerManager()) {
+    mManager->AsWebRenderLayerManager()->
+      KeepCompositorAnimationsIdAlive(mCompositorAnimationsId);
+  }
+
   mMutated = true;
 
   return anim;
 }
 
 Animation*
 AnimationInfo::AddAnimationForNextTransaction()
 {
--- a/gfx/layers/LayersLogging.cpp
+++ b/gfx/layers/LayersLogging.cpp
@@ -98,16 +98,26 @@ AppendToString(std::stringstream& aStrea
   aStream << pfx;
   aStream << nsPrintfCString(
     "(w=%f, h=%f)",
     s.width, s.height).get();
   aStream << sfx;
 }
 
 void
+AppendToString(std::stringstream& aStream, const wr::StickySideConstraint& s,
+               const char* pfx, const char* sfx)
+{
+  aStream << pfx;
+  aStream << nsPrintfCString("(margin=%f max=%f)",
+      s.margin, s.max_offset).get();
+  aStream << sfx;
+}
+
+void
 AppendToString(std::stringstream& aStream, const nsRegion& r,
                const char* pfx, const char* sfx)
 {
   aStream << pfx;
 
   aStream << "< ";
   for (auto iter = r.RectIter(); !iter.Done(); iter.Next()) {
     AppendToString(aStream, iter.Get());
--- a/gfx/layers/LayersLogging.h
+++ b/gfx/layers/LayersLogging.h
@@ -117,16 +117,20 @@ void
 AppendToString(std::stringstream& aStream, const wr::LayoutRect& r,
                const char* pfx="", const char* sfx="");
 
 void
 AppendToString(std::stringstream& aStream, const wr::LayoutSize& s,
                const char* pfx="", const char* sfx="");
 
 void
+AppendToString(std::stringstream& aStream, const wr::StickySideConstraint& s,
+               const char* pfx="", const char* sfx="");
+
+void
 AppendToString(std::stringstream& aStream, const nsRegion& r,
                const char* pfx="", const char* sfx="");
 
 void
 AppendToString(std::stringstream& aStream, const nsIntRegion& r,
                const char* pfx="", const char* sfx="");
 
 template <typename units>
--- a/gfx/layers/wr/StackingContextHelper.cpp
+++ b/gfx/layers/wr/StackingContextHelper.cpp
@@ -99,16 +99,22 @@ StackingContextHelper::StackingContextHe
 
 StackingContextHelper::~StackingContextHelper()
 {
   if (mBuilder) {
     mBuilder->PopStackingContext();
   }
 }
 
+void
+StackingContextHelper::AdjustOrigin(const LayerPoint& aDelta)
+{
+  mOrigin += aDelta;
+}
+
 wr::LayoutRect
 StackingContextHelper::ToRelativeLayoutRect(const LayerRect& aRect) const
 {
   return wr::ToLayoutRect(RoundedToInt(aRect - mOrigin));
 }
 
 wr::LayoutRect
 StackingContextHelper::ToRelativeLayoutRect(const LayoutDeviceRect& aRect) const
--- a/gfx/layers/wr/StackingContextHelper.h
+++ b/gfx/layers/wr/StackingContextHelper.h
@@ -64,16 +64,18 @@ public:
   // of the tree, so that we have a StackingContextHelper to pass down into
   // the RenderLayer traversal, but don't actually want it to push a stacking
   // context on the display list builder.
   StackingContextHelper();
 
   // Pops the stacking context, if one was pushed during the constructor.
   ~StackingContextHelper();
 
+  void AdjustOrigin(const LayerPoint& aDelta);
+
   // When this StackingContextHelper is in scope, this function can be used
   // to convert a rect from the layer system's coordinate space to a LayoutRect
   // that is relative to the stacking context. This is useful because most
   // things that are pushed inside the stacking context need to be relative
   // to the stacking context.
   // We allow passing in a LayoutDeviceRect for convenience because in a lot of
   // cases with WebRender display item generate the layout device space is the
   // same as the layer space. (TODO: try to make this more explicit somehow).
--- a/gfx/layers/wr/WebRenderLayerManager.cpp
+++ b/gfx/layers/wr/WebRenderLayerManager.cpp
@@ -957,16 +957,22 @@ WebRenderLayerManager::DiscardImages()
 
 void
 WebRenderLayerManager::AddCompositorAnimationsIdForDiscard(uint64_t aId)
 {
   mDiscardedCompositorAnimationsIds.AppendElement(aId);
 }
 
 void
+WebRenderLayerManager::KeepCompositorAnimationsIdAlive(uint64_t aId)
+{
+  mDiscardedCompositorAnimationsIds.RemoveElement(aId);
+}
+
+void
 WebRenderLayerManager::DiscardCompositorAnimations()
 {
   if (WrBridge()->IPCOpen() &&
       !mDiscardedCompositorAnimationsIds.IsEmpty()) {
     WrBridge()->
       SendDeleteCompositorAnimations(mDiscardedCompositorAnimationsIds);
   }
   mDiscardedCompositorAnimationsIds.Clear();
--- a/gfx/layers/wr/WebRenderLayerManager.h
+++ b/gfx/layers/wr/WebRenderLayerManager.h
@@ -169,16 +169,19 @@ public:
   // transaction or destruction
   void AddImageKeyForDiscard(wr::ImageKey);
   void DiscardImages();
   void DiscardLocalImages();
 
   // Before destroying a layer with animations, add its compositorAnimationsId
   // to a list of ids that will be discarded on the next transaction
   void AddCompositorAnimationsIdForDiscard(uint64_t aId);
+  // If the animations are valid and running on the compositor,
+  // we should keep the compositorAnimationsId alive on the compositor side.
+  void KeepCompositorAnimationsIdAlive(uint64_t aId);
   void DiscardCompositorAnimations();
 
   WebRenderBridgeChild* WrBridge() const { return mWrChild; }
 
   virtual void Mutated(Layer* aLayer) override;
   virtual void MutatedSimple(Layer* aLayer) override;
 
   void Hold(Layer* aLayer);
@@ -274,17 +277,19 @@ private:
         MOZ_ASSERT(userDataTable->Count());
 
         userDataTable->Remove(data->GetDisplayItemKey());
 
         if (!userDataTable->Count()) {
           frame->RemoveProperty(nsIFrame::WebRenderUserDataProperty());
         }
         iter.Remove();
+        continue;
       }
+
       data->SetUsed(false);
     }
   }
 
 private:
   nsIWidget* MOZ_NON_OWNING_REF mWidget;
   nsTArray<wr::ImageKey> mImageKeysToDelete;
   // TODO - This is needed because we have some code that creates image keys
--- a/gfx/webrender_bindings/WebRenderAPI.cpp
+++ b/gfx/webrender_bindings/WebRenderAPI.cpp
@@ -721,16 +721,48 @@ DisplayListBuilder::PopClip(bool aRecord
 {
   WRDL_LOG("PopClip id=%" PRIu64 "\n", mWrState, mClipIdStack.back().id);
   if (aRecordInStack) {
     mClipIdStack.pop_back();
   }
   wr_dp_pop_clip(mWrState);
 }
 
+wr::WrStickyId
+DisplayListBuilder::DefineStickyFrame(const wr::LayoutRect& aContentRect,
+                                      const wr::StickySideConstraint* aTop,
+                                      const wr::StickySideConstraint* aRight,
+                                      const wr::StickySideConstraint* aBottom,
+                                      const wr::StickySideConstraint* aLeft)
+{
+  uint64_t id = wr_dp_define_sticky_frame(mWrState, aContentRect, aTop,
+      aRight, aBottom, aLeft);
+  WRDL_LOG("DefineSticky id=%" PRIu64 " c=%s t=%s r=%s b=%s l=%s\n", mWrState, id,
+      Stringify(aContentRect).c_str(),
+      aTop ? Stringify(*aTop).c_str() : "none",
+      aRight ? Stringify(*aRight).c_str() : "none",
+      aBottom ? Stringify(*aBottom).c_str() : "none",
+      aLeft ? Stringify(*aLeft).c_str() : "none");
+  return wr::WrStickyId { id };
+}
+
+void
+DisplayListBuilder::PushStickyFrame(const wr::WrStickyId& aStickyId)
+{
+  wr_dp_push_clip(mWrState, aStickyId.id);
+  WRDL_LOG("PushSticky id=%" PRIu64 "\n", mWrState, aStickyId.id);
+}
+
+void
+DisplayListBuilder::PopStickyFrame()
+{
+  WRDL_LOG("PopSticky\n", mWrState);
+  wr_dp_pop_clip(mWrState);
+}
+
 void
 DisplayListBuilder::PushBuiltDisplayList(BuiltDisplayList &dl)
 {
   WRDL_LOG("PushBuiltDisplayList\n", mWrState);
   wr_dp_push_built_display_list(mWrState,
                                 dl.dl_desc,
                                 &dl.dl.inner);
 }
--- a/gfx/webrender_bindings/WebRenderAPI.h
+++ b/gfx/webrender_bindings/WebRenderAPI.h
@@ -224,16 +224,24 @@ public:
   void PopStackingContext();
 
   wr::WrClipId DefineClip(const wr::LayoutRect& aClipRect,
                           const nsTArray<wr::WrComplexClipRegion>* aComplex = nullptr,
                           const wr::WrImageMask* aMask = nullptr);
   void PushClip(const wr::WrClipId& aClipId, bool aRecordInStack = true);
   void PopClip(bool aRecordInStack = true);
 
+  wr::WrStickyId DefineStickyFrame(const wr::LayoutRect& aContentRect,
+                                   const wr::StickySideConstraint* aTop,
+                                   const wr::StickySideConstraint* aRight,
+                                   const wr::StickySideConstraint* aBottom,
+                                   const wr::StickySideConstraint* aLeft);
+  void PushStickyFrame(const wr::WrStickyId& aStickyId);
+  void PopStickyFrame();
+
   void PushBuiltDisplayList(wr::BuiltDisplayList &dl);
 
   bool IsScrollLayerDefined(layers::FrameMetrics::ViewID aScrollId) const;
   void DefineScrollLayer(const layers::FrameMetrics::ViewID& aScrollId,
                          const wr::LayoutRect& aContentRect, // TODO: We should work with strongly typed rects
                          const wr::LayoutRect& aClipRect);
   void PushScrollLayer(const layers::FrameMetrics::ViewID& aScrollId);
   void PopScrollLayer();
--- a/gfx/webrender_bindings/WebRenderTypes.h
+++ b/gfx/webrender_bindings/WebRenderTypes.h
@@ -731,16 +731,26 @@ static inline wr::WrFilterOp ToWrFilterO
 struct WrClipId {
   uint64_t id;
 
   bool operator==(const WrClipId& other) const {
     return id == other.id;
   }
 };
 
+// Corresponds to a clip id for a position:sticky clip in webrender. Similar
+// to WrClipId but a separate struct so we don't get them mixed up in C++.
+struct WrStickyId {
+  uint64_t id;
+
+  bool operator==(const WrClipId& other) const {
+    return id == other.id;
+  }
+};
+
 typedef Variant<layers::FrameMetrics::ViewID, WrClipId> ScrollOrClipId;
 
 enum class WebRenderError : int8_t {
   INITIALIZE = 0,
   MAKE_CURRENT,
   RENDER,
 
   Sentinel /* this must be last for serialization purposes. */
--- a/gfx/webrender_bindings/src/bindings.rs
+++ b/gfx/webrender_bindings/src/bindings.rs
@@ -1210,16 +1210,41 @@ pub extern "C" fn wr_dp_push_clip(state:
 
 #[no_mangle]
 pub extern "C" fn wr_dp_pop_clip(state: &mut WrState) {
     debug_assert!(unsafe { !is_in_render_thread() });
     state.frame_builder.dl_builder.pop_clip_id();
 }
 
 #[no_mangle]
+pub extern "C" fn wr_dp_define_sticky_frame(state: &mut WrState,
+                                            content_rect: LayoutRect,
+                                            top_range: *const StickySideConstraint,
+                                            right_range: *const StickySideConstraint,
+                                            bottom_range: *const StickySideConstraint,
+                                            left_range: *const StickySideConstraint)
+                                            -> u64 {
+    assert!(unsafe { is_in_main_thread() });
+    let clip_id = state.frame_builder.dl_builder.define_sticky_frame(
+        None, content_rect, StickyFrameInfo::new(
+            unsafe { top_range.as_ref() }.cloned(),
+            unsafe { right_range.as_ref() }.cloned(),
+            unsafe { bottom_range.as_ref() }.cloned(),
+            unsafe { left_range.as_ref() }.cloned()));
+    match clip_id {
+        ClipId::Clip(id, nesting_index, pipeline_id) => {
+            assert!(pipeline_id == state.pipeline_id);
+            assert!(nesting_index == 0);
+            id
+        },
+        _ => panic!("Got unexpected clip id type"),
+    }
+}
+
+#[no_mangle]
 pub extern "C" fn wr_dp_define_scroll_layer(state: &mut WrState,
                                             scroll_id: u64,
                                             content_rect: LayoutRect,
                                             clip_rect: LayoutRect) {
     assert!(unsafe { is_in_main_thread() });
     let clip_id = ClipId::new(scroll_id, state.pipeline_id);
     state.frame_builder.dl_builder.define_scroll_frame(
         Some(clip_id), content_rect, clip_rect, vec![], None,
--- a/gfx/webrender_bindings/webrender_ffi_generated.h
+++ b/gfx/webrender_bindings/webrender_ffi_generated.h
@@ -434,16 +434,26 @@ struct WrImageMask {
 
   bool operator==(const WrImageMask& aOther) const {
     return image == aOther.image &&
            rect == aOther.rect &&
            repeat == aOther.repeat;
   }
 };
 
+struct StickySideConstraint {
+  float margin;
+  float max_offset;
+
+  bool operator==(const StickySideConstraint& aOther) const {
+    return margin == aOther.margin &&
+           max_offset == aOther.max_offset;
+  }
+};
+
 struct BorderWidths {
   float left;
   float top;
   float right;
   float bottom;
 
   bool operator==(const BorderWidths& aOther) const {
     return left == aOther.left &&
@@ -854,16 +864,25 @@ WR_FUNC;
 WR_INLINE
 void wr_dp_define_scroll_layer(WrState *aState,
                                uint64_t aScrollId,
                                LayoutRect aContentRect,
                                LayoutRect aClipRect)
 WR_FUNC;
 
 WR_INLINE
+uint64_t wr_dp_define_sticky_frame(WrState *aState,
+                                   LayoutRect aContentRect,
+                                   const StickySideConstraint *aTopRange,
+                                   const StickySideConstraint *aRightRange,
+                                   const StickySideConstraint *aBottomRange,
+                                   const StickySideConstraint *aLeftRange)
+WR_FUNC;
+
+WR_INLINE
 void wr_dp_end(WrState *aState)
 WR_FUNC;
 
 WR_INLINE
 void wr_dp_pop_clip(WrState *aState)
 WR_FUNC;
 
 WR_INLINE
--- a/intl/l10n/Localization.jsm
+++ b/intl/l10n/Localization.jsm
@@ -52,16 +52,27 @@ class CachedIterable {
       async next() {
         if (seen.length <= cur) {
           seen.push(await iterator.next());
         }
         return seen[cur++];
       }
     };
   }
+
+  /**
+   * This method allows user to consume the next element from the iterator
+   * into the cache.
+   */
+  touchNext() {
+    const { seen, iterator } = this;
+    if (seen.length === 0 || seen[seen.length - 1].done === false) {
+      seen.push(iterator.next());
+    }
+  }
 }
 
 /**
  * Specialized version of an Error used to indicate errors that are result
  * of a problem during the localization process.
  *
  * We use them to identify the class of errors the require a fallback
  * mechanism to be triggered vs errors that should be reported, but
new file mode 100644
--- /dev/null
+++ b/intl/l10n/jar.mn
@@ -0,0 +1,2 @@
+toolkit.jar:
+  content/global/l10n.js
new file mode 100644
--- /dev/null
+++ b/intl/l10n/l10n.js
@@ -0,0 +1,51 @@
+{
+  const { DOMLocalization } =
+    Components.utils.import("resource://gre/modules/DOMLocalization.jsm");
+
+  /**
+   * Polyfill for document.ready polyfill.
+   * See: https://github.com/whatwg/html/issues/127 for details.
+   *
+   * @returns {Promise}
+   */
+  function documentReady() {
+    const rs = document.readyState;
+    if (rs === 'interactive' || rs === 'completed') {
+      return Promise.resolve();
+    }
+
+    return new Promise(
+      resolve => document.addEventListener(
+        'readystatechange', resolve, { once: true }
+      )
+    );
+  }
+
+  /**
+   * Scans the `elem` for links with localization resources.
+   *
+   * @param {Element} elem
+   * @returns {Array<string>}
+   */
+  function getResourceLinks(elem) {
+    return Array.from(elem.querySelectorAll('link[rel="localization"]')).map(
+      el => el.getAttribute('href')
+    );
+  }
+
+  const resourceIds = getResourceLinks(document.head || document);
+
+  document.l10n = new DOMLocalization(window, resourceIds);
+
+  // trigger first context to be fetched eagerly
+  document.l10n.ctxs.touchNext();
+
+  document.l10n.ready = documentReady().then(() => {
+    document.l10n.registerObservers();
+    window.addEventListener('unload', () => {
+      document.l10n.unregisterObservers();
+    });
+    document.l10n.connectRoot(document.documentElement);
+    return document.l10n.translateRoots();
+  });
+}
--- a/intl/l10n/moz.build
+++ b/intl/l10n/moz.build
@@ -10,9 +10,11 @@ EXTRA_JS_MODULES += [
     'Localization.jsm',
     'MessageContext.jsm',
 ]
 
 XPCSHELL_TESTS_MANIFESTS += ['test/xpcshell.ini']
 
 MOCHITEST_CHROME_MANIFESTS += ['test/chrome.ini']
 
+JAR_MANIFESTS += ['jar.mn']
+
 FINAL_LIBRARY = 'xul'
--- a/js/public/Utility.h
+++ b/js/public/Utility.h
@@ -149,35 +149,99 @@ ShouldFailWithOOM()
     return false;
 }
 
 inline bool
 HadSimulatedOOM() {
     return counter >= maxAllocations;
 }
 
+/*
+ * Out of stack space testing support, similar to OOM testing functions.
+ */
+
+extern JS_PUBLIC_DATA(uint32_t) stackTargetThread;
+extern JS_PUBLIC_DATA(uint64_t) maxStackChecks;
+extern JS_PUBLIC_DATA(uint64_t) stackCheckCounter;
+extern JS_PUBLIC_DATA(bool) stackCheckFailAlways;
+
+extern void
+SimulateStackOOMAfter(uint64_t checks, uint32_t thread, bool always);
+
+extern void
+ResetSimulatedStackOOM();
+
+inline bool
+IsThreadSimulatingStackOOM()
+{
+    return js::oom::stackTargetThread && js::oom::stackTargetThread == js::oom::GetThreadType();
+}
+
+inline bool
+IsSimulatedStackOOMCheck()
+{
+    return IsThreadSimulatingStackOOM() &&
+           (stackCheckCounter == maxStackChecks || (stackCheckCounter > maxStackChecks && stackCheckFailAlways));
+}
+
+inline bool
+ShouldFailWithStackOOM()
+{
+    if (!IsThreadSimulatingStackOOM())
+        return false;
+
+    stackCheckCounter++;
+    if (IsSimulatedStackOOMCheck()) {
+        JS_OOM_CALL_BP_FUNC();
+        return true;
+    }
+    return false;
+}
+
+inline bool
+HadSimulatedStackOOM()
+{
+    return stackCheckCounter >= maxStackChecks;
+}
+
 } /* namespace oom */
 } /* namespace js */
 
 #  define JS_OOM_POSSIBLY_FAIL()                                              \
     do {                                                                      \
         if (js::oom::ShouldFailWithOOM())                                     \
             return nullptr;                                                   \
     } while (0)
 
 #  define JS_OOM_POSSIBLY_FAIL_BOOL()                                         \
     do {                                                                      \
         if (js::oom::ShouldFailWithOOM())                                     \
             return false;                                                     \
     } while (0)
 
+#  define JS_STACK_OOM_POSSIBLY_FAIL()                                        \
+    do {                                                                      \
+        if (js::oom::ShouldFailWithStackOOM())                                \
+            return false;                                                     \
+    } while (0)
+
+#  define JS_STACK_OOM_POSSIBLY_FAIL_REPORT()                                 \
+    do {                                                                      \
+        if (js::oom::ShouldFailWithStackOOM()) {                              \
+            ReportOverRecursed(cx);                                           \
+            return false;                                                     \
+        }                                                                     \
+    } while (0)
+
 # else
 
 #  define JS_OOM_POSSIBLY_FAIL() do {} while(0)
 #  define JS_OOM_POSSIBLY_FAIL_BOOL() do {} while(0)
+#  define JS_STACK_OOM_POSSIBLY_FAIL() do {} while(0)
+#  define JS_STACK_OOM_POSSIBLY_FAIL_REPORT() do {} while(0)
 namespace js {
 namespace oom {
 static inline bool IsSimulatedOOMAllocation() { return false; }
 static inline bool ShouldFailWithOOM() { return false; }
 } /* namespace oom */
 } /* namespace js */
 
 # endif /* DEBUG || JS_OOM_BREAKPOINT */
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -1622,17 +1622,17 @@ OOMTest(JSContext* cx, unsigned argc, Va
             // would be to have the caller pass some kind of exception
             // specification and to check the exception against it.
 
             cx->clearPendingException();
             cx->runtime()->hadOutOfMemory = false;
 
             // Some tests create a new compartment or zone on every
             // iteration. Our GC is triggered by GC allocations and not by
-            // number of copmartments or zones, so these won't normally get
+            // number of compartments or zones, so these won't normally get
             // cleaned up. The check here stops some tests running out of
             // memory.
             if (CountCompartments(cx) > compartmentCount + 100) {
                 JS_GC(cx);
                 compartmentCount = CountCompartments(cx);
             }
 
 #ifdef JS_TRACE_LOGGING
@@ -1652,16 +1652,156 @@ OOMTest(JSContext* cx, unsigned argc, Va
             fprintf(stderr, "  finished after %d allocations\n", allocation - 2);
         }
     }
 
     cx->runningOOMTest = false;
     args.rval().setUndefined();
     return true;
 }
+
+static bool
+StackTest(JSContext* cx, unsigned argc, Value* vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+
+    if (args.length() < 1 || args.length() > 2) {
+        JS_ReportErrorASCII(cx, "stackTest() takes between 1 and 2 arguments.");
+        return false;
+    }
+
+    if (!args[0].isObject() || !args[0].toObject().is<JSFunction>()) {
+        JS_ReportErrorASCII(cx, "The first argument to stackTest() must be a function.");
+        return false;
+    }
+
+    if (args.length() == 2 && !args[1].isBoolean()) {
+        JS_ReportErrorASCII(cx, "The optional second argument to stackTest() must be a boolean.");
+        return false;
+    }
+
+    bool expectExceptionOnFailure = true;
+    if (args.length() == 2)
+        expectExceptionOnFailure = args[1].toBoolean();
+
+    // There are some places where we do fail without raising an exception, so
+    // we can't expose this to the fuzzers by default.
+    if (fuzzingSafe)
+        expectExceptionOnFailure = false;
+
+    if (disableOOMFunctions) {
+        args.rval().setUndefined();
+        return true;
+    }
+
+    RootedFunction function(cx, &args[0].toObject().as<JSFunction>());
+
+    bool verbose = EnvVarIsDefined("OOM_VERBOSE");
+
+    unsigned threadStart = THREAD_TYPE_COOPERATING;
+    unsigned threadEnd = THREAD_TYPE_MAX;
+
+    // Test a single thread type if specified by the OOM_THREAD environment variable.
+    int threadOption = 0;
+    if (EnvVarAsInt("OOM_THREAD", &threadOption)) {
+        if (threadOption < THREAD_TYPE_COOPERATING || threadOption > THREAD_TYPE_MAX) {
+            JS_ReportErrorASCII(cx, "OOM_THREAD value out of range.");
+            return false;
+        }
+
+        threadStart = threadOption;
+        threadEnd = threadOption + 1;
+    }
+
+    if (cx->runningOOMTest) {
+        JS_ReportErrorASCII(cx, "Nested call to oomTest() or stackTest() is not allowed.");
+        return false;
+    }
+    cx->runningOOMTest = true;
+
+    MOZ_ASSERT(!cx->isExceptionPending());
+
+    size_t compartmentCount = CountCompartments(cx);
+
+#ifdef JS_GC_ZEAL
+    JS_SetGCZeal(cx, 0, JS_DEFAULT_ZEAL_FREQ);
+#endif
+
+    for (unsigned thread = threadStart; thread < threadEnd; thread++) {
+        if (verbose)
+            fprintf(stderr, "thread %d\n", thread);
+
+        unsigned check = 1;
+        bool handledOOM;
+        do {
+            if (verbose)
+                fprintf(stderr, "  check %d\n", check);
+
+            MOZ_ASSERT(!cx->isExceptionPending());
+
+            js::oom::SimulateStackOOMAfter(check, thread, false);
+
+            RootedValue result(cx);
+            bool ok = JS_CallFunction(cx, cx->global(), function,
+                                      HandleValueArray::empty(), &result);
+
+            handledOOM = js::oom::HadSimulatedStackOOM();
+            js::oom::ResetSimulatedStackOOM();
+
+            MOZ_ASSERT_IF(ok, !cx->isExceptionPending());
+
+            if (ok) {
+                MOZ_ASSERT(!cx->isExceptionPending(),
+                           "Thunk execution succeeded but an exception was raised - "
+                           "missing error check?");
+            } else if (expectExceptionOnFailure) {
+                MOZ_ASSERT(cx->isExceptionPending(),
+                           "Thunk execution failed but no exception was raised - "
+                           "missing call to js::ReportOutOfMemory()?");
+            }
+
+            // Note that it is possible that the function throws an exception
+            // unconnected to OOM, in which case we ignore it. More correct
+            // would be to have the caller pass some kind of exception
+            // specification and to check the exception against it.
+
+            cx->clearPendingException();
+
+            // Some tests create a new compartment or zone on every
+            // iteration. Our GC is triggered by GC allocations and not by
+            // number of compartments or zones, so these won't normally get
+            // cleaned up. The check here stops some tests running out of
+            // memory.
+            if (CountCompartments(cx) > compartmentCount + 100) {
+                JS_GC(cx);
+                compartmentCount = CountCompartments(cx);
+            }
+
+#ifdef JS_TRACE_LOGGING
+            // Reset the TraceLogger state if enabled.
+            TraceLoggerThread* logger = TraceLoggerForCurrentThread(cx);
+            if (logger->enabled()) {
+                while (logger->enabled())
+                    logger->disable();
+                logger->enable(cx);
+            }
+#endif
+
+            check++;
+        } while (handledOOM);
+
+        if (verbose) {
+            fprintf(stderr, "  finished after %d checks\n", check - 2);
+        }
+    }
+
+    cx->runningOOMTest = false;
+    args.rval().setUndefined();
+    return true;
+}
 #endif
 
 static bool
 SettlePromiseNow(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     if (!args.requireAtLeast(cx, "settlePromiseNow", 1))
         return false;
@@ -4616,16 +4756,22 @@ static const JSFunctionSpecWithHelp Test
     JS_FN_HELP("oomTest", OOMTest, 0, 0,
 "oomTest(function, [expectExceptionOnFailure = true])",
 "  Test that the passed function behaves correctly under OOM conditions by\n"
 "  repeatedly executing it and simulating allocation failure at successive\n"
 "  allocations until the function completes without seeing a failure.\n"
 "  By default this tests that an exception is raised if execution fails, but\n"
 "  this can be disabled by passing false as the optional second parameter.\n"
 "  This is also disabled when --fuzzing-safe is specified."),
+
+    JS_FN_HELP("stackTest", StackTest, 0, 0,
+"stackTest(function, [expectExceptionOnFailure = true])",
+"  This function behaves exactly like oomTest with the difference that\n"
+"  instead of simulating regular OOM conditions, it simulates the engine\n"
+"  running out of stack space (failing recursion check)."),
 #endif
 
     JS_FN_HELP("settlePromiseNow", SettlePromiseNow, 1, 0,
 "settlePromiseNow(promise)",
 "  'Settle' a 'promise' immediately. This just marks the promise as resolved\n"
 "  with a value of `undefined` and causes the firing of any onPromiseSettled\n"
 "  hooks set on Debugger instances that are observing the given promise's\n"
 "  global as a debuggee."),
--- a/js/src/jsfriendapi.h
+++ b/js/src/jsfriendapi.h
@@ -1047,33 +1047,41 @@ GetNativeStackLimit(JSContext* cx, int e
  * allows less space than any other check, including a safety buffer (as in, it
  * uses the untrusted limit and subtracts a little more from it).
  */
 
 MOZ_ALWAYS_INLINE bool
 CheckRecursionLimit(JSContext* cx, uintptr_t limit)
 {
     int stackDummy;
+
+    JS_STACK_OOM_POSSIBLY_FAIL_REPORT();
+
     if (!JS_CHECK_STACK_SIZE(limit, &stackDummy)) {
         ReportOverRecursed(cx);
         return false;
     }
     return true;
 }
 
 MOZ_ALWAYS_INLINE bool
 CheckRecursionLimitDontReport(JSContext* cx, uintptr_t limit)
 {
     int stackDummy;
+
+    JS_STACK_OOM_POSSIBLY_FAIL();
+
     return JS_CHECK_STACK_SIZE(limit, &stackDummy);
 }
 
 MOZ_ALWAYS_INLINE bool
 CheckRecursionLimit(JSContext* cx)
 {
+    JS_STACK_OOM_POSSIBLY_FAIL_REPORT();
+
     // GetNativeStackLimit(cx) is pretty slow because it has to do an uninlined
     // call to RunningWithTrustedPrincipals to determine which stack limit to
     // use. To work around this, check the untrusted limit first to avoid the
     // overhead in most cases.
     uintptr_t untrustedLimit = GetNativeStackLimit(cx, JS::StackForUntrustedScript);
     if (MOZ_LIKELY(CheckRecursionLimitDontReport(cx, untrustedLimit)))
         return true;
     return CheckRecursionLimit(cx, GetNativeStackLimit(cx));
@@ -1083,22 +1091,26 @@ MOZ_ALWAYS_INLINE bool
 CheckRecursionLimitDontReport(JSContext* cx)
 {
     return CheckRecursionLimitDontReport(cx, GetNativeStackLimit(cx));
 }
 
 MOZ_ALWAYS_INLINE bool
 CheckRecursionLimitWithStackPointerDontReport(JSContext* cx, void* sp)
 {
+    JS_STACK_OOM_POSSIBLY_FAIL();
+
     return JS_CHECK_STACK_SIZE(GetNativeStackLimit(cx), sp);
 }
 
 MOZ_ALWAYS_INLINE bool
 CheckRecursionLimitWithStackPointer(JSContext* cx, void* sp)
 {
+    JS_STACK_OOM_POSSIBLY_FAIL_REPORT();
+
     if (!JS_CHECK_STACK_SIZE(GetNativeStackLimit(cx), sp)) {
         ReportOverRecursed(cx);
         return false;
     }
     return true;
 }
 
 MOZ_ALWAYS_INLINE bool
--- a/js/src/jsutil.cpp
+++ b/js/src/jsutil.cpp
@@ -40,16 +40,21 @@ mozilla::Atomic<AutoEnterOOMUnsafeRegion
 namespace oom {
 
 JS_PUBLIC_DATA(uint32_t) targetThread = 0;
 MOZ_THREAD_LOCAL(uint32_t) threadType;
 JS_PUBLIC_DATA(uint64_t) maxAllocations = UINT64_MAX;
 JS_PUBLIC_DATA(uint64_t) counter = 0;
 JS_PUBLIC_DATA(bool) failAlways = true;
 
+JS_PUBLIC_DATA(uint32_t) stackTargetThread = 0;
+JS_PUBLIC_DATA(uint64_t) maxStackChecks = UINT64_MAX;
+JS_PUBLIC_DATA(uint64_t) stackCheckCounter = 0;
+JS_PUBLIC_DATA(bool) stackCheckFailAlways = true;
+
 bool
 InitThreadType(void) {
     return threadType.init();
 }
 
 void
 SetThreadType(ThreadType type) {
     threadType.set(type);
@@ -62,43 +67,75 @@ GetThreadType(void) {
 
 static inline bool
 IsHelperThreadType(uint32_t thread)
 {
     return thread != THREAD_TYPE_NONE && thread != THREAD_TYPE_COOPERATING;
 }
 
 void
-SimulateOOMAfter(uint64_t allocations, uint32_t thread, bool always) {
+SimulateOOMAfter(uint64_t allocations, uint32_t thread, bool always)
+{
     Maybe<AutoLockHelperThreadState> lock;
     if (IsHelperThreadType(targetThread) || IsHelperThreadType(thread)) {
         lock.emplace();
         HelperThreadState().waitForAllThreadsLocked(lock.ref());
     }
 
     MOZ_ASSERT(counter + allocations > counter);
     MOZ_ASSERT(thread > js::THREAD_TYPE_NONE && thread < js::THREAD_TYPE_MAX);
     targetThread = thread;
     maxAllocations = counter + allocations;
     failAlways = always;
 }
 
 void
-ResetSimulatedOOM() {
+ResetSimulatedOOM()
+{
     Maybe<AutoLockHelperThreadState> lock;
     if (IsHelperThreadType(targetThread)) {
         lock.emplace();
         HelperThreadState().waitForAllThreadsLocked(lock.ref());
     }
 
     targetThread = THREAD_TYPE_NONE;
     maxAllocations = UINT64_MAX;
     failAlways = false;
 }
 
+void
+SimulateStackOOMAfter(uint64_t checks, uint32_t thread, bool always)
+{
+    Maybe<AutoLockHelperThreadState> lock;
+    if (IsHelperThreadType(stackTargetThread) || IsHelperThreadType(thread)) {
+        lock.emplace();
+        HelperThreadState().waitForAllThreadsLocked(lock.ref());
+    }
+
+    MOZ_ASSERT(stackCheckCounter + checks > stackCheckCounter);
+    MOZ_ASSERT(thread > js::THREAD_TYPE_NONE && thread < js::THREAD_TYPE_MAX);
+    stackTargetThread = thread;
+    maxStackChecks = stackCheckCounter + checks;
+    stackCheckFailAlways = always;
+}
+
+void
+ResetSimulatedStackOOM()
+{
+    Maybe<AutoLockHelperThreadState> lock;
+    if (IsHelperThreadType(stackTargetThread)) {
+        lock.emplace();
+        HelperThreadState().waitForAllThreadsLocked(lock.ref());
+    }
+
+    stackTargetThread = THREAD_TYPE_NONE;
+    maxStackChecks = UINT64_MAX;
+    stackCheckFailAlways = false;
+}
+
 
 } // namespace oom
 } // namespace js
 #endif // defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
 
 JS_PUBLIC_API(void)
 JS_Assert(const char* s, const char* file, int ln)
 {
--- a/js/src/vm/Printer.cpp
+++ b/js/src/vm/Printer.cpp
@@ -13,16 +13,20 @@
 #include <stdarg.h>
 #include <stdio.h>
 
 #include "jscntxt.h"
 #include "jsutil.h"
 
 #include "ds/LifoAlloc.h"
 
+#ifdef XP_WIN32
+#include "jswin.h"
+#endif
+
 using mozilla::PodCopy;
 
 namespace
 {
 
 class GenericPrinterPrintfTarget : public mozilla::PrintfTarget
 {
 public:
@@ -445,16 +449,28 @@ bool
 Fprinter::put(const char* s, size_t len)
 {
     MOZ_ASSERT(file_);
     int i = fwrite(s, /*size=*/ 1, /*nitems=*/ len, file_);
     if (size_t(i) != len) {
         reportOutOfMemory();
         return false;
     }
+#ifdef XP_WIN32
+    if ((file_ == stderr) && (IsDebuggerPresent())) {
+        UniqueChars buf(static_cast<char*>(js_malloc(len + 1)));
+        if (!buf) {
+            reportOutOfMemory();
+            return false;
+        }
+        PodCopy(buf.get(), s, len);
+        buf[len] = '\0';
+        OutputDebugStringA(buf.get());
+    }
+#endif
     return true;
 }
 
 LSprinter::LSprinter(LifoAlloc* lifoAlloc)
   : alloc_(lifoAlloc),
     head_(nullptr),
     tail_(nullptr),
     unused_(0)
--- a/layout/painting/nsDisplayList.cpp
+++ b/layout/painting/nsDisplayList.cpp
@@ -7037,16 +7037,124 @@ nsDisplayStickyPosition::BuildLayer(nsDi
                           aContainerParameters.mXScale,
                         NSAppUnitsToFloatPixels(inner.height, factor) *
                           aContainerParameters.mYScale);
   layer->SetStickyPositionData(scrollId, stickyOuter, stickyInner);
 
   return layer.forget();
 }
 
+bool
+nsDisplayStickyPosition::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuilder,
+                                                 mozilla::wr::IpcResourceUpdateQueue& aResources,
+                                                 const StackingContextHelper& aSc,
+                                                 WebRenderLayerManager* aManager,
+                                                 nsDisplayListBuilder* aDisplayListBuilder)
+{
+  LayerPoint scTranslation;
+  StickyScrollContainer* stickyScrollContainer = StickyScrollContainer::GetStickyScrollContainerForFrame(mFrame);
+  if (stickyScrollContainer) {
+    float auPerDevPixel = mFrame->PresContext()->AppUnitsPerDevPixel();
+
+    bool snap;
+    nsRect itemBounds = GetBounds(aDisplayListBuilder, &snap);
+
+    // The itemBounds here already take into account the main-thread
+    // position:sticky implementation, so we need to unapply that.
+    nsIFrame* firstCont = nsLayoutUtils::FirstContinuationOrIBSplitSibling(mFrame);
+    nsPoint translation = stickyScrollContainer->ComputePosition(firstCont) - firstCont->GetNormalPosition();
+    itemBounds.MoveBy(-translation);
+    scTranslation = ViewAs<LayerPixel>(
+        LayoutDevicePoint::FromAppUnits(translation, auPerDevPixel),
+        PixelCastJustification::WebRenderHasUnitResolution);
+
+    LayoutDeviceRect bounds = LayoutDeviceRect::FromAppUnits(itemBounds, auPerDevPixel);
+
+    Maybe<wr::StickySideConstraint> top;
+    Maybe<wr::StickySideConstraint> right;
+    Maybe<wr::StickySideConstraint> bottom;
+    Maybe<wr::StickySideConstraint> left;
+
+    nsRect outer;
+    nsRect inner;
+    stickyScrollContainer->GetScrollRanges(mFrame, &outer, &inner);
+
+    nsRect scrollPort = stickyScrollContainer->ScrollFrame()->GetScrollPortRect();
+
+    // The following computations make more sense upon understanding the
+    // semantics of "inner" and "outer", which is explained in the comment on
+    // SetStickyPositionData in Layers.h.
+
+    if (outer.YMost() != inner.YMost()) {
+      // Question: How far will itemBounds.y be from the top of the scrollport
+      // when we have scrolled down from the current scroll position of "0" to a
+      // scroll position of "inner.YMost()" (which is >= 0 since we are
+      // scrolling down)?
+      // Answer: (itemBounds.y - 0) - (inner.YMost() - 0)
+      //      == itemBounds.y - inner.YMost()
+      float margin = NSAppUnitsToFloatPixels(itemBounds.y - inner.YMost(), auPerDevPixel);
+      // The scroll distance during which the item should remain "stuck"
+      float maxOffset = NSAppUnitsToFloatPixels(outer.YMost() - inner.YMost(), auPerDevPixel);
+      top = Some(wr::StickySideConstraint { margin, maxOffset });
+    }
+    if (outer.y != inner.y) {
+      // Question: How far will itemBounds.YMost() be from the bottom of the
+      // scrollport when we have scrolled up from the current scroll position of
+      // "0" to a scroll position of "inner.y" (which is <= 0 since we are
+      // scrolling up)?
+      // Answer: (scrollPort.height - itemBounds.YMost()) - (0 - inner.y)
+      //      == scrollPort.height - itemBounds.YMost() + inner.y
+      float margin = NSAppUnitsToFloatPixels(scrollPort.height - itemBounds.YMost() + inner.y, auPerDevPixel);
+      // The scroll distance during which the item should remain "stuck"
+      float maxOffset = NSAppUnitsToFloatPixels(outer.y - inner.y, auPerDevPixel);
+      bottom = Some(wr::StickySideConstraint { margin, maxOffset });
+    }
+    // Same as above, but for the x-axis
+    if (outer.XMost() != inner.XMost()) {
+      float margin = NSAppUnitsToFloatPixels(itemBounds.x - inner.XMost(), auPerDevPixel);
+      float maxOffset = NSAppUnitsToFloatPixels(outer.XMost() - inner.XMost(), auPerDevPixel);
+      left = Some(wr::StickySideConstraint { margin, maxOffset });
+    }
+    if (outer.x != inner.x) {
+      float margin = NSAppUnitsToFloatPixels(scrollPort.width - itemBounds.XMost() + inner.x, auPerDevPixel);
+      float maxOffset = NSAppUnitsToFloatPixels(outer.x - inner.x, auPerDevPixel);
+      right = Some(wr::StickySideConstraint { margin, maxOffset });
+    }
+
+    wr::WrStickyId id = aBuilder.DefineStickyFrame(aSc.ToRelativeLayoutRect(bounds),
+        top.ptrOr(nullptr), right.ptrOr(nullptr), bottom.ptrOr(nullptr), left.ptrOr(nullptr));
+
+    aBuilder.PushStickyFrame(id);
+  }
+
+  // All the things inside this position:sticky item also have the main-thread
+  // translation already applied, so we need to make sure that gets unapplied.
+  // The easiest way to do it is to just create a new stacking context with an
+  // adjusted origin and use that for the nested items. This way all the
+  // ToRelativeLayoutRect calls on this StackingContextHelper object will
+  // include the necessary adjustment.
+  StackingContextHelper sc(aSc, aBuilder, aDisplayListBuilder, this,
+                           &mList, nullptr, 0, nullptr, nullptr);
+  sc.AdjustOrigin(scTranslation);
+
+  // TODO: if, inside this nested command builder, we try to turn a gecko clip
+  // chain into a WR clip chain, we might end up repushing the clip stack
+  // without `id` which effectively throws out the sticky behaviour. The
+  // repushing can happen because of the need to define a new clip while
+  // particular things are on the stack
+  nsDisplayOwnLayer::CreateWebRenderCommands(aBuilder, aResources, sc,
+      aManager, aDisplayListBuilder);
+
+  if (stickyScrollContainer) {
+    aBuilder.PopStickyFrame();
+  }
+
+  return true;
+}
+
 nsDisplayScrollInfoLayer::nsDisplayScrollInfoLayer(
   nsDisplayListBuilder* aBuilder,
   nsIFrame* aScrolledFrame,
   nsIFrame* aScrollFrame)
   : nsDisplayWrapList(aBuilder, aScrollFrame)
   , mScrollFrame(aScrollFrame)
   , mScrolledFrame(aScrolledFrame)
   , mScrollParentId(aBuilder->GetCurrentScrollParentId())
--- a/layout/painting/nsDisplayList.h
+++ b/layout/painting/nsDisplayList.h
@@ -4561,16 +4561,22 @@ public:
     return mozilla::LAYER_ACTIVE;
   }
 
   virtual bool CanMerge(const nsDisplayItem* aItem) const override
   {
     // Items with the same fixed position frame can be merged.
     return HasSameTypeAndClip(aItem) && mFrame == aItem->Frame();
   }
+
+  virtual bool CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuilder,
+                                       mozilla::wr::IpcResourceUpdateQueue& aResources,
+                                       const StackingContextHelper& aSc,
+                                       mozilla::layers::WebRenderLayerManager* aManager,
+                                       nsDisplayListBuilder* aDisplayListBuilder) override;
 };
 
 class nsDisplayFixedPosition : public nsDisplayOwnLayer {
 public:
   nsDisplayFixedPosition(nsDisplayListBuilder* aBuilder, nsIFrame* aFrame,
                          nsDisplayList* aList,
                          const ActiveScrolledRoot* aActiveScrolledRoot);
 
--- a/layout/reftests/async-scrolling/reftest.list
+++ b/layout/reftests/async-scrolling/reftest.list
@@ -27,17 +27,17 @@ skip-if(!asyncPan) == position-fixed-cov
 skip-if(!asyncPan) == position-fixed-cover-2.html position-fixed-cover-2-ref.html
 skip-if(!asyncPan) == position-fixed-cover-3.html position-fixed-cover-3-ref.html
 fuzzy-if(Android,5,4) skip-if(!asyncPan) == position-fixed-transformed-1.html position-fixed-transformed-1-ref.html
 skip-if(!asyncPan) == split-layers-1.html split-layers-1-ref.html
 skip-if(!asyncPan) == split-layers-multi-scrolling-1.html split-layers-multi-scrolling-1-ref.html
 fuzzy-if(skiaContent,2,240000) fuzzy-if(browserIsRemote&&!skiaContent&&(cocoaWidget||winWidget),1,240000) skip-if(!asyncPan) == split-opacity-layers-1.html split-opacity-layers-1-ref.html
 skip-if(!asyncPan) == sticky-pos-scrollable-1.html sticky-pos-scrollable-1-ref.html
 fails-if(webrender) skip-if(!asyncPan) == sticky-pos-scrollable-2.html sticky-pos-scrollable-2-ref.html # bug 1366295 for webrender
-fails-if(webrender) skip-if(!asyncPan) == sticky-pos-scrollable-3.html sticky-pos-scrollable-3-ref.html # bug 1366295 for webrender
+skip-if(!asyncPan) == sticky-pos-scrollable-3.html sticky-pos-scrollable-3-ref.html
 skip-if(!asyncPan) == fixed-pos-scrollable-1.html fixed-pos-scrollable-1-ref.html
 skip-if(!asyncPan) == culling-1.html culling-1-ref.html
 skip-if(!asyncPan) == position-fixed-iframe-1.html position-fixed-iframe-1-ref.html
 skip-if(!asyncPan) == position-fixed-iframe-2.html position-fixed-iframe-2-ref.html
 fuzzy-if(skiaContent,1,11300) skip-if(!asyncPan) == position-fixed-in-scroll-container.html position-fixed-in-scroll-container-ref.html
 skip-if(!asyncPan) == position-fixed-inside-sticky-1.html position-fixed-inside-sticky-1-ref.html
 skip-if(!asyncPan) == position-fixed-inside-sticky-2.html position-fixed-inside-sticky-2-ref.html
 fuzzy(1,60000) skip-if(!asyncPan) == group-opacity-surface-size-1.html group-opacity-surface-size-1-ref.html
@@ -54,17 +54,17 @@ fuzzy-if(Android,7,4) skip-if(!asyncPan)
 pref(apz.disable_for_scroll_linked_effects,true) skip-if(!asyncPan) == disable-apz-for-sle-pages.html disable-apz-for-sle-pages-ref.html
 fuzzy-if(browserIsRemote&&d2d,1,19) skip-if(!asyncPan) == background-blend-mode-1.html background-blend-mode-1-ref.html
 random-if(webrender) skip-if(Android||!asyncPan) != opaque-fractional-displayport-1.html about:blank    # test is specific to "layers" and not valid with webrender
 random-if(webrender) skip-if(Android||!asyncPan) != opaque-fractional-displayport-2.html about:blank    # test is specific to "layers" and not valid with webrender
 fuzzy-if(Android,6,4) fails-if(webrender) skip-if(!asyncPan) == fixed-pos-scrolled-clip-1.html fixed-pos-scrolled-clip-1-ref.html   # bug 1373802 for webrender
 fuzzy-if(Android,6,8) fails-if(webrender) skip-if(!asyncPan) == fixed-pos-scrolled-clip-2.html fixed-pos-scrolled-clip-2-ref.html   # bug 1373802 for webrender
 fuzzy-if(Android,6,8) fails-if(webrender) skip-if(!asyncPan) == fixed-pos-scrolled-clip-3.html fixed-pos-scrolled-clip-3-ref.html   # bug 1373802 for webrender
 fuzzy-if(Android,6,8) fails-if(webrender) skip-if(!asyncPan) == fixed-pos-scrolled-clip-4.html fixed-pos-scrolled-clip-4-ref.html   # bug 1373802 for webrender
-fuzzy-if(Android,6,4) fails-if(webrender) skip-if(!asyncPan) == position-sticky-scrolled-clip-1.html position-sticky-scrolled-clip-1-ref.html # bug 1366295 for webrender
+fuzzy-if(Android,6,4) skip-if(!asyncPan) == position-sticky-scrolled-clip-1.html position-sticky-scrolled-clip-1-ref.html
 fuzzy-if(Android,6,4) skip == position-sticky-scrolled-clip-2.html position-sticky-scrolled-clip-2-ref.html # bug ?????? - incorrectly applying clip to sticky contents
 
 # for the following tests, we want to disable the low-precision buffer
 # as it will expand the displayport beyond what the test specifies in
 # its reftest-displayport attributes, and interfere with where we expect
 # checkerboarding to occur
 default-preferences pref(layers.low-precision-buffer,false)
 skip-if(!asyncPan) == checkerboard-1.html checkerboard-1-ref.html
--- a/layout/tools/reftest/reftest.jsm
+++ b/layout/tools/reftest/reftest.jsm
@@ -1940,17 +1940,18 @@ function RecordResult(testRunTime, error
                     failures.push("failed reftest-opaque-layer: " + gFailedOpaqueLayerMessages.join(", "));
                 }
                 if (gFailedAssignedLayer) {
                     failures.push("failed reftest-assigned-layer: " + gFailedAssignedLayerMessages.join(", "));
                 }
                 var failureString = failures.join(", ");
                 logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], failureString, null, extra);
             } else {
-                var message = "image comparison";
+                var message = "image comparison, max difference: " + maxDifference.value +
+                              ", number of differing pixels: " + differences;
                 if (!test_passed && expected == EXPECTED_PASS ||
                     !test_passed && expected == EXPECTED_FUZZY ||
                     test_passed && expected == EXPECTED_FAIL) {
                     if (!equal) {
                         extra.max_difference = maxDifference.value;
                         extra.differences = differences;
                         var image1 = gCanvas1.toDataURL();
                         var image2 = gCanvas2.toDataURL();
@@ -1958,18 +1959,16 @@ function RecordResult(testRunTime, error
                             {url:gURLs[0].identifier[0],
                              screenshot: image1.slice(image1.indexOf(",") + 1)},
                             gURLs[0].identifier[1],
                             {url:gURLs[0].identifier[2],
                              screenshot: image2.slice(image2.indexOf(",") + 1)}
                         ];
                         extra.image1 = image1;
                         extra.image2 = image2;
-                        message += (", max difference: " + extra.max_difference +
-                                    ", number of differing pixels: " + differences);
                     } else {
                         var image1 = gCanvas1.toDataURL();
                         extra.reftest_screenshots = [
                             {url:gURLs[0].identifier[0],
                              screenshot: image1.slice(image1.indexOf(",") + 1)}
                         ];
                         extra.image1 = image1;
                     }
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -457,17 +457,17 @@ struct arena_stats_t {
 /******************************************************************************/
 /*
  * Extent data structures.
  */
 
 enum ChunkType {
   UNKNOWN_CHUNK,
   ZEROED_CHUNK,   // chunk only contains zeroes
-  ARENA_CHUNK,    // used to back arena runs created by arena_run_alloc
+  ARENA_CHUNK,    // used to back arena runs created by arena_t::AllocRun
   HUGE_CHUNK,     // used to back huge allocations (e.g. huge_malloc)
   RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
 };
 
 /* Tree of extents. */
 struct extent_node_t {
 	/* Linkage for the size/address-ordered tree. */
 	rb_node(extent_node_t) link_szad;
@@ -516,17 +516,17 @@ struct malloc_rtree_t {
 struct arena_t;
 struct arena_bin_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 struct arena_chunk_map_t {
 	/*
 	 * Linkage for run trees.  There are two disjoint uses:
 	 *
-	 * 1) arena_t's runs_avail tree.
+	 * 1) arena_t's tree or available runs.
 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
 	 *    runs, rather than directly embedding linkage.
 	 */
 	rb_node(arena_chunk_map_t)	link;
 
 	/*
 	 * Run address (or size) and various flags are stored together.  The bit
 	 * layout looks like (assuming 32-bit system):
@@ -599,17 +599,17 @@ struct arena_chunk_map_t {
 typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
 typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 
 /* Arena chunk header. */
 struct arena_chunk_t {
 	/* Arena that owns the chunk. */
 	arena_t		*arena;
 
-	/* Linkage for the arena's chunks_dirty tree. */
+	/* Linkage for the arena's tree of dirty chunks. */
 	rb_node(arena_chunk_t) link_dirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
 	/* If we're double-purging, we maintain a linked list of chunks which
 	 * have pages which have been madvise(MADV_FREE)'d but not explicitly
 	 * purged.
 	 *
 	 * We're currently lazy and don't remove a chunk from this list when
@@ -691,88 +691,136 @@ struct arena_bin_t {
 	uint32_t	reg0_offset;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
 };
 
 struct arena_t {
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	uint32_t		magic;
+  uint32_t mMagic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
-	/* All operations on this arena require that lock be locked. */
-	malloc_spinlock_t	lock;
-
-	arena_stats_t		stats;
-
-	/* Tree of dirty-page-containing chunks this arena manages. */
-	arena_chunk_tree_t	chunks_dirty;
+  /* All operations on this arena require that lock be locked. */
+  malloc_spinlock_t mLock;
+
+  arena_stats_t mStats;
+
+private:
+  /* Tree of dirty-page-containing chunks this arena manages. */
+  arena_chunk_tree_t mChunksDirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
-	/* Head of a linked list of MADV_FREE'd-page-containing chunks this
-	 * arena manages. */
-	mozilla::DoublyLinkedList<arena_chunk_t> chunks_madvised;
+  /* Head of a linked list of MADV_FREE'd-page-containing chunks this
+   * arena manages. */
+  mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
 #endif
 
-	/*
-	 * In order to avoid rapid chunk allocation/deallocation when an arena
-	 * oscillates right on the cusp of needing a new chunk, cache the most
-	 * recently freed chunk.  The spare is left in the arena's chunk trees
-	 * until it is deleted.
-	 *
-	 * There is one spare chunk per arena, rather than one spare total, in
-	 * order to avoid interactions between multiple threads that could make
-	 * a single spare inadequate.
-	 */
-	arena_chunk_t		*spare;
-
-	/*
-	 * Current count of pages within unused runs that are potentially
-	 * dirty, and for which madvise(... MADV_FREE) has not been called.  By
-	 * tracking this, we can institute a limit on how much dirty unused
-	 * memory is mapped for each arena.
-	 */
-	size_t			ndirty;
-	/*
-	 * Maximum value allowed for ndirty.
-	 */
-	size_t			dirty_max;
-
-	/*
-	 * Size/address-ordered tree of this arena's available runs.  This tree
-	 * is used for first-best-fit run allocation.
-	 */
-	arena_avail_tree_t	runs_avail;
-
-	/*
-	 * bins is used to store rings of free regions of the following sizes,
-	 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
-	 *
-	 *   bins[i] | size |
-	 *   --------+------+
-	 *        0  |    2 |
-	 *        1  |    4 |
-	 *        2  |    8 |
-	 *   --------+------+
-	 *        3  |   16 |
-	 *        4  |   32 |
-	 *        5  |   48 |
-	 *        6  |   64 |
-	 *           :      :
-	 *           :      :
-	 *       33  |  496 |
-	 *       34  |  512 |
-	 *   --------+------+
-	 *       35  | 1024 |
-	 *       36  | 2048 |
-	 *   --------+------+
-	 */
-	arena_bin_t		bins[1]; /* Dynamically sized. */
+  /*
+   * In order to avoid rapid chunk allocation/deallocation when an arena
+   * oscillates right on the cusp of needing a new chunk, cache the most
+   * recently freed chunk.  The spare is left in the arena's chunk trees
+   * until it is deleted.
+   *
+   * There is one spare chunk per arena, rather than one spare total, in
+   * order to avoid interactions between multiple threads that could make
+   * a single spare inadequate.
+   */
+  arena_chunk_t* mSpare;
+
+public:
+  /*
+   * Current count of pages within unused runs that are potentially
+   * dirty, and for which madvise(... MADV_FREE) has not been called.  By
+   * tracking this, we can institute a limit on how much dirty unused
+   * memory is mapped for each arena.
+   */
+  size_t mNumDirty;
+  /*
+   * Maximum value allowed for mNumDirty.
+   */
+  size_t mMaxDirty;
+
+private:
+  /*
+   * Size/address-ordered tree of this arena's available runs.  This tree
+   * is used for first-best-fit run allocation.
+   */
+  arena_avail_tree_t mRunsAvail;
+
+public:
+  /*
+   * mBins is used to store rings of free regions of the following sizes,
+   * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
+   *
+   *   mBins[i] | size |
+   *   --------+------+
+   *        0  |    2 |
+   *        1  |    4 |
+   *        2  |    8 |
+   *   --------+------+
+   *        3  |   16 |
+   *        4  |   32 |
+   *        5  |   48 |
+   *        6  |   64 |
+   *           :      :
+   *           :      :
+   *       33  |  496 |
+   *       34  |  512 |
+   *   --------+------+
+   *       35  | 1024 |
+   *       36  | 2048 |
+   *   --------+------+
+   */
+  arena_bin_t mBins[1]; /* Dynamically sized. */
+
+  bool Init();
+
+private:
+  void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
+
+  void DeallocChunk(arena_chunk_t* aChunk);
+
+  arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
+
+  void DallocRun(arena_run_t* aRun, bool aDirty);
+
+  void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
+
+  void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
+
+  void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
+
+  inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
+
+  void* MallocBinHard(arena_bin_t* aBin);
+
+  arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
+
+  inline void* MallocSmall(size_t aSize, bool aZero);
+
+  void* MallocLarge(size_t aSize, bool aZero);
+
+public:
+  inline void* Malloc(size_t aSize, bool aZero);
+
+  void* Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize);
+
+  inline void DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t *aMapElm);
+
+  void DallocLarge(arena_chunk_t* aChunk, void* aPtr);
+
+  void RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
+
+  bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize, size_t aOldSize);
+
+  void Purge(bool aAll);
+
+  void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
  */
 
 /*
@@ -2531,601 +2579,587 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		run->regs_minelm = elm;
 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
 	MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
 	run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
-static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
-    bool zero)
+void
+arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
 {
-	arena_chunk_t *chunk;
-	size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	old_ndirty = chunk->ndirty;
-	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
-	    pagesize_2pow;
-	need_pages = (size >> pagesize_2pow);
-	MOZ_ASSERT(need_pages > 0);
-	MOZ_ASSERT(need_pages <= total_pages);
-	rem_pages = total_pages - need_pages;
-
-	arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
-
-	/* Keep track of trailing unused pages for later use. */
-	if (rem_pages > 0) {
-		chunk->map[run_ind+need_pages].bits = (rem_pages <<
-		    pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
-		    pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
-		    pagesize_mask);
-		arena_avail_tree_insert(&arena->runs_avail,
-		    &chunk->map[run_ind+need_pages]);
-	}
-
-	for (i = 0; i < need_pages; i++) {
-		/*
-		 * Commit decommitted pages if necessary.  If a decommitted
-		 * page is encountered, commit all needed adjacent decommitted
-		 * pages in one operation, in order to reduce system call
-		 * overhead.
-		 */
-		if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
-			size_t j;
-
-			/*
-			 * Advance i+j to just past the index of the last page
-			 * to commit.  Clear CHUNK_MAP_DECOMMITTED and
-			 * CHUNK_MAP_MADVISED along the way.
-			 */
-			for (j = 0; i + j < need_pages && (chunk->map[run_ind +
-			    i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
-				/* DECOMMITTED and MADVISED are mutually exclusive. */
-				MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
-					     chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
-
-				chunk->map[run_ind + i + j].bits &=
-				    ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
-			}
+  arena_chunk_t* chunk;
+  size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aRun);
+  old_ndirty = chunk->ndirty;
+  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
+  need_pages = (aSize >> pagesize_2pow);
+  MOZ_ASSERT(need_pages > 0);
+  MOZ_ASSERT(need_pages <= total_pages);
+  rem_pages = total_pages - need_pages;
+
+  arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+
+  /* Keep track of trailing unused pages for later use. */
+  if (rem_pages > 0) {
+    chunk->map[run_ind+need_pages].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
+        pagesize_mask);
+    chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
+        pagesize_mask);
+    arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind+need_pages]);
+  }
+
+  for (i = 0; i < need_pages; i++) {
+    /*
+     * Commit decommitted pages if necessary.  If a decommitted
+     * page is encountered, commit all needed adjacent decommitted
+     * pages in one operation, in order to reduce system call
+     * overhead.
+     */
+    if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
+      size_t j;
+
+      /*
+       * Advance i+j to just past the index of the last page
+       * to commit.  Clear CHUNK_MAP_DECOMMITTED and
+       * CHUNK_MAP_MADVISED along the way.
+       */
+      for (j = 0; i + j < need_pages && (chunk->map[run_ind +
+          i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
+        /* DECOMMITTED and MADVISED are mutually exclusive. */
+        MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
+               chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
+
+        chunk->map[run_ind + i + j].bits &=
+            ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+      }
 
 #  ifdef MALLOC_DECOMMIT
-			pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
-			    << pagesize_2pow)), (j << pagesize_2pow));
+      pages_commit((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+                   j << pagesize_2pow);
 #  endif
 
-			arena->stats.committed += j;
-
-#  ifndef MALLOC_DECOMMIT
-                }
-#  else
-		} else /* No need to zero since commit zeros. */
+      mStats.committed += j;
+
+    }
+#  ifdef MALLOC_DECOMMIT
+    else /* No need to zero since commit zeroes. */
 #  endif
 
-		/* Zero if necessary. */
-		if (zero) {
-			if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
-			    == 0) {
-				memset((void *)((uintptr_t)chunk + ((run_ind
-				    + i) << pagesize_2pow)), 0, pagesize);
-				/* CHUNK_MAP_ZEROED is cleared below. */
-			}
-		}
-
-		/* Update dirty page accounting. */
-		if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
-			chunk->ndirty--;
-			arena->ndirty--;
-			/* CHUNK_MAP_DIRTY is cleared below. */
-		}
-
-		/* Initialize the chunk map. */
-		if (large) {
-			chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
-			    | CHUNK_MAP_ALLOCATED;
-		} else {
-			chunk->map[run_ind + i].bits = (size_t)run
-			    | CHUNK_MAP_ALLOCATED;
-		}
-	}
-
-	/*
-	 * Set the run size only in the first element for large runs.  This is
-	 * primarily a debugging aid, since the lack of size info for trailing
-	 * pages only matters if the application tries to operate on an
-	 * interior pointer.
-	 */
-	if (large)
-		chunk->map[run_ind].bits |= size;
-
-	if (chunk->ndirty == 0 && old_ndirty > 0)
-		arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
+    /* Zero if necessary. */
+    if (aZero) {
+      if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
+        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+               0, pagesize);
+        /* CHUNK_MAP_ZEROED is cleared below. */
+      }
+    }
+
+    /* Update dirty page accounting. */
+    if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
+      chunk->ndirty--;
+      mNumDirty--;
+      /* CHUNK_MAP_DIRTY is cleared below. */
+    }
+
+    /* Initialize the chunk map. */
+    if (aLarge) {
+      chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    } else {
+      chunk->map[run_ind + i].bits = size_t(aRun) | CHUNK_MAP_ALLOCATED;
+    }
+  }
+
+  /*
+   * Set the run size only in the first element for large runs.  This is
+   * primarily a debugging aid, since the lack of size info for trailing
+   * pages only matters if the application tries to operate on an
+   * interior pointer.
+   */
+  if (aLarge) {
+    chunk->map[run_ind].bits |= aSize;
+  }
+
+  if (chunk->ndirty == 0 && old_ndirty > 0) {
+    arena_chunk_tree_dirty_remove(&mChunksDirty, chunk);
+  }
 }
 
-static void
-arena_chunk_init(arena_t *arena, arena_chunk_t *chunk, bool zeroed)
+void
+arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
 {
-	size_t i;
-	/* WARNING: The following relies on !zeroed meaning "used to be an arena
+  size_t i;
+  /* WARNING: The following relies on !aZeroed meaning "used to be an arena
          * chunk".
          * When the chunk we're initializating as an arena chunk is zeroed, we
          * mark all runs are decommitted and zeroed.
          * When it is not, which we can assume means it's a recycled arena chunk,
          * all it can contain is an arena chunk header (which we're overwriting),
          * and zeroed or poisoned memory (because a recycled arena chunk will
          * have been emptied before being recycled). In that case, we can get
          * away with reusing the chunk as-is, marking all runs as madvised.
          */
-	size_t flags = zeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
-	                      : CHUNK_MAP_MADVISED;
-
-	arena->stats.mapped += chunksize;
-
-	chunk->arena = arena;
-
-	/*
-	 * Claim that no pages are in use, since the header is merely overhead.
-	 */
-	chunk->ndirty = 0;
-
-	/* Initialize the map to contain one maximal free untouched run. */
+  size_t flags = aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
+                         : CHUNK_MAP_MADVISED;
+
+  mStats.mapped += chunksize;
+
+  aChunk->arena = this;
+
+  /*
+   * Claim that no pages are in use, since the header is merely overhead.
+   */
+  aChunk->ndirty = 0;
+
+  /* Initialize the map to contain one maximal free untouched run. */
 #ifdef MALLOC_DECOMMIT
-	arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-	                   (arena_chunk_header_npages << pagesize_2pow));
+  arena_run_t* run = (arena_run_t*)(uintptr_t(aChunk) +
+                     (arena_chunk_header_npages << pagesize_2pow));
 #endif
 
-	for (i = 0; i < arena_chunk_header_npages; i++)
-		chunk->map[i].bits = 0;
-	chunk->map[i].bits = arena_maxclass | flags;
-	for (i++; i < chunk_npages-1; i++) {
-		chunk->map[i].bits = flags;
-	}
-	chunk->map[chunk_npages-1].bits = arena_maxclass | flags;
+  for (i = 0; i < arena_chunk_header_npages; i++) {
+    aChunk->map[i].bits = 0;
+  }
+  aChunk->map[i].bits = arena_maxclass | flags;
+  for (i++; i < chunk_npages-1; i++) {
+    aChunk->map[i].bits = flags;
+  }
+  aChunk->map[chunk_npages-1].bits = arena_maxclass | flags;
 
 #ifdef MALLOC_DECOMMIT
-	/*
-	 * Start out decommitted, in order to force a closer correspondence
-	 * between dirty pages and committed untouched pages.
-	 */
-	pages_decommit(run, arena_maxclass);
+  /*
+   * Start out decommitted, in order to force a closer correspondence
+   * between dirty pages and committed untouched pages.
+   */
+  pages_decommit(run, arena_maxclass);
 #endif
-	arena->stats.committed += arena_chunk_header_npages;
-
-	/* Insert the run into the runs_avail tree. */
-	arena_avail_tree_insert(&arena->runs_avail,
-	    &chunk->map[arena_chunk_header_npages]);
+  mStats.committed += arena_chunk_header_npages;
+
+  /* Insert the run into the tree of available runs. */
+  arena_avail_tree_insert(&mRunsAvail,
+      &aChunk->map[arena_chunk_header_npages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
-	new (&chunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
+  new (&aChunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
-static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+void
+arena_t::DeallocChunk(arena_chunk_t* aChunk)
 {
-
-	if (arena->spare) {
-		if (arena->spare->ndirty > 0) {
-			arena_chunk_tree_dirty_remove(
-			    &chunk->arena->chunks_dirty, arena->spare);
-			arena->ndirty -= arena->spare->ndirty;
-			arena->stats.committed -= arena->spare->ndirty;
-		}
+  if (mSpare) {
+    if (mSpare->ndirty > 0) {
+      arena_chunk_tree_dirty_remove(&aChunk->arena->mChunksDirty, mSpare);
+      mNumDirty -= mSpare->ndirty;
+      mStats.committed -= mSpare->ndirty;
+    }
 
 #ifdef MALLOC_DOUBLE_PURGE
-		if (arena->chunks_madvised.ElementProbablyInList(arena->spare)) {
-			arena->chunks_madvised.remove(arena->spare);
-		}
+    if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
+      mChunksMAdvised.remove(mSpare);
+    }
 #endif
 
-		chunk_dealloc((void *)arena->spare, chunksize, ARENA_CHUNK);
-		arena->stats.mapped -= chunksize;
-		arena->stats.committed -= arena_chunk_header_npages;
-	}
-
-	/*
-	 * Remove run from runs_avail, so that the arena does not use it.
-	 * Dirty page flushing only uses the chunks_dirty tree, so leaving this
-	 * chunk in the chunks_* trees is sufficient for that purpose.
-	 */
-	arena_avail_tree_remove(&arena->runs_avail,
-	    &chunk->map[arena_chunk_header_npages]);
-
-	arena->spare = chunk;
+    chunk_dealloc((void*)mSpare, chunksize, ARENA_CHUNK);
+    mStats.mapped -= chunksize;
+    mStats.committed -= arena_chunk_header_npages;
+  }
+
+  /*
+   * Remove run from the tree of available runs, so that the arena does not use it.
+   * Dirty page flushing only uses the tree of dirty chunks, so leaving this
+   * chunk in the chunks_* trees is sufficient for that purpose.
+   */
+  arena_avail_tree_remove(&mRunsAvail, &aChunk->map[arena_chunk_header_npages]);
+
+  mSpare = aChunk;
 }
 
-static arena_run_t *
-arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
-    bool zero)
+arena_run_t*
+arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
-	arena_run_t *run;
-	arena_chunk_map_t *mapelm, key;
-
-	MOZ_ASSERT(size <= arena_maxclass);
-	MOZ_ASSERT((size & pagesize_mask) == 0);
-
-	/* Search the arena's chunks for the lowest best fit. */
-	key.bits = size | CHUNK_MAP_KEY;
-	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
-	if (mapelm) {
-		arena_chunk_t *chunk =
-		    (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
-		size_t pageind = ((uintptr_t)mapelm -
-		    (uintptr_t)chunk->map) /
-		    sizeof(arena_chunk_map_t);
-
-		run = (arena_run_t *)((uintptr_t)chunk + (pageind
-		    << pagesize_2pow));
-		arena_run_split(arena, run, size, large, zero);
-		return (run);
-	}
-
-	if (arena->spare) {
-		/* Use the spare. */
-		arena_chunk_t *chunk = arena->spare;
-		arena->spare = nullptr;
-		run = (arena_run_t *)((uintptr_t)chunk +
-		    (arena_chunk_header_npages << pagesize_2pow));
-		/* Insert the run into the runs_avail tree. */
-		arena_avail_tree_insert(&arena->runs_avail,
-		    &chunk->map[arena_chunk_header_npages]);
-		arena_run_split(arena, run, size, large, zero);
-		return (run);
-	}
-
-	/*
-	 * No usable runs.  Create a new chunk from which to allocate
-	 * the run.
-	 */
-	{
-		bool zeroed;
-		arena_chunk_t *chunk = (arena_chunk_t *)
-		    chunk_alloc(chunksize, chunksize, false, &zeroed);
-		if (!chunk)
-			return nullptr;
-
-		arena_chunk_init(arena, chunk, zeroed);
-		run = (arena_run_t *)((uintptr_t)chunk +
-		    (arena_chunk_header_npages << pagesize_2pow));
-	}
-	/* Update page map. */
-	arena_run_split(arena, run, size, large, zero);
-	return (run);
+  arena_run_t* run;
+  arena_chunk_map_t* mapelm;
+  arena_chunk_map_t key;
+
+  MOZ_ASSERT(aSize <= arena_maxclass);
+  MOZ_ASSERT((aSize & pagesize_mask) == 0);
+
+  /* Search the arena's chunks for the lowest best fit. */
+  key.bits = aSize | CHUNK_MAP_KEY;
+  mapelm = arena_avail_tree_nsearch(&mRunsAvail, &key);
+  if (mapelm) {
+    arena_chunk_t* chunk =
+        (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
+    size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
+        sizeof(arena_chunk_map_t);
+
+    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
+    SplitRun(run, aSize, aLarge, aZero);
+    return run;
+  }
+
+  if (mSpare) {
+    /* Use the spare. */
+    arena_chunk_t* chunk = mSpare;
+    mSpare = nullptr;
+    run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+    /* Insert the run into the tree of available runs. */
+    arena_avail_tree_insert(&mRunsAvail, &chunk->map[arena_chunk_header_npages]);
+    SplitRun(run, aSize, aLarge, aZero);
+    return run;
+  }
+
+  /*
+   * No usable runs.  Create a new chunk from which to allocate
+   * the run.
+   */
+  {
+    bool zeroed;
+    arena_chunk_t* chunk = (arena_chunk_t*)
+        chunk_alloc(chunksize, chunksize, false, &zeroed);
+    if (!chunk) {
+      return nullptr;
+    }
+
+    InitChunk(chunk, zeroed);
+    run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
+  }
+  /* Update page map. */
+  SplitRun(run, aSize, aLarge, aZero);
+  return run;
 }
 
-static void
-arena_purge(arena_t *arena, bool all)
+void
+arena_t::Purge(bool aAll)
 {
-	arena_chunk_t *chunk;
-	size_t i, npages;
-	/* If all is set purge all dirty pages. */
-	size_t dirty_max = all ? 1 : arena->dirty_max;
+  arena_chunk_t* chunk;
+  size_t i, npages;
+  /* If all is set purge all dirty pages. */
+  size_t dirty_max = aAll ? 1 : mMaxDirty;
 #ifdef MOZ_DEBUG
-	size_t ndirty = 0;
-	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
-	    chunk) {
-		ndirty += chunk->ndirty;
-	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
-	MOZ_ASSERT(ndirty == arena->ndirty);
+  size_t ndirty = 0;
+  rb_foreach_begin(arena_chunk_t, link_dirty, &mChunksDirty, chunk) {
+    ndirty += chunk->ndirty;
+  } rb_foreach_end(arena_chunk_t, link_dirty, &mChunksDirty, chunk)
+  MOZ_ASSERT(ndirty == mNumDirty);
 #endif
-	MOZ_DIAGNOSTIC_ASSERT(all || (arena->ndirty > arena->dirty_max));
-
-	/*
-	 * Iterate downward through chunks until enough dirty memory has been
-	 * purged.  Terminate as soon as possible in order to minimize the
-	 * number of system calls, even if a chunk has only been partially
-	 * purged.
-	 */
-	while (arena->ndirty > (dirty_max >> 1)) {
+  MOZ_DIAGNOSTIC_ASSERT(aAll || (mNumDirty > mMaxDirty));
+
+  /*
+   * Iterate downward through chunks until enough dirty memory has been
+   * purged.  Terminate as soon as possible in order to minimize the
+   * number of system calls, even if a chunk has only been partially
+   * purged.
+   */
+  while (mNumDirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
-		bool madvised = false;
+    bool madvised = false;
 #endif
-		chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
-		MOZ_DIAGNOSTIC_ASSERT(chunk);
-
-		for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
-			MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
-
-			if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
+    chunk = arena_chunk_tree_dirty_last(&mChunksDirty);
+    MOZ_DIAGNOSTIC_ASSERT(chunk);
+
+    for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
+      MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
+
+      if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
-				const size_t free_operation = CHUNK_MAP_DECOMMITTED;
+        const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
-				const size_t free_operation = CHUNK_MAP_MADVISED;
+        const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
-				MOZ_ASSERT((chunk->map[i].bits &
-				            CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
-				chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
-				/* Find adjacent dirty run(s). */
-				for (npages = 1;
-				     i > arena_chunk_header_npages &&
-				       (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
-				     npages++) {
-					i--;
-					MOZ_ASSERT((chunk->map[i].bits &
-					            CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
-					chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
-				}
-				chunk->ndirty -= npages;
-				arena->ndirty -= npages;
+        MOZ_ASSERT((chunk->map[i].bits &
+                    CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+        chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
+        /* Find adjacent dirty run(s). */
+        for (npages = 1;
+             i > arena_chunk_header_npages &&
+               (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
+             npages++) {
+          i--;
+          MOZ_ASSERT((chunk->map[i].bits &
+                      CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
+          chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
+        }
+        chunk->ndirty -= npages;
+        mNumDirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
-				pages_decommit((void *)((uintptr_t)
-				    chunk + (i << pagesize_2pow)),
-				    (npages << pagesize_2pow));
+        pages_decommit((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
+                       (npages << pagesize_2pow));
 #endif
-				arena->stats.committed -= npages;
+        mStats.committed -= npages;
 
 #ifndef MALLOC_DECOMMIT
-				madvise((void *)((uintptr_t)chunk + (i <<
-				    pagesize_2pow)), (npages << pagesize_2pow),
-				    MADV_FREE);
+        madvise((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
+                (npages << pagesize_2pow), MADV_FREE);
 #  ifdef MALLOC_DOUBLE_PURGE
-				madvised = true;
+        madvised = true;
 #  endif
 #endif
-				if (arena->ndirty <= (dirty_max >> 1))
-					break;
-			}
-		}
-
-		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
-			    chunk);
-		}
+        if (mNumDirty <= (dirty_max >> 1)) {
+          break;
+        }
+      }
+    }
+
+    if (chunk->ndirty == 0) {
+      arena_chunk_tree_dirty_remove(&mChunksDirty, chunk);
+    }
 #ifdef MALLOC_DOUBLE_PURGE
-		if (madvised) {
-			/* The chunk might already be in the list, but this
-			 * makes sure it's at the front. */
-			if (arena->chunks_madvised.ElementProbablyInList(chunk)) {
-				arena->chunks_madvised.remove(chunk);
-			}
-			arena->chunks_madvised.pushFront(chunk);
-		}
+    if (madvised) {
+      /* The chunk might already be in the list, but this
+       * makes sure it's at the front. */
+      if (mChunksMAdvised.ElementProbablyInList(chunk)) {
+        mChunksMAdvised.remove(chunk);
+      }
+      mChunksMAdvised.pushFront(chunk);
+    }
 #endif
-	}
+  }
 }
 
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+void
+arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
-	arena_chunk_t *chunk;
-	size_t size, run_ind, run_pages;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
-	if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
-		size = chunk->map[run_ind].bits & ~pagesize_mask;
-	else
-		size = run->bin->run_size;
-	run_pages = (size >> pagesize_2pow);
-
-	/* Mark pages as unallocated in the chunk map. */
-	if (dirty) {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
-			    == 0);
-			chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
-		}
-
-		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
-			    chunk);
-		}
-		chunk->ndirty += run_pages;
-		arena->ndirty += run_pages;
-	} else {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
-			    CHUNK_MAP_ALLOCATED);
-		}
-	}
-	chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-	    pagesize_mask);
-	chunk->map[run_ind+run_pages-1].bits = size |
-	    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-
-	/* Try to coalesce forward. */
-	if (run_ind + run_pages < chunk_npages &&
-	    (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
-		size_t nrun_size = chunk->map[run_ind+run_pages].bits &
-		    ~pagesize_mask;
-
-		/*
-		 * Remove successor from runs_avail; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->runs_avail,
-		    &chunk->map[run_ind+run_pages]);
-
-		size += nrun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
-		    == nrun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Try to coalesce backward. */
-	if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
-	    CHUNK_MAP_ALLOCATED) == 0) {
-		size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
-
-		run_ind -= prun_size >> pagesize_2pow;
-
-		/*
-		 * Remove predecessor from runs_avail; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->runs_avail,
-		    &chunk->map[run_ind]);
-
-		size += prun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
-		    prun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Insert into runs_avail, now that coalescing is complete. */
-	arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
-
-	/* Deallocate chunk if it is now completely unused. */
-	if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
-	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
-		arena_chunk_dealloc(arena, chunk);
-
-	/* Enforce dirty_max. */
-	if (arena->ndirty > arena->dirty_max)
-		arena_purge(arena, false);
+  arena_chunk_t* chunk;
+  size_t size, run_ind, run_pages;
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aRun);
+  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
+  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+    size = chunk->map[run_ind].bits & ~pagesize_mask;
+  else
+    size = aRun->bin->run_size;
+  run_pages = (size >> pagesize_2pow);
+
+  /* Mark pages as unallocated in the chunk map. */
+  if (aDirty) {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+          == 0);
+      chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
+    }
+
+    if (chunk->ndirty == 0) {
+      arena_chunk_tree_dirty_insert(&mChunksDirty,
+          chunk);
+    }
+    chunk->ndirty += run_pages;
+    mNumDirty += run_pages;
+  } else {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
+          CHUNK_MAP_ALLOCATED);
+    }
+  }
+  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+      pagesize_mask);
+  chunk->map[run_ind+run_pages-1].bits = size |
+      (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+
+  /* Try to coalesce forward. */
+  if (run_ind + run_pages < chunk_npages &&
+      (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+    size_t nrun_size = chunk->map[run_ind+run_pages].bits &
+        ~pagesize_mask;
+
+    /*
+     * Remove successor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail,
+        &chunk->map[run_ind+run_pages]);
+
+    size += nrun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+        == nrun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Try to coalesce backward. */
+  if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
+      CHUNK_MAP_ALLOCATED) == 0) {
+    size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
+
+    run_ind -= prun_size >> pagesize_2pow;
+
+    /*
+     * Remove predecessor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+
+    size += prun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+        prun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Insert into tree of available runs, now that coalescing is complete. */
+  arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind]);
+
+  /* Deallocate chunk if it is now completely unused. */
+  if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
+      CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+    DeallocChunk(chunk);
+  }
+
+  /* Enforce mMaxDirty. */
+  if (mNumDirty > mMaxDirty) {
+    Purge(false);
+  }
 }
 
-static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
-    size_t oldsize, size_t newsize)
+void
+arena_t::TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
+                     size_t aNewSize)
 {
-	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
-	size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
-
-	MOZ_ASSERT(oldsize > newsize);
-
-	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
-	 * leading run as separately allocated.
-	 */
-	chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
-	    CHUNK_MAP_ALLOCATED;
-	chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
-	    CHUNK_MAP_ALLOCATED;
-
-	arena_run_dalloc(arena, run, false);
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
+  size_t head_npages = (aOldSize - aNewSize) >> pagesize_2pow;
+
+  MOZ_ASSERT(aOldSize > aNewSize);
+
+  /*
+   * Update the chunk map so that arena_t::RunDalloc() can treat the
+   * leading run as separately allocated.
+   */
+  aChunk->map[pageind].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE |
+      CHUNK_MAP_ALLOCATED;
+  aChunk->map[pageind+head_npages].bits = aNewSize | CHUNK_MAP_LARGE |
+      CHUNK_MAP_ALLOCATED;
+
+  DallocRun(aRun, false);
 }
 
-static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
-    size_t oldsize, size_t newsize, bool dirty)
+void
+arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
+                     size_t aNewSize, bool aDirty)
 {
-	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
-	size_t npages = newsize >> pagesize_2pow;
-
-	MOZ_ASSERT(oldsize > newsize);
-
-	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
-	 * trailing run as separately allocated.
-	 */
-	chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
-	    CHUNK_MAP_ALLOCATED;
-	chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
-	    | CHUNK_MAP_ALLOCATED;
-
-	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
-	    dirty);
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
+  size_t npages = aNewSize >> pagesize_2pow;
+
+  MOZ_ASSERT(aOldSize > aNewSize);
+
+  /*
+   * Update the chunk map so that arena_t::RunDalloc() can treat the
+   * trailing run as separately allocated.
+   */
+  aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE |
+      CHUNK_MAP_ALLOCATED;
+  aChunk->map[pageind+npages].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE
+      | CHUNK_MAP_ALLOCATED;
+
+  DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
 }
 
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+arena_run_t*
+arena_t::GetNonFullBinRun(arena_bin_t* aBin)
 {
-	arena_chunk_map_t *mapelm;
-	arena_run_t *run;
-	unsigned i, remainder;
-
-	/* Look for a usable run. */
-	mapelm = arena_run_tree_first(&bin->runs);
-	if (mapelm) {
-		/* run is guaranteed to have available space. */
-		arena_run_tree_remove(&bin->runs, mapelm);
-		run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-		return (run);
-	}
-	/* No existing runs have any space available. */
-
-	/* Allocate a new run. */
-	run = arena_run_alloc(arena, bin, bin->run_size, false, false);
-	if (!run)
-		return nullptr;
-	/*
-	 * Don't initialize if a race in arena_run_alloc() allowed an existing
-	 * run to become usable.
-	 */
-	if (run == bin->runcur)
-		return (run);
-
-	/* Initialize run internals. */
-	run->bin = bin;
-
-	for (i = 0; i < bin->regs_mask_nelms - 1; i++)
-		run->regs_mask[i] = UINT_MAX;
-	remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
-	if (remainder == 0)
-		run->regs_mask[i] = UINT_MAX;
-	else {
-		/* The last element has spare bits that need to be unset. */
-		run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
-		    - remainder));
-	}
-
-	run->regs_minelm = 0;
-
-	run->nfree = bin->nregs;
+  arena_chunk_map_t* mapelm;
+  arena_run_t* run;
+  unsigned i, remainder;
+
+  /* Look for a usable run. */
+  mapelm = arena_run_tree_first(&aBin->runs);
+  if (mapelm) {
+    /* run is guaranteed to have available space. */
+    arena_run_tree_remove(&aBin->runs, mapelm);
+    run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+    return run;
+  }
+  /* No existing runs have any space available. */
+
+  /* Allocate a new run. */
+  run = AllocRun(aBin, aBin->run_size, false, false);
+  if (!run)
+    return nullptr;
+  /*
+   * Don't initialize if a race in arena_t::RunAlloc() allowed an existing
+   * run to become usable.
+   */
+  if (run == aBin->runcur) {
+    return run;
+  }
+
+  /* Initialize run internals. */
+  run->bin = aBin;
+
+  for (i = 0; i < aBin->regs_mask_nelms - 1; i++) {
+    run->regs_mask[i] = UINT_MAX;
+  }
+  remainder = aBin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+  if (remainder == 0) {
+    run->regs_mask[i] = UINT_MAX;
+  } else {
+    /* The last element has spare bits that need to be unset. */
+    run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+        - remainder));
+  }
+
+  run->regs_minelm = 0;
+
+  run->nfree = aBin->nregs;
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	run->magic = ARENA_RUN_MAGIC;
+  run->magic = ARENA_RUN_MAGIC;
 #endif
 
-	bin->stats.curruns++;
-	return (run);
+  aBin->stats.curruns++;
+  return run;
 }
 
 /* bin->runcur must have space available before this function is called. */
-static inline void *
-arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
+void*
+arena_t::MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun)
 {
-	void *ret;
-
-	MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-	MOZ_DIAGNOSTIC_ASSERT(run->nfree > 0);
-
-	ret = arena_run_reg_alloc(run, bin);
-	MOZ_DIAGNOSTIC_ASSERT(ret);
-	run->nfree--;
-
-	return (ret);
+  void* ret;
+
+  MOZ_DIAGNOSTIC_ASSERT(aRun->magic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(aRun->nfree > 0);
+
+  ret = arena_run_reg_alloc(aRun, aBin);
+  MOZ_DIAGNOSTIC_ASSERT(ret);
+  aRun->nfree--;
+
+  return ret;
 }
 
-/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
-static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+/* Re-fill aBin->runcur, then call arena_t::MallocBinEasy(). */
+void*
+arena_t::MallocBinHard(arena_bin_t* aBin)
 {
-
-	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
-	if (!bin->runcur)
-		return nullptr;
-	MOZ_DIAGNOSTIC_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
-	MOZ_DIAGNOSTIC_ASSERT(bin->runcur->nfree > 0);
-
-	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
+  aBin->runcur = GetNonFullBinRun(aBin);
+  if (!aBin->runcur) {
+    return nullptr;
+  }
+  MOZ_DIAGNOSTIC_ASSERT(aBin->runcur->magic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(aBin->runcur->nfree > 0);
+
+  return MallocBinEasy(aBin, aBin->runcur);
 }
 
 /*
  * Calculate bin->run_size such that it meets the following constraints:
  *
  *   *) bin->run_size >= min_run_size
  *   *) bin->run_size <= arena_maxclass
  *   *) bin->run_size <= RUN_MAX_SMALL
@@ -3200,188 +3234,185 @@ arena_bin_run_size_calc(arena_bin_t *bin
 	bin->run_size = good_run_size;
 	bin->nregs = good_nregs;
 	bin->regs_mask_nelms = good_mask_nelms;
 	bin->reg0_offset = good_reg0_offset;
 
 	return (good_run_size);
 }
 
-static inline void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
+void*
+arena_t::MallocSmall(size_t aSize, bool aZero)
 {
-	void *ret;
-	arena_bin_t *bin;
-	arena_run_t *run;
-
-	if (size < small_min) {
-		/* Tiny. */
-		size = pow2_ceil(size);
-		bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
-		    1)))];
-		/*
-		 * Bin calculation is always correct, but we may need
-		 * to fix size for the purposes of assertions and/or
-		 * stats accuracy.
-		 */
-		if (size < (1U << TINY_MIN_2POW))
-			size = (1U << TINY_MIN_2POW);
-	} else if (size <= small_max) {
-		/* Quantum-spaced. */
-		size = QUANTUM_CEILING(size);
-		bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
-		    - 1];
-	} else {
-		/* Sub-page. */
-		size = pow2_ceil(size);
-		bin = &arena->bins[ntbins + nqbins
-		    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
-	}
-	MOZ_DIAGNOSTIC_ASSERT(size == bin->reg_size);
-
-	malloc_spin_lock(&arena->lock);
-	if ((run = bin->runcur) && run->nfree > 0)
-		ret = arena_bin_malloc_easy(arena, bin, run);
-	else
-		ret = arena_bin_malloc_hard(arena, bin);
-
-	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
-		return nullptr;
-	}
-
-	arena->stats.allocated_small += size;
-	malloc_spin_unlock(&arena->lock);
-
-	if (zero == false) {
-		if (opt_junk)
-			memset(ret, kAllocJunk, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-	} else
-		memset(ret, 0, size);
-
-	return (ret);
+  void* ret;
+  arena_bin_t* bin;
+  arena_run_t* run;
+
+  if (aSize < small_min) {
+    /* Tiny. */
+    aSize = pow2_ceil(aSize);
+    bin = &mBins[ffs((int)(aSize >> (TINY_MIN_2POW + 1)))];
+    /*
+     * Bin calculation is always correct, but we may need
+     * to fix size for the purposes of assertions and/or
+     * stats accuracy.
+     */
+    if (aSize < (1U << TINY_MIN_2POW)) {
+      aSize = 1U << TINY_MIN_2POW;
+    }
+  } else if (aSize <= small_max) {
+    /* Quantum-spaced. */
+    aSize = QUANTUM_CEILING(aSize);
+    bin = &mBins[ntbins + (aSize >> opt_quantum_2pow) - 1];
+  } else {
+    /* Sub-page. */
+    aSize = pow2_ceil(aSize);
+    bin = &mBins[ntbins + nqbins
+        + (ffs((int)(aSize >> opt_small_max_2pow)) - 2)];
+  }
+  MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
+
+  malloc_spin_lock(&mLock);
+  if ((run = bin->runcur) && run->nfree > 0) {
+    ret = MallocBinEasy(bin, run);
+  } else {
+    ret = MallocBinHard(bin);
+  }
+
+  if (!ret) {
+    malloc_spin_unlock(&mLock);
+    return nullptr;
+  }
+
+  mStats.allocated_small += aSize;
+  malloc_spin_unlock(&mLock);
+
+  if (aZero == false) {
+    if (opt_junk) {
+      memset(ret, kAllocJunk, aSize);
+    } else if (opt_zero) {
+      memset(ret, 0, aSize);
+    }
+  } else
+    memset(ret, 0, aSize);
+
+  return ret;
 }
 
-static void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
+void*
+arena_t::MallocLarge(size_t aSize, bool aZero)
 {
-	void *ret;
-
-	/* Large allocation. */
-	size = PAGE_CEILING(size);
-	malloc_spin_lock(&arena->lock);
-	ret = (void *)arena_run_alloc(arena, nullptr, size, true, zero);
-	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
-		return nullptr;
-	}
-	arena->stats.allocated_large += size;
-	malloc_spin_unlock(&arena->lock);
-
-	if (zero == false) {
-		if (opt_junk)
-			memset(ret, kAllocJunk, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-	}
-
-	return (ret);
+  void* ret;
+
+  /* Large allocation. */
+  aSize = PAGE_CEILING(aSize);
+  malloc_spin_lock(&mLock);
+  ret = AllocRun(nullptr, aSize, true, aZero);
+  if (!ret) {
+    malloc_spin_unlock(&mLock);
+    return nullptr;
+  }
+  mStats.allocated_large += aSize;
+  malloc_spin_unlock(&mLock);
+
+  if (aZero == false) {
+    if (opt_junk) {
+      memset(ret, kAllocJunk, aSize);
+    } else if (opt_zero) {
+      memset(ret, 0, aSize);
+    }
+  }
+
+  return (ret);
 }
 
-static inline void *
-arena_malloc(arena_t *arena, size_t size, bool zero)
+void*
+arena_t::Malloc(size_t aSize, bool aZero)
 {
-
-	MOZ_ASSERT(arena);
-	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
-	MOZ_ASSERT(size != 0);
-	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
-
-	if (size <= bin_maxclass) {
-		return (arena_malloc_small(arena, size, zero));
-	} else
-		return (arena_malloc_large(arena, size, zero));
+  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
+  MOZ_ASSERT(aSize != 0);
+  MOZ_ASSERT(QUANTUM_CEILING(aSize) <= arena_maxclass);
+
+  return (aSize <= bin_maxclass) ? MallocSmall(aSize, aZero)
+                                 : MallocLarge(aSize, aZero);
 }
 
 static inline void *
 imalloc(size_t size)
 {
 
 	MOZ_ASSERT(size != 0);
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(choose_arena(size), size, false));
+		return choose_arena(size)->Malloc(size, false);
 	else
 		return (huge_malloc(size, false));
 }
 
 static inline void *
 icalloc(size_t size)
 {
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(choose_arena(size), size, true));
+		return choose_arena(size)->Malloc(size, true);
 	else
 		return (huge_malloc(size, true));
 }
 
 /* Only handles large allocations that require more than page alignment. */
-static void *
-arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
+void*
+arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
 {
-	void *ret;
-	size_t offset;
-	arena_chunk_t *chunk;
-
-	MOZ_ASSERT((size & pagesize_mask) == 0);
-	MOZ_ASSERT((alignment & pagesize_mask) == 0);
-
-	malloc_spin_lock(&arena->lock);
-	ret = (void *)arena_run_alloc(arena, nullptr, alloc_size, true, false);
-	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
-		return nullptr;
-	}
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-
-	offset = (uintptr_t)ret & (alignment - 1);
-	MOZ_ASSERT((offset & pagesize_mask) == 0);
-	MOZ_ASSERT(offset < alloc_size);
-	if (offset == 0)
-		arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
-	else {
-		size_t leadsize, trailsize;
-
-		leadsize = alignment - offset;
-		if (leadsize > 0) {
-			arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
-			    alloc_size - leadsize);
-			ret = (void *)((uintptr_t)ret + leadsize);
-		}
-
-		trailsize = alloc_size - leadsize - size;
-		if (trailsize != 0) {
-			/* Trim trailing space. */
-			MOZ_ASSERT(trailsize < alloc_size);
-			arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
-			    size, false);
-		}
-	}
-
-	arena->stats.allocated_large += size;
-	malloc_spin_unlock(&arena->lock);
-
-	if (opt_junk)
-		memset(ret, kAllocJunk, size);
-	else if (opt_zero)
-		memset(ret, 0, size);
-	return (ret);
+  void* ret;
+  size_t offset;
+  arena_chunk_t* chunk;
+
+  MOZ_ASSERT((aSize & pagesize_mask) == 0);
+  MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
+
+  malloc_spin_lock(&mLock);
+  ret = AllocRun(nullptr, aAllocSize, true, false);
+  if (!ret) {
+    malloc_spin_unlock(&mLock);
+    return nullptr;
+  }
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
+
+  offset = uintptr_t(ret) & (aAlignment - 1);
+  MOZ_ASSERT((offset & pagesize_mask) == 0);
+  MOZ_ASSERT(offset < aAllocSize);
+  if (offset == 0) {
+    TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
+  } else {
+    size_t leadsize, trailsize;
+
+    leadsize = aAlignment - offset;
+    if (leadsize > 0) {
+      TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
+      ret = (void*)(uintptr_t(ret) + leadsize);
+    }
+
+    trailsize = aAllocSize - leadsize - aSize;
+    if (trailsize != 0) {
+      /* Trim trailing space. */
+      MOZ_ASSERT(trailsize < aAllocSize);
+      TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
+    }
+  }
+
+  mStats.allocated_large += aSize;
+  malloc_spin_unlock(&mLock);
+
+  if (opt_junk) {
+    memset(ret, kAllocJunk, aSize);
+  } else if (opt_zero) {
+    memset(ret, 0, aSize);
+  }
+  return ret;
 }
 
 static inline void *
 ipalloc(size_t alignment, size_t size)
 {
 	void *ret;
 	size_t ceil_size;
 
@@ -3410,17 +3441,17 @@ ipalloc(size_t alignment, size_t size)
 	 */
 	if (ceil_size < size) {
 		/* size_t overflow. */
 		return nullptr;
 	}
 
 	if (ceil_size <= pagesize || (alignment <= pagesize
 	    && ceil_size <= arena_maxclass))
-		ret = arena_malloc(choose_arena(size), ceil_size, false);
+		ret = choose_arena(size)->Malloc(ceil_size, false);
 	else {
 		size_t run_size;
 
 		/*
 		 * We can't achieve sub-page alignment, so round up alignment
 		 * permanently; it makes later calculations simpler.
 		 */
 		alignment = PAGE_CEILING(alignment);
@@ -3457,17 +3488,17 @@ ipalloc(size_t alignment, size_t size)
 			 * the first conditional below to fail, which means
 			 * that the bogus run_size value never gets used for
 			 * anything important.
 			 */
 			run_size = (alignment << 1) - pagesize;
 		}
 
 		if (run_size <= arena_maxclass) {
-			ret = arena_palloc(choose_arena(size), alignment, ceil_size,
+			ret = choose_arena(size)->Palloc(alignment, ceil_size,
 			    run_size);
 		} else if (alignment <= chunksize)
 			ret = huge_malloc(ceil_size, false);
 		else
 			ret = huge_palloc(ceil_size, alignment, false);
 	}
 
 	MOZ_ASSERT(((uintptr_t)ret & (alignment - 1)) == 0);
@@ -3522,17 +3553,17 @@ isalloc_validate(const void* ptr)
     return 0;
   }
 
   if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
     return 0;
   }
 
   if (chunk != ptr) {
-    MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+    MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
     return arena_salloc(ptr);
   } else {
     size_t ret;
     extent_node_t* node;
     extent_node_t key;
 
     /* Chunk. */
     key.addr = (void*)chunk;
@@ -3553,17 +3584,17 @@ isalloc(const void *ptr)
 	size_t ret;
 	arena_chunk_t *chunk;
 
 	MOZ_ASSERT(ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		MOZ_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+		MOZ_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
 		ret = arena_salloc(ptr);
 	} else {
 		extent_node_t *node, key;
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
@@ -3609,17 +3640,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
   }
 
   // It's not a huge allocation. Check if we have a known chunk.
   if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
-  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
   size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
   if (pageind < arena_chunk_header_npages) {
     // Within the chunk header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
@@ -3704,107 +3735,93 @@ MozJemalloc::jemalloc_ptr_info(const voi
   unsigned elm = regind >> (SIZEOF_INT_2POW + 3);
   unsigned bit = regind - (elm << (SIZEOF_INT_2POW + 3));
   PtrInfoTag tag = ((run->regs_mask[elm] & (1U << bit)))
                  ? TagFreedSmall : TagLiveSmall;
 
   *aInfo = { tag, addr, size};
 }
 
-static inline void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_t *mapelm)
+void
+arena_t::DallocSmall(arena_chunk_t* aChunk, void* aPtr, arena_chunk_map_t* aMapElm)
 {
-	arena_run_t *run;
-	arena_bin_t *bin;
-	size_t size;
-
-	run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-	MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-	bin = run->bin;
-	size = bin->reg_size;
-
-	memset(ptr, kAllocPoison, size);
-
-	arena_run_reg_dalloc(run, bin, ptr, size);
-	run->nfree++;
-
-	if (run->nfree == bin->nregs) {
-		/* Deallocate run. */
-		if (run == bin->runcur)
-			bin->runcur = nullptr;
-		else if (bin->nregs != 1) {
-			size_t run_pageind = (((uintptr_t)run -
-			    (uintptr_t)chunk)) >> pagesize_2pow;
-			arena_chunk_map_t *run_mapelm =
-			    &chunk->map[run_pageind];
-			/*
-			 * This block's conditional is necessary because if the
-			 * run only contains one region, then it never gets
-			 * inserted into the non-full runs tree.
-			 */
-			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
-				run_mapelm);
-			arena_run_tree_remove(&bin->runs, run_mapelm);
-		}
+  arena_run_t* run;
+  arena_bin_t* bin;
+  size_t size;
+
+  run = (arena_run_t*)(aMapElm->bits & ~pagesize_mask);
+  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+  bin = run->bin;
+  size = bin->reg_size;
+
+  memset(aPtr, kAllocPoison, size);
+
+  arena_run_reg_dalloc(run, bin, aPtr, size);
+  run->nfree++;
+
+  if (run->nfree == bin->nregs) {
+    /* Deallocate run. */
+    if (run == bin->runcur) {
+      bin->runcur = nullptr;
+    } else if (bin->nregs != 1) {
+      size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
+      /*
+       * This block's conditional is necessary because if the
+       * run only contains one region, then it never gets
+       * inserted into the non-full runs tree.
+       */
+      MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) == run_mapelm);
+      arena_run_tree_remove(&bin->runs, run_mapelm);
+    }
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-		run->magic = 0;
+    run->magic = 0;
 #endif
-		arena_run_dalloc(arena, run, true);
-		bin->stats.curruns--;
-	} else if (run->nfree == 1 && run != bin->runcur) {
-		/*
-		 * Make sure that bin->runcur always refers to the lowest
-		 * non-full run, if one exists.
-		 */
-		if (!bin->runcur)
-			bin->runcur = run;
-		else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
-			/* Switch runcur. */
-			if (bin->runcur->nfree > 0) {
-				arena_chunk_t *runcur_chunk =
-				    (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
-				size_t runcur_pageind =
-				    (((uintptr_t)bin->runcur -
-				    (uintptr_t)runcur_chunk)) >> pagesize_2pow;
-				arena_chunk_map_t *runcur_mapelm =
-				    &runcur_chunk->map[runcur_pageind];
-
-				/* Insert runcur. */
-				MOZ_DIAGNOSTIC_ASSERT(!arena_run_tree_search(&bin->runs,
-				    runcur_mapelm));
-				arena_run_tree_insert(&bin->runs,
-				    runcur_mapelm);
-			}
-			bin->runcur = run;
-		} else {
-			size_t run_pageind = (((uintptr_t)run -
-			    (uintptr_t)chunk)) >> pagesize_2pow;
-			arena_chunk_map_t *run_mapelm =
-			    &chunk->map[run_pageind];
-
-			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
-			    nullptr);
-			arena_run_tree_insert(&bin->runs, run_mapelm);
-		}
-	}
-	arena->stats.allocated_small -= size;
+    DallocRun(run, true);
+    bin->stats.curruns--;
+  } else if (run->nfree == 1 && run != bin->runcur) {
+    /*
+     * Make sure that bin->runcur always refers to the lowest
+     * non-full run, if one exists.
+     */
+    if (!bin->runcur) {
+      bin->runcur = run;
+    } else if (uintptr_t(run) < uintptr_t(bin->runcur)) {
+      /* Switch runcur. */
+      if (bin->runcur->nfree > 0) {
+        arena_chunk_t* runcur_chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
+        size_t runcur_pageind = (uintptr_t(bin->runcur) - uintptr_t(runcur_chunk)) >> pagesize_2pow;
+        arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
+
+        /* Insert runcur. */
+        MOZ_DIAGNOSTIC_ASSERT(!arena_run_tree_search(&bin->runs, runcur_mapelm));
+        arena_run_tree_insert(&bin->runs, runcur_mapelm);
+      }
+      bin->runcur = run;
+    } else {
+      size_t run_pageind = (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+      arena_chunk_map_t *run_mapelm = &aChunk->map[run_pageind];
+
+      MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) == nullptr);
+      arena_run_tree_insert(&bin->runs, run_mapelm);
+    }
+  }
+  mStats.allocated_small -= size;
 }
 
-static void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+void
+arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
 {
-	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
-	    pagesize_2pow;
-	size_t size = chunk->map[pageind].bits & ~pagesize_mask;
-
-	memset(ptr, kAllocPoison, size);
-	arena->stats.allocated_large -= size;
-
-	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
+  size_t size = aChunk->map[pageind].bits & ~pagesize_mask;
+
+  memset(aPtr, kAllocPoison, size);
+  mStats.allocated_large -= size;
+
+  DallocRun((arena_run_t*)aPtr, true);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 	size_t pageind;
@@ -3812,100 +3829,98 @@ arena_dalloc(void *ptr, size_t offset)
 
 	MOZ_ASSERT(ptr);
 	MOZ_ASSERT(offset != 0);
 	MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
 	MOZ_ASSERT(arena);
-	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
-
-	malloc_spin_lock(&arena->lock);
+	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+
+	malloc_spin_lock(&arena->mLock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-		arena_dalloc_small(arena, chunk, ptr, mapelm);
+		arena->DallocSmall(chunk, ptr, mapelm);
 	} else {
 		/* Large allocation. */
-		arena_dalloc_large(arena, chunk, ptr);
+		arena->DallocLarge(chunk, ptr);
 	}
-	malloc_spin_unlock(&arena->lock);
+	malloc_spin_unlock(&arena->mLock);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	MOZ_ASSERT(ptr);
 
 	offset = CHUNK_ADDR2OFFSET(ptr);
 	if (offset != 0)
 		arena_dalloc(ptr, offset);
 	else
 		huge_dalloc(ptr);
 }
 
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
+void
+arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+                           size_t aOldSize)
 {
-
-	MOZ_ASSERT(size < oldsize);
-
-	/*
-	 * Shrink the run, and make trailing pages available for other
-	 * allocations.
-	 */
-	malloc_spin_lock(&arena->lock);
-	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
-	    true);
-	arena->stats.allocated_large -= oldsize - size;
-	malloc_spin_unlock(&arena->lock);
+  MOZ_ASSERT(aSize < aOldSize);
+
+  /*
+   * Shrink the run, and make trailing pages available for other
+   * allocations.
+   */
+  malloc_spin_lock(&mLock);
+  TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
+  mStats.allocated_large -= aOldSize - aSize;
+  malloc_spin_unlock(&mLock);
 }
 
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
+bool
+arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
+                         size_t aOldSize)
 {
-	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
-	size_t npages = oldsize >> pagesize_2pow;
-
-	malloc_spin_lock(&arena->lock);
-	MOZ_DIAGNOSTIC_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
-
-	/* Try to extend the run. */
-	MOZ_ASSERT(size > oldsize);
-	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
-	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
-	    ~pagesize_mask) >= size - oldsize) {
-		/*
-		 * The next run is available and sufficiently large.  Split the
-		 * following run, then merge the first part with the existing
-		 * allocation.
-		 */
-		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
-		    ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
-		    false);
-
-		chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
-		    CHUNK_MAP_ALLOCATED;
-		chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
-		    CHUNK_MAP_ALLOCATED;
-
-		arena->stats.allocated_large += size - oldsize;
-		malloc_spin_unlock(&arena->lock);
-		return (false);
-	}
-	malloc_spin_unlock(&arena->lock);
-
-	return (true);
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
+  size_t npages = aOldSize >> pagesize_2pow;
+
+  malloc_spin_lock(&mLock);
+  MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
+
+  /* Try to extend the run. */
+  MOZ_ASSERT(aSize > aOldSize);
+  if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
+      & CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
+      ~pagesize_mask) >= aSize - aOldSize) {
+    /*
+     * The next run is available and sufficiently large.  Split the
+     * following run, then merge the first part with the existing
+     * allocation.
+     */
+    SplitRun((arena_run_t *)(uintptr_t(aChunk) +
+        ((pageind+npages) << pagesize_2pow)), aSize - aOldSize, true,
+        false);
+
+    aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
+        CHUNK_MAP_ALLOCATED;
+    aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
+        CHUNK_MAP_ALLOCATED;
+
+    mStats.allocated_large += aSize - aOldSize;
+    malloc_spin_unlock(&mLock);
+    return false;
+  }
+  malloc_spin_unlock(&mLock);
+
+  return true;
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
  * always fail if growing an object, and the following run is already in use.
  */
 static bool
 arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
@@ -3921,28 +3936,26 @@ arena_ralloc_large(void *ptr, size_t siz
 		}
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
-		MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
+		MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order avoid a race. */
 			memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 			    oldsize - size);
-			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
-			    oldsize);
+			arena->RallocShrinkLarge(chunk, ptr, psize, oldsize);
 			return (false);
 		} else {
-			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-			    psize, oldsize);
+			bool ret = arena->RallocGrowLarge(chunk, ptr, psize, oldsize);
 			if (ret == false && opt_zero) {
 				memset((void *)((uintptr_t)ptr + oldsize), 0,
 				    size - oldsize);
 			}
 			return (ret);
 		}
 	}
 }
@@ -3974,21 +3987,21 @@ arena_ralloc(void *ptr, size_t size, siz
 			return (ptr);
 	}
 
 	/*
 	 * If we get here, then size and oldsize are different enough that we
 	 * need to move the object.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	ret = arena_malloc(choose_arena(size), size, false);
+	ret = choose_arena(size)->Malloc(size, false);
 	if (!ret)
 		return nullptr;
 
-	/* Junk/zero-filling were already done by arena_malloc(). */
+	/* Junk/zero-filling were already done by arena_t::Malloc(). */
 	copysize = (size < oldsize) ? size : oldsize;
 #ifdef VM_COPY_MIN
 	if (copysize >= VM_COPY_MIN)
 		pages_copy(ret, ptr, copysize);
 	else
 #endif
 		memcpy(ret, ptr, copysize);
 	idalloc(ptr);
@@ -4012,89 +4025,89 @@ iralloc(void *ptr, size_t size)
 	oldsize = isalloc(ptr);
 
 	if (size <= arena_maxclass)
 		return (arena_ralloc(ptr, size, oldsize));
 	else
 		return (huge_ralloc(ptr, size, oldsize));
 }
 
-static bool
-arena_new(arena_t *arena)
+bool
+arena_t::Init()
 {
-	unsigned i;
-	arena_bin_t *bin;
-	size_t prev_run_size;
-
-	if (malloc_spin_init(&arena->lock))
-		return (true);
-
-	memset(&arena->stats, 0, sizeof(arena_stats_t));
-
-	/* Initialize chunks. */
-	arena_chunk_tree_dirty_new(&arena->chunks_dirty);
+  unsigned i;
+  arena_bin_t* bin;
+  size_t prev_run_size;
+
+  if (malloc_spin_init(&mLock))
+    return true;
+
+  memset(&mStats, 0, sizeof(arena_stats_t));
+
+  /* Initialize chunks. */
+  arena_chunk_tree_dirty_new(&mChunksDirty);
 #ifdef MALLOC_DOUBLE_PURGE
-	new (&arena->chunks_madvised) mozilla::DoublyLinkedList<arena_chunk_t>();
+  new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
 #endif
-	arena->spare = nullptr;
-
-	arena->ndirty = 0;
-	// Reduce the maximum amount of dirty pages we allow to be kept on
-	// thread local arenas. TODO: make this more flexible.
-	arena->dirty_max = opt_dirty_max >> 3;
-
-	arena_avail_tree_new(&arena->runs_avail);
-
-	/* Initialize bins. */
-	prev_run_size = pagesize;
-
-	/* (2^n)-spaced tiny bins. */
-	for (i = 0; i < ntbins; i++) {
-		bin = &arena->bins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
-
-	/* Quantum-spaced bins. */
-	for (; i < ntbins + nqbins; i++) {
-		bin = &arena->bins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = quantum * (i - ntbins + 1);
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
-
-	/* (2^n)-spaced sub-page bins. */
-	for (; i < ntbins + nqbins + nsbins; i++) {
-		bin = &arena->bins[i];
-		bin->runcur = nullptr;
-		arena_run_tree_new(&bin->runs);
-
-		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
-
-		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
+  mSpare = nullptr;
+
+  mNumDirty = 0;
+  // Reduce the maximum amount of dirty pages we allow to be kept on
+  // thread local arenas. TODO: make this more flexible.
+  mMaxDirty = opt_dirty_max >> 3;
+
+  arena_avail_tree_new(&mRunsAvail);
+
+  /* Initialize bins. */
+  prev_run_size = pagesize;
+
+  /* (2^n)-spaced tiny bins. */
+  for (i = 0; i < ntbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
+
+  /* Quantum-spaced bins. */
+  for (; i < ntbins + nqbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = quantum * (i - ntbins + 1);
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
+
+  /* (2^n)-spaced sub-page bins. */
+  for (; i < ntbins + nqbins + nsbins; i++) {
+    bin = &mBins[i];
+    bin->runcur = nullptr;
+    arena_run_tree_new(&bin->runs);
+
+    bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
+
+    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+    memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+  }
 
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	arena->magic = ARENA_MAGIC;
+  mMagic = ARENA_MAGIC;
 #endif
 
-	return (false);
+  return false;
 }
 
 static inline arena_t *
 arenas_fallback()
 {
 	/* Only reached if there is an OOM error. */
 
 	/*
@@ -4120,17 +4133,17 @@ arenas_extend()
 	 */
 	const size_t arenas_growth = 16;
 	arena_t *ret;
 
 
 	/* Allocate enough space for trailing bins. */
 	ret = (arena_t *)base_alloc(sizeof(arena_t)
 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
-	if (!ret || arena_new(ret)) {
+	if (!ret || ret->Init()) {
 		return arenas_fallback();
         }
 
 	malloc_spin_lock(&arenas_lock);
 
 	/* Allocate and initialize arenas. */
 	if (narenas % arenas_growth == 0) {
 		size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
@@ -4625,19 +4638,19 @@ MALLOC_OUT:
    */
   arenas_extend();
   if (!arenas || !arenas[0]) {
 #ifndef XP_WIN
     malloc_mutex_unlock(&init_lock);
 #endif
     return true;
   }
-  /* arena_new() sets this to a lower value for thread local arenas;
+  /* arena_t::Init() sets this to a lower value for thread local arenas;
    * reset to the default value for the main arenas */
-  arenas[0]->dirty_max = opt_dirty_max;
+  arenas[0]->mMaxDirty = opt_dirty_max;
 
 #ifndef NO_TLS
   /*
    * Assign the initial arena to the initial thread.
    */
   thread_arena.set(arenas[0]);
 #endif
 
@@ -4866,23 +4879,23 @@ MozJemalloc::free(void* aPtr)
  */
 
 /* This was added by Mozilla for use by SQLite. */
 template<> inline size_t
 MozJemalloc::malloc_good_size(size_t aSize)
 {
   /*
    * This duplicates the logic in imalloc(), arena_malloc() and
-   * arena_malloc_small().
+   * arena_t::MallocSmall().
    */
   if (aSize < small_min) {
     /* Small (tiny). */
     aSize = pow2_ceil(aSize);
     /*
-     * We omit the #ifdefs from arena_malloc_small() --
+     * We omit the #ifdefs from arena_t::MallocSmall() --
      * it can be inaccurate with its size in some cases, but this
      * function must be accurate.
      */
     if (aSize < (1U << TINY_MIN_2POW))
       aSize = (1U << TINY_MIN_2POW);
   } else if (aSize <= small_max) {
     /* Small (quantum-spaced). */
     aSize = QUANTUM_CEILING(aSize);
@@ -4968,46 +4981,46 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
     if (!arena) {
       continue;
     }
 
     arena_headers = 0;
     arena_unused = 0;
 
-    malloc_spin_lock(&arena->lock);
-
-    arena_mapped = arena->stats.mapped;
+    malloc_spin_lock(&arena->mLock);
+
+    arena_mapped = arena->mStats.mapped;
 
     /* "committed" counts dirty and allocated memory. */
-    arena_committed = arena->stats.committed << pagesize_2pow;
-
-    arena_allocated = arena->stats.allocated_small +
-                      arena->stats.allocated_large;
-
-    arena_dirty = arena->ndirty << pagesize_2pow;
+    arena_committed = arena->mStats.committed << pagesize_2pow;
+
+    arena_allocated = arena->mStats.allocated_small +
+                      arena->mStats.allocated_large;
+
+    arena_dirty = arena->mNumDirty << pagesize_2pow;
 
     for (j = 0; j < ntbins + nqbins + nsbins; j++) {
-      arena_bin_t* bin = &arena->bins[j];
+      arena_bin_t* bin = &arena->mBins[j];
       size_t bin_unused = 0;
 
       rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
         run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
         bin_unused += run->nfree * bin->reg_size;
       } rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
 
       if (bin->runcur) {
         bin_unused += bin->runcur->nfree * bin->reg_size;
       }
 
       arena_unused += bin_unused;
       arena_headers += bin->stats.curruns * bin->reg0_offset;
     }
 
-    malloc_spin_unlock(&arena->lock);
+    malloc_spin_unlock(&arena->mLock);
 
     MOZ_ASSERT(arena_mapped >= arena_committed);
     MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
 
     /* "waste" is committed memory that is neither dirty nor
      * allocated. */
     aStats->mapped += arena_mapped;
     aStats->allocated += arena_allocated;
@@ -5033,17 +5046,17 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 }
 
 #ifdef MALLOC_DOUBLE_PURGE
 
 /* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
 static void
 hard_purge_chunk(arena_chunk_t *chunk)
 {
-	/* See similar logic in arena_purge(). */
+	/* See similar logic in arena_t::Purge(). */
 
 	size_t i;
 	for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
 		/* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
 		size_t npages;
 		for (npages = 0;
 		     chunk->map[i + npages].bits & CHUNK_MAP_MADVISED && i + npages < chunk_npages;
 		     npages++) {
@@ -5059,38 +5072,38 @@ hard_purge_chunk(arena_chunk_t *chunk)
 			pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
 			pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
 		}
 		i += npages;
 	}
 }
 
 /* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
-static void
-hard_purge_arena(arena_t *arena)
+void
+arena_t::HardPurge()
 {
-	malloc_spin_lock(&arena->lock);
-
-	while (!arena->chunks_madvised.isEmpty()) {
-		arena_chunk_t *chunk = arena->chunks_madvised.popFront();
-		hard_purge_chunk(chunk);
-	}
-
-	malloc_spin_unlock(&arena->lock);
+  malloc_spin_lock(&mLock);
+
+  while (!mChunksMAdvised.isEmpty()) {
+    arena_chunk_t* chunk = mChunksMAdvised.popFront();
+    hard_purge_chunk(chunk);
+  }
+
+  malloc_spin_unlock(&mLock);
 }
 
 template<> inline void
 MozJemalloc::jemalloc_purge_freed_pages()
 {
   size_t i;
   malloc_spin_lock(&arenas_lock);
   for (i = 0; i < narenas; i++) {
     arena_t* arena = arenas[i];
     if (arena) {
-      hard_purge_arena(arena);
+      arena->HardPurge();
     }
   }
   malloc_spin_unlock(&arenas_lock);
 }
 
 #else /* !defined MALLOC_DOUBLE_PURGE */
 
 template<> inline void
@@ -5106,19 +5119,19 @@ template<> inline void
 MozJemalloc::jemalloc_free_dirty_pages(void)
 {
   size_t i;
   malloc_spin_lock(&arenas_lock);
   for (i = 0; i < narenas; i++) {
     arena_t* arena = arenas[i];
 
     if (arena) {
-      malloc_spin_lock(&arena->lock);
-      arena_purge(arena, true);
-      malloc_spin_unlock(&arena->lock);
+      malloc_spin_lock(&arena->mLock);
+      arena->Purge(true);
+      malloc_spin_unlock(&arena->mLock);
     }
   }
   malloc_spin_unlock(&arenas_lock);
 }
 
 /*
  * End non-standard functions.
  */
@@ -5138,17 +5151,17 @@ void
 {
 	unsigned i;
 
 	/* Acquire all mutexes in a safe order. */
 
 	malloc_spin_lock(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_lock(&arenas[i]->lock);
+			malloc_spin_lock(&arenas[i]->mLock);
 	}
 
 	malloc_mutex_lock(&base_mtx);
 
 	malloc_mutex_lock(&huge_mtx);
 }
 
 #ifndef XP_DARWIN
@@ -5162,17 +5175,17 @@ void
 	/* Release all mutexes, now that fork() has completed. */
 
 	malloc_mutex_unlock(&huge_mtx);
 
 	malloc_mutex_unlock(&base_mtx);
 
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_unlock(&arenas[i]->lock);
+			malloc_spin_unlock(&arenas[i]->mLock);
 	}
 	malloc_spin_unlock(&arenas_lock);
 }
 
 #ifndef XP_DARWIN
 static
 #endif
 void
@@ -5183,17 +5196,17 @@ void
 	/* Reinitialize all mutexes, now that fork() has completed. */
 
 	malloc_mutex_init(&huge_mtx);
 
 	malloc_mutex_init(&base_mtx);
 
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_init(&arenas[i]->lock);
+			malloc_spin_init(&arenas[i]->mLock);
 	}
 	malloc_spin_init(&arenas_lock);
 }
 
 /*
  * End library-private functions.
  */
 /******************************************************************************/
--- a/mobile/android/app/mobile.js
+++ b/mobile/android/app/mobile.js
@@ -230,16 +230,17 @@ pref("extensions.getAddons.getWithPerfor
 /* preference for the locale picker */
 pref("extensions.getLocales.get.url", "");
 pref("extensions.compatability.locales.buildid", "0");
 
 /* Don't let XPIProvider install distribution add-ons; we do our own thing on mobile. */
 pref("extensions.installDistroAddons", false);
 
 pref("extensions.webextPermissionPrompts", true);
+pref("extensions.webextOptionalPermissionPrompts", true);
 
 // Add-on content security policies.
 pref("extensions.webextensions.base-content-security-policy", "script-src 'self' https://* moz-extension: blob: filesystem: 'unsafe-eval' 'unsafe-inline'; object-src 'self' https://* moz-extension: blob: filesystem:;");
 pref("extensions.webextensions.default-content-security-policy", "script-src 'self'; object-src 'self';");
 
 pref("extensions.legacy.enabled", false);
 
 /* block popups by default, and notify the user about blocked popups */
--- a/mobile/android/chrome/content/ExtensionPermissions.js
+++ b/mobile/android/chrome/content/ExtensionPermissions.js
@@ -5,17 +5,17 @@ XPCOMUtils.defineLazyModuleGetter(this, 
 
 var ExtensionPermissions = {
   // id -> object containing update details (see applyUpdate() )
   updates: new Map(),
 
   // Prepare the strings needed for a permission notification.
   _prepareStrings(info) {
     let appName = Strings.brand.GetStringFromName("brandShortName");
-    let info2 = Object.assign({appName, addonName: info.addon.name}, info);
+    let info2 = Object.assign({appName}, info);
     let strings = ExtensionData.formatPermissionStrings(info2, Strings.browser);
 
     // We dump the main body of the dialog into a big android
     // TextView.  Build a big string with the full contents here.
     let message = "";
     if (strings.msgs.length > 0) {
       message = [strings.listIntro, ...strings.msgs.map(s => `\u2022 ${s}`)].join("\n");
     }
@@ -38,18 +38,18 @@ var ExtensionPermissions = {
     // If we can't render an icon, show the default
     return "DEFAULT";
   },
 
   async observe(subject, topic, data) {
     switch (topic) {
       case "webextension-permission-prompt": {
         let {target, info} = subject.wrappedJSObject;
-
-        let details = this._prepareStrings(info);
+        let stringInfo = Object.assign({addonName: info.addon.name}, info);
+        let details = this._prepareStrings(stringInfo);
         details.icon = this._prepareIcon(info.icon);
         details.type = "Extension:PermissionPrompt";
         let accepted = await EventDispatcher.instance.sendRequestForResult(details);
 
         if (accepted) {
           info.resolve();
         } else {
           info.reject();
@@ -57,16 +57,17 @@ var ExtensionPermissions = {
         break;
       }
 
       case "webextension-update-permissions":
         let info = subject.wrappedJSObject;
         let {addon, resolve, reject} = info;
         let stringInfo = Object.assign({
           type: "update",
+          addonName: addon.name,
         }, info);
 
         let details = this._prepareStrings(stringInfo);
 
         // If there are no promptable permissions, just apply the update
         if (details.message.length == 0) {
           resolve();
           return;
@@ -82,20 +83,40 @@ var ExtensionPermissions = {
         if (first) {
           EventDispatcher.instance.sendRequest({
             type: "Extension:ShowUpdateIcon",
             value: true,
           });
         }
         break;
 
-      case "webextension-optional-permission-prompt":
-        // To be implemented in bug 1392176, just auto-approve until then
-        subject.wrappedJSObject.resolve(true);
-        break;
+      case "webextension-optional-permission-prompt": {
+        let info = subject.wrappedJSObject;
+        let {name, resolve} = info;
+        let stringInfo = Object.assign({
+          type: "optional",
+          addonName: name,
+        }, info);
+
+        let details = this._prepareStrings(stringInfo);
+
+        // If there are no promptable permissions, just allow the request
+        if (details.message.length == 0) {
+          resolve(true);
+          return;
+        }
+
+        // Prepare the prompt contents, falling back to a placeholder icon
+        // when the extension did not supply one.
+        details.icon = this._prepareIcon(info.icon || "dummy.svg");
+
+        details.type = "Extension:PermissionPrompt";
+        let accepted = await EventDispatcher.instance.sendRequestForResult(details);
+        resolve(accepted);
+      }
     }
   },
 
   async applyUpdate(id) {
     if (!this.updates.has(id)) {
       return;
     }
 
--- a/mobile/android/components/extensions/ext-tabs.js
+++ b/mobile/android/components/extensions/ext-tabs.js
@@ -1,18 +1,16 @@
 /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
 /* vim: set sts=2 sw=2 et tw=80: */
 "use strict";
 
 XPCOMUtils.defineLazyServiceGetter(this, "aboutNewTabService",
                                    "@mozilla.org/browser/aboutnewtab-service;1",
                                    "nsIAboutNewTabService");
 
-XPCOMUtils.defineLazyModuleGetter(this, "MatchPattern",
-                                  "resource://gre/modules/MatchPattern.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "PrivateBrowsingUtils",
                                   "resource://gre/modules/PrivateBrowsingUtils.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "PromiseUtils",
                                   "resource://gre/modules/PromiseUtils.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "Services",
                                   "resource://gre/modules/Services.jsm");
 
 const getBrowserWindow = window => {
--- a/mobile/android/locales/en-US/chrome/browser.properties
+++ b/mobile/android/locales/en-US/chrome/browser.properties
@@ -125,16 +125,24 @@ webextPerms.add.label=Add
 webextPerms.cancel.label=Cancel
 
 # LOCALIZATION NOTE (webextPerms.updateText)
 # %S is replaced with the localized name of the updated extension.
 webextPerms.updateText=%S has been updated. You must approve new permissions before the updated version will install. Choosing “Cancel” will maintain your current add-on version.
 
 webextPerms.updateAccept.label=Update
 
+# LOCALIZATION NOTE (webextPerms.optionalPermsHeader)
+# %S is replaced with the localized name of the extension requesting new
+# permissions.
+webextPerms.optionalPermsHeader=%S requests additional permissions.
+webextPerms.optionalPermsListIntro=It wants to:
+webextPerms.optionalPermsAllow.label=Allow
+webextPerms.optionalPermsDeny.label=Deny
+
 webextPerms.description.bookmarks=Read and modify bookmarks
 webextPerms.description.browserSettings=Read and modify browser settings
 webextPerms.description.browsingData=Clear recent browsing history, cookies, and related data
 webextPerms.description.clipboardRead=Get data from the clipboard
 webextPerms.description.clipboardWrite=Input data to the clipboard
 webextPerms.description.devtools=Extend developer tools to access your data in open tabs
 webextPerms.description.downloads=Download files and read and modify the browser’s download history
 webextPerms.description.downloads.open=Open files downloaded to your computer
--- a/mobile/android/moz.build
+++ b/mobile/android/moz.build
@@ -1,16 +1,17 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files('**'):
     BUG_COMPONENT = ('Firefox for Android', 'Build Config & IDE Support')
+    SCHEDULES.exclusive = ['android']
 
 with Files('bouncer/**'):
     BUG_COMPONENT = ('Firefox for Android', 'Distributions')
 
 with Files('branding/**'):
     BUG_COMPONENT = ('Firefox for Android', 'General')
 
 with Files('build/**'):
--- a/moz.build
+++ b/moz.build
@@ -31,16 +31,17 @@ with Files('mach'):
 with Files('*moz*'):
     BUG_COMPONENT = ('Core', 'Build Config')
 
 with Files('GNUmakefile'):
     BUG_COMPONENT = ('Core', 'Build Config')
 
 with Files('*gradle*'):
     BUG_COMPONENT = ('Firefox for Android', 'Build Config & IDE Support')
+    SCHEDULES.exclusive = ['android']
 
 with Files('**/l10n.toml'):
     BUG_COMPONENT = ('Core', 'Localization')
     FINAL = True
 
 with Files('README.txt'):
     BUG_COMPONENT = ('Core', 'General')
 
--- a/netwerk/protocol/http/moz.build
+++ b/netwerk/protocol/http/moz.build
@@ -118,16 +118,17 @@ EXTRA_JS_MODULES += [
 include('/ipc/chromium/chromium-config.mozbuild')
 
 FINAL_LIBRARY = 'xul'
 
 LOCAL_INCLUDES += [
     '/dom/base',
     '/netwerk/base',
     '/netwerk/cookie',
+    '/security/pkix/include',
 ]
 
 EXTRA_COMPONENTS += [
     'UAOverridesBootstrapper.js',
     'UAOverridesBootstrapper.manifest',
     'WellKnownOpportunisticUtils.js',
     'WellKnownOpportunisticUtils.manifest',
 ]
--- a/netwerk/protocol/http/nsHttpHandler.cpp
+++ b/netwerk/protocol/http/nsHttpHandler.cpp
@@ -62,16 +62,18 @@
 #include "mozilla/net/NeckoParent.h"
 #include "mozilla/ipc/URIUtils.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/Unused.h"
 #include "mozilla/BasePrincipal.h"
 
 #include "mozilla/dom/ContentParent.h"
 
+#include "nsNSSComponent.h"
+
 #if defined(XP_UNIX)
 #include <sys/utsname.h>
 #endif
 
 #if defined(XP_WIN)
 #include <windows.h>
 #endif
 
@@ -237,16 +239,17 @@ nsHttpHandler::nsHttpHandler()
     , mSpdySendBufferSize(ASpdySession::kTCPSendBufferSize)
     , mSpdyPushAllowance(32768)
     , mSpdyPullAllowance(ASpdySession::kInitialRwin)
     , mDefaultSpdyConcurrent(ASpdySession::kDefaultMaxConcurrent)
     , mSpdyPingThreshold(PR_SecondsToInterval(58))
     , mSpdyPingTimeout(PR_SecondsToInterval(8))
     , mConnectTimeout(90000)
     , mParallelSpeculativeConnectLimit(6)
+    , mSpeculativeConnectEnabled(true)
     , mRequestTokenBucketEnabled(true)
     , mRequestTokenBucketMinParallelism(6)
     , mRequestTokenBucketHz(100)
     , mRequestTokenBucketBurst(32)
     , mCriticalRequestPrioritization(true)
     , mTCPKeepaliveShortLivedEnabled(false)
     , mTCPKeepaliveShortLivedTimeS(60)
     , mTCPKeepaliveShortLivedIdleTimeS(10)
@@ -521,16 +524,18 @@ nsHttpHandler::Init()
         obsService->AddObserver(this, "net:clear-active-logins", true);
         obsService->AddObserver(this, "net:prune-dead-connections", true);
         // Sent by the TorButton add-on in the Tor Browser
         obsService->AddObserver(this, "net:prune-all-connections", true);
         obsService->AddObserver(this, "last-pb-context-exited", true);
         obsService->AddObserver(this, "browser:purge-session-history", true);
         obsService->AddObserver(this, NS_NETWORK_LINK_TOPIC, true);
         obsService->AddObserver(this, "application-background", true);
+        obsService->AddObserver(this, "psm:user-certificate-added", true);
+        obsService->AddObserver(this, "psm:user-certificate-deleted", true);
 
         if (!IsNeckoChild()) {
             obsService->AddObserver(this,
                                     "net:current-toplevel-outer-content-windowid",
                                     true);
         }
 
         if (mFastOpenSupported) {
@@ -2274,16 +2279,18 @@ nsHttpHandler::GetMisc(nsACString &value
     value = mMisc;
     return NS_OK;
 }
 
 //-----------------------------------------------------------------------------
 // nsHttpHandler::nsIObserver
 //-----------------------------------------------------------------------------
 
+static bool CanEnableSpeculativeConnect(); // forward declaration
+
 NS_IMETHODIMP
 nsHttpHandler::Observe(nsISupports *subject,
                        const char *topic,
                        const char16_t *data)
 {
     MOZ_ASSERT(NS_IsMainThread());
     LOG(("nsHttpHandler::Observe [topic=\"%s\"]\n", topic));
 
@@ -2419,23 +2426,60 @@ nsHttpHandler::Observe(nsISupports *subj
                     sCurrentTopLevelOuterContentWindowId);
             }
         }
     } else if (!strcmp(topic, "captive-portal-login") ||
                !strcmp(topic, "captive-portal-login-success")) {
          // We have detected a captive portal and we will reset the Fast Open
          // failure counter.
          ResetFastOpenConsecutiveFailureCounter();
+    } else if (!strcmp(topic, "psm:user-certificate-added")) {
+        // A user certificate has just been added.
+        // We should immediately disable speculative connect
+        mSpeculativeConnectEnabled = false;
+    } else if (!strcmp(topic, "psm:user-certificate-deleted")) {
+        // If a user certificate has been removed, we need to check if there
+        // are others installed
+        mSpeculativeConnectEnabled = CanEnableSpeculativeConnect();
     }
 
     return NS_OK;
 }
 
 // nsISpeculativeConnect
 
+static bool
+CanEnableSpeculativeConnect()
+{
+  MOZ_ASSERT(NS_IsMainThread(), "Main thread only");
+
+  nsCOMPtr<nsINSSComponent> component(do_GetService(PSM_COMPONENT_CONTRACTID));
+  if (!component) {
+    return false;
+  }
+
+  // Check if any 3rd party PKCS#11 modules are installed, as they may produce
+  // client certificates
+  bool activeSmartCards = false;
+  nsresult rv = component->HasActiveSmartCards(activeSmartCards);
+  if (NS_FAILED(rv) || activeSmartCards) {
+    return false;
+  }
+
+  // If there are any client certificates installed, we can't enable speculative
+  // connect, as it may pop up the certificate chooser at any time.
+  bool hasUserCerts = false;
+  rv = component->HasUserCertsInstalled(hasUserCerts);
+  if (NS_FAILED(rv) || hasUserCerts) {
+    return false;
+  }
+
+  return true;
+}
+
 nsresult
 nsHttpHandler::SpeculativeConnectInternal(nsIURI *aURI,
                                           nsIPrincipal *aPrincipal,
                                           nsIInterfaceRequestor *aCallbacks,
                                           bool anonymous)
 {
     if (IsNeckoChild()) {
         ipc::URIParams params;
@@ -2516,16 +2560,26 @@ nsHttpHandler::SpeculativeConnectInterna
         return NS_ERROR_UNEXPECTED;
 
     // Construct connection info object
     bool usingSSL = false;
     rv = aURI->SchemeIs("https", &usingSSL);
     if (NS_FAILED(rv))
         return rv;
 
+    static bool sCheckedIfSpeculativeEnabled = false;
+    if (!sCheckedIfSpeculativeEnabled) {
+        sCheckedIfSpeculativeEnabled = true;
+        mSpeculativeConnectEnabled = CanEnableSpeculativeConnect();
+    }
+
+    if (usingSSL && !mSpeculativeConnectEnabled) {
+        return NS_ERROR_UNEXPECTED;
+    }
+
     nsAutoCString host;
     rv = aURI->GetAsciiHost(host);
     if (NS_FAILED(rv))
         return rv;
 
     int32_t port = -1;
     rv = aURI->GetPort(&port);
     if (NS_FAILED(rv))
--- a/netwerk/protocol/http/nsHttpHandler.h
+++ b/netwerk/protocol/http/nsHttpHandler.h
@@ -574,16 +574,20 @@ private:
     // The maximum amount of time to wait for socket transport to be
     // established. In milliseconds.
     uint32_t       mConnectTimeout;
 
     // The maximum number of current global half open sockets allowable
     // when starting a new speculative connection.
     uint32_t       mParallelSpeculativeConnectLimit;
 
+    // We may disable speculative connect if the browser has user certificates
+    // installed as that might randomly popup the certificate choosing window.
+    bool           mSpeculativeConnectEnabled;
+
     // For Rate Pacing of HTTP/1 requests through a netwerk/base/EventTokenBucket
     // Active requests <= *MinParallelism are not subject to the rate pacing
     bool           mRequestTokenBucketEnabled;
     uint16_t       mRequestTokenBucketMinParallelism;
     uint32_t       mRequestTokenBucketHz;  // EventTokenBucket HZ
     uint32_t       mRequestTokenBucketBurst; // EventTokenBucket Burst
 
     // Whether or not to block requests for non head js/css items (e.g. media)
--- a/python/mozbuild/mozbuild/artifacts.py
+++ b/python/mozbuild/mozbuild/artifacts.py
@@ -653,17 +653,17 @@ class TaskCache(CacheManager):
             product=artifact_job.product,
             job=job,
         )
         self.log(logging.DEBUG, 'artifact',
                  {'namespace': namespace},
                  'Searching Taskcluster index with namespace: {namespace}')
         try:
             taskId = find_task_id(namespace)
-        except Exception:
+        except KeyError:
             # Not all revisions correspond to pushes that produce the job we
             # care about; and even those that do may not have completed yet.
             raise ValueError('Task for {namespace} does not exist (yet)!'.format(namespace=namespace))
 
         artifacts = list_artifacts(taskId)
 
         urls = []
         for artifact_name in artifact_job.find_candidate_artifacts(artifacts):
--- a/python/mozbuild/mozbuild/frontend/context.py
+++ b/python/mozbuild/mozbuild/frontend/context.py
@@ -19,29 +19,32 @@ from __future__ import absolute_import, 
 import os
 
 from collections import (
     Counter,
     OrderedDict,
 )
 from mozbuild.util import (
     HierarchicalStringList,
+    ImmutableStrictOrderingOnAppendList,
     KeyedDefaultDict,
     List,
     ListWithAction,
     memoize,
     memoized_property,
     ReadOnlyKeyedDefaultDict,
     StrictOrderingOnAppendList,
     StrictOrderingOnAppendListWithAction,
     StrictOrderingOnAppendListWithFlagsFactory,
     TypedList,
     TypedNamedTuple,
 )
 
+from .. import schedules
+
 from ..testing import (
     all_test_flavors,
     read_manifestparser_manifest,
     read_reftest_manifest,
     read_wpt_manifest,
 )
 
 import mozpack.path as mozpath
@@ -600,16 +603,64 @@ def ContextDerivedTypedRecord(*fields):
             if name in self._fields and not isinstance(value, self._fields[name]):
                 value = self._fields[name](value)
             object.__setattr__(self, name, value)
 
     _TypedRecord._fields = dict(fields)
     return _TypedRecord
 
 
+class Schedules(object):
+    """Similar to a ContextDerivedTypedRecord, but with different behavior
+    for the properties:
+
+     * VAR.inclusive can only be appended to (+=), and can only contain values
+       from mozbuild.schedules.INCLUSIVE_COMPONENTS
+
+     * VAR.exclusive can only be assigned to (no +=), and can only contain
+       values from mozbuild.schedules.ALL_COMPONENTS
+    """
+    __slots__ = ('_exclusive', '_inclusive')
+
+    def __init__(self):
+        self._inclusive = TypedList(Enum(*schedules.INCLUSIVE_COMPONENTS))()
+        self._exclusive = ImmutableStrictOrderingOnAppendList(schedules.EXCLUSIVE_COMPONENTS)
+
+    # inclusive is mutable but cannot be assigned to (+= only)
+    @property
+    def inclusive(self):
+        return self._inclusive
+
+    @inclusive.setter
+    def inclusive(self, value):
+        if value is not self._inclusive:
+            raise AttributeError("Cannot assign to this value - use += instead")
+        unexpected = [v for v in value if v not in schedules.INCLUSIVE_COMPONENTS]
+        if unexpected:
+            raise Exception("unexpected exclusive component(s) " + ', '.join(unexpected))
+
+    # exclusive is immutable but can be set (= only)
+    @property
+    def exclusive(self):
+        return self._exclusive
+
+    @exclusive.setter
+    def exclusive(self, value):
+        if not isinstance(value, (tuple, list)):
+            raise Exception("expected a tuple or list")
+        unexpected = [v for v in value if v not in schedules.ALL_COMPONENTS]
+        if unexpected:
+            raise Exception("unexpected exclusive component(s) " + ', '.join(unexpected))
+        self._exclusive = ImmutableStrictOrderingOnAppendList(sorted(value))
+
+    # components provides a synthetic summary of all components
+    @property
+    def components(self):
+        return list(sorted(set(self._inclusive) | set(self._exclusive)))
+
 @memoize
 def ContextDerivedTypedHierarchicalStringList(type):
     """Specialized HierarchicalStringList for use with ContextDerivedValue
     types."""
     class _TypedListWithItems(ContextDerivedValue, HierarchicalStringList):
         __slots__ = ('_strings', '_children', '_context')
 
         def __init__(self, context):
@@ -670,16 +721,19 @@ OrderedSourceList = ContextDerivedTypedL
 OrderedTestFlavorList = TypedList(Enum(*all_test_flavors()),
                                   StrictOrderingOnAppendList)
 OrderedStringList = TypedList(unicode, StrictOrderingOnAppendList)
 DependentTestsEntry = ContextDerivedTypedRecord(('files', OrderedSourceList),
                                                 ('tags', OrderedStringList),
                                                 ('flavors', OrderedTestFlavorList))
 BugzillaComponent = TypedNamedTuple('BugzillaComponent',
                         [('product', unicode), ('component', unicode)])
+SchedulingComponents = ContextDerivedTypedRecord(
+        ('inclusive', TypedList(unicode, StrictOrderingOnAppendList)),
+        ('exclusive', TypedList(unicode, StrictOrderingOnAppendList)))
 
 
 class Files(SubContext):
     """Metadata attached to files.
 
     It is common to want to annotate files with metadata, such as which
     Bugzilla component tracks issues with certain files. This sub-context is
     where we stick that metadata.
@@ -788,16 +842,45 @@ class Files(SubContext):
             with Files('dom/base/nsGlobalWindow.cpp'):
                 IMPACTED_TESTS.flavors += [
                     'mochitest',
                 ]
 
             Would suggest that nsGlobalWindow.cpp is potentially relevant to
             any plain mochitest.
             """),
+        'SCHEDULES': (Schedules, list,
+            """Maps source files to the CI tasks that should be scheduled when
+            they change.  The tasks are grouped by named components, and those
+            names appear again in the taskgraph configuration
+            (`$topsrcdir/taskgraph/`).
+
+            Some components are "inclusive", meaning that changes to most files
+            do not schedule them, aside from those described in a Files
+            subcontext.  For example, py-lint tasks need not be scheduled for
+            most changes, but should be scheduled when any Python file changes.
+            Such components are named by appending to `SCHEDULES.inclusive`:
+
+            with Files('**.py'):
+                SCHEDULES.inclusive += ['py-lint']
+
+            Other components are 'exclusive', meaning that changes to most
+            files schedule them, but some files affect only one or two
+            components. For example, most files schedule builds and tests of
+            Firefox for Android, OS X, Windows, and Linux, but files under
+            `mobile/android/` affect Android builds and tests exclusively, so
+            builds for other operating systems are not needed.  Test suites
+            provide another example: most files schedule reftests, but changes
+            to reftest scripts need only schedule reftests and no other suites.
+
+            Exclusive components are named by setting `SCHEDULES.exclusive`:
+
+            with Files('mobile/android/**'):
+                SCHEDULES.exclusive = ['android']
+            """),
     }
 
     def __init__(self, parent, pattern=None):
         super(Files, self).__init__(parent)
         self.pattern = pattern
         self.finalized = set()
         self.test_files = set()
         self.test_tags = set()
--- a/python/mozbuild/mozbuild/frontend/mach_commands.py
+++ b/python/mozbuild/mozbuild/frontend/mach_commands.py
@@ -12,16 +12,17 @@ from mach.decorators import (
     CommandProvider,
     Command,
     SubCommand,
 )
 
 from mozbuild.base import MachCommandBase
 import mozpack.path as mozpath
 
+TOPSRCDIR = os.path.abspath(os.path.join(__file__, '../../../../../'))
 
 class InvalidPathException(Exception):
     """Represents an error due to an invalid path."""
 
 
 @CommandProvider
 class MozbuildFileCommands(MachCommandBase):
     @Command('mozbuild-reference', category='build-dev',
@@ -187,8 +188,28 @@ class MozbuildFileCommands(MachCommandBa
                 raise InvalidPathException('cannot use wildcard in version control mode')
 
             for path, f in reader.finder.find(p):
                 if path not in all_paths_set:
                     all_paths_set.add(path)
                     allpaths.append(path)
 
         return reader.files_info(allpaths)
+
+
+    @SubCommand('file-info', 'schedules',
+                'Show the combined SCHEDULES for the files listed.')
+    @CommandArgument('paths', nargs='+',
+                     help='Paths whose data to query')
+    def file_info_schedules(self, paths):
+        """Show what is scheduled by the given files.
+
+        Given a requested set of files (which can be specified using
+        wildcards), print the total set of scheduled components.
+        """
+        from mozbuild.frontend.reader import EmptyConfig, BuildReader
+        config = EmptyConfig(TOPSRCDIR)
+        reader = BuildReader(config)
+        schedules = set()
+        for p, m in reader.files_info(paths).items():
+            schedules |= set(m['SCHEDULES'].components)
+
+        print(", ".join(schedules))
--- a/python/mozbuild/mozbuild/mach_commands.py
+++ b/python/mozbuild/mozbuild/mach_commands.py
@@ -1779,17 +1779,16 @@ class PackageFrontend(MachCommandBase):
             unpack_file,
         )
         from requests.adapters import HTTPAdapter
         import redo
         import requests
         import shutil
 
         from taskgraph.generator import Kind
-        from taskgraph.optimize import optimize_task
         from taskgraph.util.taskcluster import (
             get_artifact_url,
             list_artifacts,
         )
         import yaml
 
         self._set_log_level(verbose)
         # Normally, we'd use self.log_manager.enable_unstructured(),
@@ -1878,16 +1877,22 @@ class PackageFrontend(MachCommandBase):
                                         record.digest)
                 records[record.filename] = DownloadRecord(
                     url, record.filename, record.size, record.digest,
                     record.algorithm, unpack=record.unpack,
                     version=record.version, visibility=record.visibility,
                     setup=record.setup)
 
         if from_build:
+            if 'TASK_ID' in os.environ:
+                self.log(logging.ERROR, 'artifact', {},
+                         'Do not use --from-build in automation; all dependencies '
+                         'should be determined in the decision task.')
+                return 1
+            from taskgraph.optimize import IndexSearch
             params = {
                 'message': '',
                 'project': '',
                 'level': os.environ.get('MOZ_SCM_LEVEL', '3'),
                 'base_repository': '',
                 'head_repository': '',
                 'head_rev': '',
                 'moz_build_date': '',
@@ -1923,17 +1928,18 @@ class PackageFrontend(MachCommandBase):
                     b = 'toolchain-{}'.format(b)
 
                 task = toolchains.get(aliases.get(b, b))
                 if not task:
                     self.log(logging.ERROR, 'artifact', {'build': user_value},
                              'Could not find a toolchain build named `{build}`')
                     return 1
 
-                task_id = optimize_task(task, {})
+                task_id = IndexSearch().should_replace_task(
+                    task, {}, task.optimization.get('index-search', []))
                 artifact_name = task.attributes.get('toolchain-artifact')
                 if task_id in (True, False) or not artifact_name:
                     self.log(logging.ERROR, 'artifact', {'build': user_value},
                              'Could not find artifacts for a toolchain build '
                              'named `{build}`')
                     return 1
 
                 record = ArtifactRecord(task_id, artifact_name)
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/schedules.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Constants for SCHEDULES configuration in moz.build files and for
+skip-unless-schedules optimizations in task-graph generation.
+"""
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+# TODO: ideally these lists could be specified in moz.build itself
+
+INCLUSIVE_COMPONENTS = [
+    'py-lint',
+    'js-lint',
+    'yaml-lint',
+]
+EXCLUSIVE_COMPONENTS = [
+    # os families
+    'android',
+    'linux',
+    'macosx',
+    'windows',
+]
+ALL_COMPONENTS = INCLUSIVE_COMPONENTS + EXCLUSIVE_COMPONENTS
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/test/frontend/data/schedules/moz.build
@@ -0,0 +1,11 @@
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+
+with Files('*.win'):
+    SCHEDULES.exclusive = ['windows']
+
+with Files('*.osx'):
+    SCHEDULES.exclusive = ['macosx']
+
+with Files('subd/**.py'):
+    SCHEDULES.inclusive += ['py-lint']
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/test/frontend/data/schedules/subd/moz.build
@@ -0,0 +1,2 @@
+with Files('yaml.py'):
+    SCHEDULES.inclusive += ['yaml-lint']
--- a/python/mozbuild/mozbuild/test/frontend/test_reader.py
+++ b/python/mozbuild/mozbuild/test/frontend/test_reader.py
@@ -475,11 +475,32 @@ class TestBuildReader(unittest.TestCase)
                              expected_flavors[path])
 
     def test_invalid_flavor(self):
         reader = self.reader('invalid-files-flavor')
 
         with self.assertRaises(BuildReaderError):
             reader.files_info(['foo.js'])
 
+    def test_schedules(self):
+        reader = self.reader('schedules')
+        info = reader.files_info(['somefile', 'foo.win', 'foo.osx', 'subd/aa.py', 'subd/yaml.py'])
+        # default: all exclusive, no inclusive
+        self.assertEqual(info['somefile']['SCHEDULES'].inclusive, [])
+        self.assertEqual(info['somefile']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
+        # windows-only
+        self.assertEqual(info['foo.win']['SCHEDULES'].inclusive, [])
+        self.assertEqual(info['foo.win']['SCHEDULES'].exclusive, ['windows'])
+        # osx-only
+        self.assertEqual(info['foo.osx']['SCHEDULES'].inclusive, [])
+        self.assertEqual(info['foo.osx']['SCHEDULES'].exclusive, ['macosx'])
+        # top-level moz.build specifies subd/**.py with an inclusive option
+        self.assertEqual(info['subd/aa.py']['SCHEDULES'].inclusive, ['py-lint'])
+        self.assertEqual(info['subd/aa.py']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
+        # Files('yaml.py') in subd/moz.build *overrides* Files('subd/**.py')
+        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].inclusive, ['yaml-lint'])
+        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].exclusive, ['android', 'linux', 'macosx', 'windows'])
+
+        self.assertEqual(info['subd/yaml.py']['SCHEDULES'].components,
+                ['android', 'linux', 'macosx', 'windows', 'yaml-lint'])
 
 if __name__ == '__main__':
     main()
--- a/python/mozbuild/mozbuild/util.py
+++ b/python/mozbuild/mozbuild/util.py
@@ -480,16 +480,35 @@ class StrictOrderingOnAppendListMixin(ob
 class StrictOrderingOnAppendList(ListMixin, StrictOrderingOnAppendListMixin,
         list):
     """A list specialized for moz.build environments.
 
     We overload the assignment and append operations to require that incoming
     elements be ordered. This enforces cleaner style in moz.build files.
     """
 
+class ImmutableStrictOrderingOnAppendList(StrictOrderingOnAppendList):
+    """Like StrictOrderingOnAppendList, but not allowing mutations of the value.
+    """
+    def append(self, elt):
+        raise Exception("cannot use append on this type")
+
+    def extend(self, iterable):
+        raise Exception("cannot use extend on this type")
+
+    def __setslice__(self, i, j, iterable):
+        raise Exception("cannot assign to slices on this type")
+
+    def __setitem__(self, i, elt):
+        raise Exception("cannot assign to indexes on this type")
+
+    def __iadd__(self, other):
+        raise Exception("cannot use += on this type")
+
+
 class ListWithActionMixin(object):
     """Mixin to create lists with pre-processing. See ListWithAction."""
     def __init__(self, iterable=None, action=None):
         if iterable is None:
             iterable = []
         if not callable(action):
             raise ValueError('A callabe action is required to construct '
                              'a ListWithAction')
--- a/security/manager/ssl/nsNSSCertificateDB.cpp
+++ b/security/manager/ssl/nsNSSCertificateDB.cpp
@@ -763,23 +763,30 @@ nsNSSCertificateDB::ImportUserCertificat
   }
   slot = nullptr;
 
   {
     nsCOMPtr<nsIX509Cert> certToShow = nsNSSCertificate::Create(cert.get());
     DisplayCertificateAlert(ctx, "UserCertImported", certToShow, locker);
   }
 
+  nsresult rv = NS_OK;
   int numCACerts = collectArgs->numcerts - 1;
   if (numCACerts) {
     SECItem* caCerts = collectArgs->rawCerts + 1;
-    return ImportValidCACerts(numCACerts, caCerts, ctx, locker);
+    rv = ImportValidCACerts(numCACerts, caCerts, ctx, locker);
   }
 
-  return NS_OK;
+  nsCOMPtr<nsIObserverService> observerService =
+    mozilla::services::GetObserverService();
+  if (observerService) {
+    observerService->NotifyObservers(nullptr, "psm:user-certificate-added", nullptr);
+  }
+
+  return rv;
 }
 
 NS_IMETHODIMP
 nsNSSCertificateDB::DeleteCertificate(nsIX509Cert *aCert)
 {
   NS_ENSURE_ARG_POINTER(aCert);
   nsNSSShutDownPreventionLock locker;
   if (isAlreadyShutDown()) {
@@ -806,16 +813,23 @@ nsNSSCertificateDB::DeleteCertificate(ns
     // want to do that with user certs, because a user may  re-store
     // the cert onto the card again at which point we *will* want to
     // trust that cert if it chains up properly.
     nsNSSCertTrust trust(0, 0, 0);
     srv = ChangeCertTrustWithPossibleAuthentication(cert, trust.GetTrust(),
                                                     nullptr);
   }
   MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("cert deleted: %d", srv));
+
+  nsCOMPtr<nsIObserverService> observerService =
+    mozilla::services::GetObserverService();
+  if (observerService) {
+    observerService->NotifyObservers(nullptr, "psm:user-certificate-deleted", nullptr);
+  }
+
   return (srv) ? NS_ERROR_FAILURE : NS_OK;
 }
 
 NS_IMETHODIMP
 nsNSSCertificateDB::SetCertTrust(nsIX509Cert *cert,
                                  uint32_t type,
                                  uint32_t trusted)
 {
@@ -985,17 +999,25 @@ nsNSSCertificateDB::ImportPKCS12File(nsI
   }
   nsresult rv = BlockUntilLoadableRootsLoaded();
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   NS_ENSURE_ARG(aFile);
   nsPKCS12Blob blob;
-  return blob.ImportFromFile(aFile);
+  rv = blob.ImportFromFile(aFile);
+
+  nsCOMPtr<nsIObserverService> observerService =
+    mozilla::services::GetObserverService();
+  if (NS_SUCCEEDED(rv) && observerService) {
+    observerService->NotifyObservers(nullptr, "psm:user-certificate-added", nullptr);
+  }
+
+  return rv;
 }
 
 NS_IMETHODIMP
 nsNSSCertificateDB::ExportPKCS12File(nsIFile* aFile, uint32_t count,
                                      nsIX509Cert** certs)
 {
   if (!NS_IsMainThread()) {
     return NS_ERROR_NOT_SAME_THREAD;
--- a/security/manager/ssl/nsNSSComponent.cpp
+++ b/security/manager/ssl/nsNSSComponent.cpp
@@ -1134,16 +1134,71 @@ LoadLoadableRootsTask::Run()
     if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
     }
   }
   return NS_OK;
 }
 
 nsresult
+nsNSSComponent::HasActiveSmartCards(bool& result)
+{
+  MOZ_ASSERT(NS_IsMainThread(), "Main thread only");
+  if (!NS_IsMainThread()) {
+    return NS_ERROR_NOT_SAME_THREAD;
+  }
+
+#ifndef MOZ_NO_SMART_CARDS
+  nsNSSShutDownPreventionLock lock;
+  MutexAutoLock nsNSSComponentLock(mMutex);
+
+  // A non-null list means at least one smart card thread was active
+  if (mThreadList) {
+    result = true;
+    return NS_OK;
+  }
+#endif
+  result = false;
+  return NS_OK;
+}
+
+nsresult
+nsNSSComponent::HasUserCertsInstalled(bool& result)
+{
+  MOZ_ASSERT(NS_IsMainThread(), "Main thread only");
+  if (!NS_IsMainThread()) {
+    return NS_ERROR_NOT_SAME_THREAD;
+  }
+
+  nsNSSShutDownPreventionLock lock;
+  MutexAutoLock nsNSSComponentLock(mMutex);
+
+  if (!mNSSInitialized) {
+    return NS_ERROR_NOT_INITIALIZED;
+  }
+
+  result = false;
+  UniqueCERTCertList certList(
+    CERT_FindUserCertsByUsage(CERT_GetDefaultCertDB(), certUsageSSLClient,
+                              false, true, nullptr));
+  if (!certList) {
+    return NS_OK;
+  }
+
+  // check if the list is empty
+  if (CERT_LIST_END(CERT_LIST_HEAD(certList), certList)) {
+    return NS_OK;
+  }
+
+  // The list is not empty, meaning at least one cert is installed
+  result = true;
+  return NS_OK;
+}
+
+nsresult
 nsNSSComponent::BlockUntilLoadableRootsLoaded()
 {
   MonitorAutoLock rootsLoadedLock(mLoadableRootsLoadedMonitor);
   while (!mLoadableRootsLoaded) {
     nsresult rv = rootsLoadedLock.Wait();
     if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
     }
--- a/security/manager/ssl/nsNSSComponent.h
+++ b/security/manager/ssl/nsNSSComponent.h
@@ -80,16 +80,20 @@ public:
   NS_IMETHOD IsCertContentSigningRoot(CERTCertificate* cert, bool& result) = 0;
 
 #ifdef XP_WIN
   NS_IMETHOD GetEnterpriseRoots(nsIX509CertList** enterpriseRoots) = 0;
 #endif
 
   NS_IMETHOD BlockUntilLoadableRootsLoaded() = 0;
 
+  // Main thread only
+  NS_IMETHOD HasActiveSmartCards(bool& result) = 0;
+  NS_IMETHOD HasUserCertsInstalled(bool& result) = 0;
+
   virtual ::already_AddRefed<mozilla::psm::SharedCertVerifier>
     GetDefaultCertVerifier() = 0;
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(nsINSSComponent, NS_INSSCOMPONENT_IID)
 
 class nsNSSShutDownList;
 
@@ -139,16 +143,20 @@ public:
   NS_IMETHOD IsCertContentSigningRoot(CERTCertificate* cert, bool& result) override;
 
 #ifdef XP_WIN
   NS_IMETHOD GetEnterpriseRoots(nsIX509CertList** enterpriseRoots) override;
 #endif
 
   NS_IMETHOD BlockUntilLoadableRootsLoaded() override;
 
+  // Main thread only
+  NS_IMETHOD HasActiveSmartCards(bool& result) override;
+  NS_IMETHOD HasUserCertsInstalled(bool& result) override;
+
   ::already_AddRefed<mozilla::psm::SharedCertVerifier>
     GetDefaultCertVerifier() override;
 
   // The following two methods are thread-safe.
   static bool AreAnyWeakCiphersEnabled();
   static void UseWeakCiphersOnSocket(PRFileDesc* fd);
 
   static void FillTLSVersionRange(SSLVersionRange& rangeOut,
--- a/security/sandbox/linux/broker/SandboxBrokerPolicyFactory.cpp
+++ b/security/sandbox/linux/broker/SandboxBrokerPolicyFactory.cpp
@@ -102,16 +102,19 @@ SandboxBrokerPolicyFactory::SandboxBroke
   policy->AddDir(rdonly, "/sys/devices/cpu");
   policy->AddDir(rdonly, "/sys/devices/system/cpu");
   policy->AddDir(rdonly, "/lib");
   policy->AddDir(rdonly, "/lib64");
   policy->AddDir(rdonly, "/usr/lib");
   policy->AddDir(rdonly, "/usr/lib32");
   policy->AddDir(rdonly, "/usr/lib64");
   policy->AddDir(rdonly, "/etc");
+#ifdef MOZ_PULSEAUDIO
+  policy->AddPath(rdonly, "/var/lib/dbus/machine-id");
+#endif
   policy->AddDir(rdonly, "/usr/share");
   policy->AddDir(rdonly, "/usr/local/share");
   policy->AddDir(rdonly, "/usr/tmp");
   policy->AddDir(rdonly, "/var/tmp");
   // Various places where fonts reside
   policy->AddDir(rdonly, "/usr/X11R6/lib/X11/fonts");
   policy->AddDir(rdonly, "/nix/store");
   policy->AddDir(rdonly, "/run/host/fonts");
--- a/servo/Cargo.lock
+++ b/servo/Cargo.lock
@@ -1870,16 +1870,21 @@ source = "registry+https://github.com/ru
 dependencies = [
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "mitochondria"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "mozjs_sys"
 version = "0.0.0"
 source = "git+https://github.com/servo/mozjs#834ce35c3f008010213351107b68f397989d2ffd"
 dependencies = [
  "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
  "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -2592,16 +2597,17 @@ dependencies = [
  "js 0.1.6 (git+https://github.com/servo/rust-mozjs)",
  "jstraceable_derive 0.0.1",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "metrics 0.0.1",
  "mime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime_guess 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mitochondria 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "msg 0.0.1",
  "net_traits 0.0.1",
  "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
  "offscreen_gl_context 0.11.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "open 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3906,16 +3912,17 @@ dependencies = [
 "checksum matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efd7622e3022e1a6eaa602c4cea8912254e5582c9c692e9167714182244801b1"
 "checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
 "checksum metadeps 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829fffe7ea1d747e23f64be972991bc516b2f1ac2ae4a3b33d8bea150c410151"
 "checksum mime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9d69889cdc6336ed56b174514ce876c4c3dc564cc23dd872e7bca589bb2a36c8"
 "checksum mime_guess 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76da6df85047af8c0edfa53f48eb1073012ce1cc95c8fedc0a374f659a89dd65"
 "checksum miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "28eaee17666671fa872e567547e8428e83308ebe5808cdf6a0e28397dbe2c726"
 "checksum mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7"
 "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
+"checksum mitochondria 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9de3eca27871df31c33b807f834b94ef7d000956f57aa25c5aed9c5f0aae8f6f"
 "checksum mozjs_sys 0.0.0 (git+https://github.com/servo/mozjs)" = "<none>"
 "checksum mp3-metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2f61cf32f7fc3cec83a15a255ac60bceb6cac59a7ce190cb824ca25c0fce0feb"
 "checksum mp4parse 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7b81651f9ede53d59281b54c7eb51ae50a868ac4765dd3bdfbbc79ce3d8aca7a"
 "checksum multistr 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "90fb6e1b4f6ca2f2098a437e1c7f09c122da62bbf2bde45b3693defc1eb61e2d"
 "checksum net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)" = "bc01404e7568680f1259aa5729539f221cb1e6d047a0d9053cab4be8a73b5d67"
 "checksum nodrop 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0dbbadd3f4c98dea0bd3d9b4be4c0cdaf1ab57035cb2e41fce3983db5add7cc5"
 "checksum nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8c256fd9471521bcb84c3cdba98921497f1a331cbc15b8030fc63b82050ce"
 "checksum num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "ef1a4bf6f9174aa5783a9b4cc892cacd11aebad6c69ad027a0b65c6ca5f8aa37"
--- a/servo/components/script/Cargo.toml
+++ b/servo/components/script/Cargo.toml
@@ -51,16 +51,17 @@ hyper_serde = "0.7"
 image = "0.12"
 ipc-channel = "0.8"
 js = {git = "https://github.com/servo/rust-mozjs", features = ["promises"]}
 jstraceable_derive = {path = "../jstraceable_derive"}
 lazy_static = "0.2"
 libc = "0.2"
 log = "0.3.5"
 metrics = {path = "../metrics"}
+mitochondria = "1.1.2"
 mime = "0.2.1"
 mime_guess = "1.8.0"
 msg = {path = "../msg"}
 net_traits = {path = "../net_traits"}
 num-traits = "0.1.32"
 offscreen_gl_context = { version = "0.11", features = ["serde"] }
 open = "1.1.1"
 parking_lot = "0.4"
--- a/servo/components/script/dom/bindings/js.rs
+++ b/servo/components/script/dom/bindings/js.rs
@@ -27,16 +27,17 @@ use core::nonzero::NonZero;
 use dom::bindings::conversions::DerivedFrom;
 use dom::bindings::inheritance::Castable;
 use dom::bindings::reflector::{DomObject, Reflector};
 use dom::bindings::trace::JSTraceable;
 use dom::bindings::trace::trace_reflector;
 use dom::node::Node;
 use heapsize::HeapSizeOf;
 use js::jsapi::{JSObject, JSTracer};
+use mitochondria::OnceCell;
 use script_layout_interface::TrustedNodeAddress;
 use script_thread::STACK_ROOTS;
 use std::cell::UnsafeCell;
 use std::default::Default;
 use std::hash::{Hash, Hasher};
 #[cfg(debug_assertions)]
 use std::intrinsics::type_name;
 use std::mem;
@@ -386,16 +387,65 @@ impl<T: DomObject> Default for MutNullab
 
 impl<T: DomObject> HeapSizeOf for MutNullableJS<T> {
     fn heap_size_of_children(&self) -> usize {
         // See comment on HeapSizeOf for JS<T>.
         0
     }
 }
 
+/// A holder that allows lazily initializing the value of a `JS<T>`
+/// exactly once, using `OnceCell`.
+/// Essentially a `OnceCell<JS<T>>`.
+///
+/// This should only be used as a field in other DOM objects; see warning
+/// on `JS<T>`.
+#[must_root]
+pub struct OnceCellJS<T: DomObject> {
+    ptr: OnceCell<JS<T>>,
+}
+
+impl<T: DomObject> OnceCellJS<T> {
+    /// Retrieve a reference to the inner value. If it is uninitialized,
+    /// it is initialized with the result of `cb` first.
+    #[allow(unrooted_must_root)]
+    pub fn init_once<F>(&self, cb: F) -> &T
+        where F: FnOnce() -> Root<T>
+    {
+        debug_assert!(thread_state::get().is_script());
+        &self.ptr.init_once(|| JS::from_ref(&cb()))
+    }
+}
+
+impl<T: DomObject> Default for OnceCellJS<T> {
+    #[allow(unrooted_must_root)]
+    fn default() -> OnceCellJS<T> {
+        debug_assert!(thread_state::get().is_script());
+        OnceCellJS {
+            ptr: OnceCell::new(),
+        }
+    }
+}
+
+impl<T: DomObject> HeapSizeOf for OnceCellJS<T> {
+    fn heap_size_of_children(&self) -> usize {
+        // See comment on HeapSizeOf for JS<T>.
+        0
+    }
+}
+
+#[allow(unrooted_must_root)]
+unsafe impl<T: DomObject> JSTraceable for OnceCellJS<T> {
+    unsafe fn trace(&self, trc: *mut JSTracer) {
+        if let Some(ptr) = self.ptr.as_ref() {
+            ptr.trace(trc);
+        }
+    }
+}
+
 impl<T: DomObject> LayoutJS<T> {
     /// Returns an unsafe pointer to the interior of this JS object. This is
     /// the only method that be safely accessed from layout. (The fact that
     /// this is unsafe is what necessitates the layout wrappers.)
     pub unsafe fn unsafe_get(&self) -> *const T {
         debug_assert!(thread_state::get().is_layout());
         self.ptr.get()
     }
--- a/servo/components/script/dom/htmlformelement.rs
+++ b/servo/components/script/dom/htmlformelement.rs
@@ -8,17 +8,17 @@ use dom::bindings::codegen::Bindings::Do
 use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
 use dom::bindings::codegen::Bindings::HTMLButtonElementBinding::HTMLButtonElementMethods;
 use dom::bindings::codegen::Bindings::HTMLFormControlsCollectionBinding::HTMLFormControlsCollectionMethods;
 use dom::bindings::codegen::Bindings::HTMLFormElementBinding;
 use dom::bindings::codegen::Bindings::HTMLFormElementBinding::HTMLFormElementMethods;
 use dom::bindings::codegen::Bindings::HTMLInputElementBinding::HTMLInputElementMethods;
 use dom::bindings::codegen::Bindings::HTMLTextAreaElementBinding::HTMLTextAreaElementMethods;
 use dom::bindings::inheritance::{Castable, ElementTypeId, HTMLElementTypeId, NodeTypeId};
-use dom::bindings::js::{JS, MutNullableJS, Root, RootedReference};
+use dom::bindings::js::{JS, OnceCellJS, Root, RootedReference};
 use dom::bindings::refcounted::Trusted;
 use dom::bindings::reflector::DomObject;
 use dom::bindings::str::DOMString;
 use dom::blob::Blob;
 use dom::document::Document;
 use dom::element::{AttributeMutation, Element};
 use dom::eventtarget::EventTarget;
 use dom::file::File;
@@ -59,17 +59,17 @@ use task_source::TaskSource;
 
 #[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq)]
 pub struct GenerationId(u32);
 
 #[dom_struct]
 pub struct HTMLFormElement {
     htmlelement: HTMLElement,
     marked_for_reset: Cell<bool>,
-    elements: MutNullableJS<HTMLFormControlsCollection>,
+    elements: OnceCellJS<HTMLFormControlsCollection>,
     generation_id: Cell<GenerationId>,
     controls: DOMRefCell<Vec<JS<Element>>>,
 }
 
 impl HTMLFormElement {
     fn new_inherited(local_name: LocalName,
                      prefix: Option<Prefix>,
                      document: &Document) -> HTMLFormElement {
@@ -161,20 +161,16 @@ impl HTMLFormElementMethods for HTMLForm
 
     // https://html.spec.whatwg.org/multipage/#dom-form-reset
     fn Reset(&self) {
         self.reset(ResetFrom::FromForm);
     }
 
     // https://html.spec.whatwg.org/multipage/#dom-form-elements
     fn Elements(&self) -> Root<HTMLFormControlsCollection> {
-        if let Some(elements) = self.elements.get() {
-            return elements;
-        }
-
         #[derive(HeapSizeOf, JSTraceable)]
         struct ElementsFilter {
             form: Root<HTMLFormElement>
         }
         impl CollectionFilter for ElementsFilter {
             fn filter<'a>(&self, elem: &'a Element, _root: &'a Node) -> bool {
                 let form_owner = match elem.upcast::<Node>().type_id() {
                     NodeTypeId::Element(ElementTypeId::HTMLElement(t)) => {
@@ -215,21 +211,21 @@ impl HTMLFormElementMethods for HTMLForm
                 };
 
                 match form_owner {
                     Some(form_owner) => form_owner == self.form,
                     None => false,
                 }
             }
         }
-        let filter = box ElementsFilter { form: Root::from_ref(self) };
-        let window = window_from_node(self);
-        let elements = HTMLFormControlsCollection::new(&window, self.upcast(), filter);
-        self.elements.set(Some(&elements));
-        elements
+        Root::from_ref(self.elements.init_once(|| {
+            let filter = box ElementsFilter { form: Root::from_ref(self) };
+            let window = window_from_node(self);
+            HTMLFormControlsCollection::new(&window, self.upcast(), filter)
+        }))
     }
 
     // https://html.spec.whatwg.org/multipage/#dom-form-length
     fn Length(&self) -> u32 {
         self.Elements().Length() as u32
     }
 
     // https://html.spec.whatwg.org/multipage/#dom-form-item
--- a/servo/components/script/lib.rs
+++ b/servo/components/script/lib.rs
@@ -65,16 +65,17 @@ extern crate jstraceable_derive;
 extern crate lazy_static;
 extern crate libc;
 #[macro_use]
 extern crate log;
 extern crate metrics;
 #[macro_use]
 extern crate mime;
 extern crate mime_guess;
+extern crate mitochondria;
 extern crate msg;
 extern crate net_traits;
 extern crate num_traits;
 extern crate offscreen_gl_context;
 extern crate open;
 extern crate parking_lot;
 extern crate phf;
 #[macro_use]
--- a/taskcluster/ci/build/android-stuff.yml
+++ b/taskcluster/ci/build/android-stuff.yml
@@ -34,19 +34,19 @@ android-dependencies/opt:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: api-16-gradle-dependencies
         tooltool-downloads: internal
         job-script: taskcluster/scripts/builder/build-android-dependencies.sh
-    optimizations:
-      - - skip-unless-changed
-        - - "mobile/android/config/**"
+    optimization:
+        skip-unless-changed:
+          - "mobile/android/config/**"
           - "testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py"
           - "**/*.gradle"
           - "taskcluster/docker/android-build/**"
 
 android-test/opt:
     description: "Android armv7 unit tests"
     index:
         product: mobile
@@ -77,19 +77,19 @@ android-test/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-test
         tooltool-downloads: internal
-    optimizations:
-      - - skip-unless-changed
-        - - "mobile/android/base/**"
+    optimization:
+        skip-unless-changed:
+          - "mobile/android/base/**"
           - "mobile/android/config/**"
           - "mobile/android/tests/background/junit4/**"
           - "**/*.gradle"
 
 android-lint/opt:
     description: "Android lint"
     index:
         product: mobile
@@ -135,19 +135,19 @@ android-lint/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-lint
         tooltool-downloads: internal
-    optimizations:
-      - - skip-unless-changed
-        - - "mobile/android/**/*.java"
+    optimization:
+        skip-unless-changed:
+          - "mobile/android/**/*.java"
           - "mobile/android/**/*.jpeg"
           - "mobile/android/**/*.jpg"
           - "mobile/android/**/*.png"
           - "mobile/android/**/*.svg"
           - "mobile/android/**/*.xml" # Manifest & android resources
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
@@ -187,19 +187,19 @@ android-checkstyle/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-checkstyle
         tooltool-downloads: internal
-    optimizations:
-      - - skip-unless-changed
-        - - "mobile/android/**/checkstyle.xml"
+    optimization:
+        skip-unless-changed:
+          - "mobile/android/**/checkstyle.xml"
           - "mobile/android/**/*.java"
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
           - "**/*.gradle"
 
 android-findbugs/opt:
     description: "Android findbugs"
@@ -241,15 +241,15 @@ android-findbugs/opt:
         config:
             - builds/releng_base_android_64_builds.py
             - disable_signing.py
             - platform_supports_post_upload_to_latest.py
         script: "mozharness/scripts/fx_desktop_build.py"
         secrets: true
         custom-build-variant-cfg: android-findbugs
         tooltool-downloads: internal
-    optimizations:
-      - - skip-unless-changed
-        - - "mobile/android/**/*.java"
+    optimization:
+        skip-unless-changed:
+          - "mobile/android/**/*.java"
           - "mobile/android/**/Makefile.in"
           - "mobile/android/config/**"
           - "mobile/android/**/moz.build"
           - "**/*.gradle"
--- a/taskcluster/ci/build/kind.yml
+++ b/taskcluster/ci/build/kind.yml
@@ -16,10 +16,8 @@ transforms:
    - taskgraph.transforms.task:transforms
 
 jobs-from:
     - android.yml
     - android-stuff.yml
     - linux.yml
     - macosx.yml
     - windows.yml
-
-parse-commit: taskgraph.try_option_syntax:parse_message
--- a/taskcluster/ci/test/kind.yml
+++ b/taskcluster/ci/test/kind.yml
@@ -4,10 +4,8 @@ kind-dependencies:
     - build
     - build-signing
 
 transforms:
    - taskgraph.transforms.tests:transforms
    - taskgraph.transforms.job:transforms
    - taskgraph.transforms.coalesce:transforms
    - taskgraph.transforms.task:transforms
-
-parse-commit: taskgraph.try_option_syntax:parse_message
--- a/taskcluster/ci/upload-generated-sources/kind.yml
+++ b/taskcluster/ci/upload-generated-sources/kind.yml
@@ -26,10 +26,12 @@ job-template:
   worker:
      docker-image: {in-tree: "lint"}
      max-run-time: 600
   run:
     using: run-task
     command: >
             cd /builds/worker/checkouts/gecko &&
             ./mach python build/upload_generated_sources.py ${ARTIFACT_URL}
+  optimization:
+    only-if-dependencies-run: null
   scopes:
       - secrets:get:project/releng/gecko/build/level-{level}/gecko-generated-sources-upload
--- a/taskcluster/ci/upload-symbols/kind.yml
+++ b/taskcluster/ci/upload-symbols/kind.yml
@@ -38,10 +38,12 @@ job-template:
        os: linux
        max-run-time: 600
        command: ["/bin/bash", "bin/upload.sh"]
        docker-image: taskclusterprivate/upload_symbols:0.0.4
        env:
            GECKO_HEAD_REPOSITORY: # see transforms
            GECKO_HEAD_REV: # see transforms
            ARTIFACT_TASKID: {"task-reference": "<build>"}
+   optimization:
+       only-if-dependencies-run: null
    scopes:
        - docker-worker:image:taskclusterprivate/upload_symbols:0.0.4
--- a/taskcluster/docs/loading.rst
+++ b/taskcluster/docs/loading.rst
@@ -27,17 +27,8 @@ The return value is a list of inputs to 
 ``transforms`` property. The specific format for the input depends on the first
 transform - whatever it expects. The final transform should be
 ``taskgraph.transform.task:transforms``, which produces the output format the
 task-graph generation infrastructure expects.
 
 The ``transforms`` key in ``kind.yml`` is further documented in
 :doc:`transforms`.  For more information on how all of this works, consult the
 docstrings and comments in the source code itself.
-
-Try option syntax
------------------
-
-The ``parse-commit`` optional field specified in ``kind.yml`` links to a
-function to parse the command line options in the ``--message`` mach parameter.
-Currently, the only valid value is ``taskgraph.try_option_syntax:parse_message``.
-The parsed arguments are stored in ``config.config['args']``, it corresponds
-to the same object returned by ``parse_args`` from ``argparse`` Python module.
--- a/taskcluster/docs/optimization.rst
+++ b/taskcluster/docs/optimization.rst
@@ -1,44 +1,119 @@
 Optimization
 ============
 
 The objective of optimization to remove as many tasks from the graph as
 possible, as efficiently as possible, thereby delivering useful results as
-quickly as possible.  For example, ideally if only a test script is modified in
+quickly as possible. For example, ideally if only a test script is modified in
 a push, then the resulting graph contains only the corresponding test suite
 task.
 
 A task is said to be "optimized" when it is either replaced with an equivalent,
 already-existing task, or dropped from the graph entirely.
 
-Optimization Functions
-----------------------
+Optimization Strategies
+-----------------------
 
-During the optimization phase of task-graph generation, each task is optimized
-in post-order, meaning that each task's dependencies will be optimized before
-the task itself is optimized.
+Each task has a single named optimization strategy, and can provide an argument
+to that strategy. Each strategy is defined as an ``OptimizationStrategy``
+instance in ``taskcluster/taskgraph/optimization.py``.
 
-Each task has a ``task.optimizations`` property describing the optimization
-methods that apply.  Each is specified as a list of method and arguments. For
+Each task has a ``task.optimization`` property describing the optimization
+strategy that applies, specified as a dictionary mapping strategy to argument. For
 example::
 
-    task.optimizations = [
-        ['seta'],
-        ['skip-unless-changed', ['js/**', 'tests/**']],
-    ]
+    task.optimization = {'skip-unless-changed': ['js/**', 'tests/**']}
 
-These methods are defined in ``taskcluster/taskgraph/optimize.py``.  They are
-applied in order, and the first to return a success value causes the task to
-be optimized.
-
-Each method can return either a taskId (indicating that the given task can be
-replaced) or indicate that the task can be optimized away. If a task on which
-others depend is optimized away, task-graph generation will fail.
+Strategy implementations are shared across all tasks, so they may cache
+commonly-used information as instance variables.
 
 Optimizing Target Tasks
 -----------------------
 
 In some cases, such as try pushes, tasks in the target task set have been
 explicitly requested and are thus excluded from optimization. In other cases,
 the target task set is almost the entire task graph, so targetted tasks are
-considered for optimization.  This behavior is controlled with the
+considered for optimization. This behavior is controlled with the
 ``optimize_target_tasks`` parameter.
+
+.. note:
+
+    Because it is a mix of "what the push author wanted" and "what should run
+    when necessary", try pushes with the old option syntax (``-b do -p all``,
+    etc.) *do* optimize target tasks.  This can cause unexpected results when
+    requested jobs are optimized away.  If those jobs were actually necessary,
+    then a try push with ``try_task_config.json`` is the solution.
+
+Optimization Process
+--------------------
+
+Optimization proceeds in three phases: removing tasks, replacing tasks,
+and finally generating a subgraph containing only the remaining tasks.
+
+Assume the following task graph as context for these examples::
+
+    TC1 <--\     ,- UP1
+          , B1 <--- T1a
+    I1 <-|       `- T1b
+          ` B2 <--- T2a
+    TC2 <--/     |- T2b
+                 `- UP2
+
+Removing Tasks
+::::::::::::::
+
+This phase begins with tasks on which nothing depends and follows the
+dependency graph backward from there -- right to left in the diagram above. If
+a task is not removed, then nothing it depends on will be removed either.
+Thus if T1a and T1b are both removed, B1 may be removed as well. But if T2b is
+not removed, then B2 may not be removed either.
+
+For each task with no remaining dependencies, the decision whether to remove is
+made by calling the optimization strategy's ``should_remove_task`` method. If
+this method returns True, the task is removed.
+
+The optimization process takes a ``do_not_optimize`` argument containing a list
+of tasks that cannot be removed under any circumstances. This is used to
+"force" running specific tasks.
+
+Replacing Tasks
+:::::::::::::::
+
+This phase begins with tasks having no dependencies and follows the reversed
+dependency graph from there -- left to right in the diagram above. If a task is
+not replaced, then anything depending on that task cannot be replaced.
+Replacement is generally done on the basis of some hash of the inputs to the
+task. In the diagram above, if both TC1 and I1 are replaced with existing
+tasks, then B1 is a candidate for replacement. But if TC2 has no replacement,
+then replacement of B2 will not be considered.
+
+It is possible to replace a task with nothing.  This is similar to optimizing
+away, but is useful for utility tasks like UP1. If such a task is considered
+for replacement, then all of its dependencies (here, B1) have already been
+replaced and there is no utility in running the task and no need for a
+replacement task.  It is an error for a task on which others depend to be
+replaced with nothing.
+
+The ``do_not_optimize`` set applies to task replacement, as does an additional
+``existing_tasks`` dictionary which allows the caller to supply a set of
+known, pre-existing tasks. This is used for action tasks, for example, where it
+contains the entire task-graph generated by the original decision task.
+
+Subgraph Generation
+:::::::::::::::::::
+
+The first two phases annotate each task in the existing taskgraph with their
+fate: removed, replaced, or retained. The tasks that are replaced also have a
+replacement taskId.
+
+The last phase constructs a subgraph containing the retained tasks, and
+simultaneously rewrites all dependencies to refer to taskIds instead of labels.
+To do so, it assigns a taskId to each retained task and uses the replacement
+taskId for all replaced tasks.
+
+The result is an optimized taskgraph with tasks named by taskId instead of
+label. At this phase, the edges in the task graph diverge from the
+``task.dependencies`` attributes, as the latter may contain dependencies
+outside of the taskgraph (for replacement tasks).
+
+As a side-effect, this phase also expands all ``{"task-reference": ".."}``
+objects within the task definitions.
--- a/taskcluster/docs/parameters.rst
+++ b/taskcluster/docs/parameters.rst
@@ -74,50 +74,53 @@ Tree Information
    ``cedar``.
 
 ``level``
    The `SCM level
    <https://www.mozilla.org/en-US/about/governance/policies/commit/access-policy/>`_
    associated with this tree.  This dictates the names of resources used in the
    generated tasks, and those tasks will fail if it is incorrect.
 
+Try Configuration
+-----------------
+
+``try_mode``
+    The mode in which a try push is operating.  This can be one of
+    ``"try_task_config"``, ``"try_option_syntax"``, or ``None`` meaning no try
+    input was provided.
+
+``try_options``
+    The arguments given as try syntax (as a dictionary), or ``None`` if
+    ``try_mode`` is not ``try_option_syntax``.
+
+``try_task_config``
+    The contents of the ``try_task_config.json`` file, or ``None`` if
+    ``try_mode`` is not ``try_task_config``.
+
 Target Set
 ----------
 
 The "target set" is the set of task labels which must be included in a task
 graph.  The task graph generation process will include any tasks required by
 those in the target set, recursively.  In a decision task, this set can be
 specified programmatically using one of a variety of methods (e.g., parsing try
 syntax or reading a project-specific configuration file).
 
 ``filters``
     List of filter functions (from ``taskcluster/taskgraph/filter_tasks.py``) to
     apply. This is usually defined internally, as filters are typically
     global.
 
-``target_task_labels``
-    List of task labels to select. Labels not listed will be filtered out.
-    Enabled on try only.
-
 ``target_tasks_method``
     The method to use to determine the target task set.  This is the suffix of
     one of the functions in ``taskcluster/taskgraph/target_tasks.py``.
 
 ``optimize_target_tasks``
     If true, then target tasks are eligible for optimization.
 
 ``include_nightly``
     If true, then nightly tasks are eligible for optimization.
 
 ``release_history``
    History of recent releases by platform and locale, used when generating
    partial updates for nightly releases.
    Suitable contents can be generated with ``mach release-history``,
    which will print to the console by default.
-
-Morphed Set
------------
-
-``morph_templates``
-    Dict of JSON-e templates to apply to each task, keyed by template name.
-    Values are extra context that will be available to the template under the
-    ``input.<template>`` key. Available templates live in
-    ``taskcluster/taskgraph/templates``. Enabled on try only.
--- a/taskcluster/taskgraph/decision.py
+++ b/taskcluster/taskgraph/decision.py
@@ -12,16 +12,17 @@ import re
 
 import time
 import yaml
 
 from .generator import TaskGraphGenerator
 from .create import create_tasks
 from .parameters import Parameters
 from .taskgraph import TaskGraph
+from .try_option_syntax import parse_message
 from .actions import render_actions_json
 from taskgraph.util.partials import populate_release_history
 from . import GECKO
 
 from taskgraph.util.templates import Templates
 from taskgraph.util.time import (
     json_time_from_now,
     current_json_time,
@@ -31,20 +32,16 @@ logger = logging.getLogger(__name__)
 
 ARTIFACTS_DIR = 'artifacts'
 
 # For each project, this gives a set of parameters specific to the project.
 # See `taskcluster/docs/parameters.rst` for information on parameters.
 PER_PROJECT_PARAMETERS = {
     'try': {
         'target_tasks_method': 'try_tasks',
-        # Always perform optimization.  This makes it difficult to use try
-        # pushes to run a task that would otherwise be optimized, but is a
-        # compromise to avoid essentially disabling optimization in try.
-        'optimize_target_tasks': True,
         # By default, the `try_option_syntax` `target_task_method` ignores this
         # parameter, and enables/disables nightlies depending whether
         # `--include-nightly` is specified in the commit message.
         # We're setting the `include_nightly` parameter to True here for when
         # we submit decision tasks against Try that use other
         # `target_task_method`s, like `nightly_fennec` or `mozilla_beta_tasks`,
         # which reference the `include_nightly` parameter.
         'include_nightly': True,
@@ -163,18 +160,16 @@ def get_decision_parameters(options):
     ] if n in options}
 
     # Define default filter list, as most configurations shouldn't need
     # custom filters.
     parameters['filters'] = [
         'check_servo',
         'target_tasks_method',
     ]
-    parameters['target_task_labels'] = []
-    parameters['morph_templates'] = {}
 
     # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
     # case, fake it
     if '@' not in parameters['owner']:
         parameters['owner'] += '@noreply.mozilla.org'
 
     # use the pushdate as build_date if given, else use current time
     parameters['build_date'] = parameters['pushdate'] or int(time.time())
@@ -186,36 +181,63 @@ def get_decision_parameters(options):
     try:
         parameters.update(PER_PROJECT_PARAMETERS[project])
     except KeyError:
         logger.warning("using default project parameters; add {} to "
                        "PER_PROJECT_PARAMETERS in {} to customize behavior "
                        "for this project".format(project, __file__))
         parameters.update(PER_PROJECT_PARAMETERS['default'])
 
-    # morph_templates and target_task_labels are only used on try, so don't
-    # bother loading them elsewhere
-    task_config_file = os.path.join(GECKO, 'try_task_config.json')
-    if project == 'try' and os.path.isfile(task_config_file):
-        with open(task_config_file, 'r') as fh:
-            task_config = json.load(fh)
-        parameters['morph_templates'] = task_config.get('templates', {})
-        parameters['target_task_labels'] = task_config.get('tasks')
-
     # `target_tasks_method` has higher precedence than `project` parameters
     if options.get('target_tasks_method'):
         parameters['target_tasks_method'] = options['target_tasks_method']
 
     # If the target method is nightly, we should build partials. This means
     # knowing what has been released previously.
     # An empty release_history is fine, it just means no partials will be built
     parameters.setdefault('release_history', dict())
     if 'nightly' in parameters.get('target_tasks_method', ''):
         parameters['release_history'] = populate_release_history('Firefox', project)
 
+    # if try_task_config.json is present, load it
+    task_config_file = os.path.join(os.getcwd(), 'try_task_config.json')
+
+    # load try settings
+    parameters['try_mode'] = None
+    if os.path.isfile(task_config_file):
+        parameters['try_mode'] = 'try_task_config'
+        with open(task_config_file, 'r') as fh:
+            parameters['try_task_config'] = json.load(fh)
+    else:
+        parameters['try_task_config'] = None
+
+    if 'try:' in parameters['message']:
+        parameters['try_mode'] = 'try_option_syntax'
+        args = parse_message(parameters['message'])
+        parameters['try_options'] = args
+    else:
+        parameters['try_options'] = None
+
+    parameters['optimize_target_tasks'] = {
+        # The user has explicitly requested a set of jobs, so run them all
+        # regardless of optimization.  Their dependencies can be optimized,
+        # though.
+        'try_task_config': False,
+
+        # Always perform optimization.  This makes it difficult to use try
+        # pushes to run a task that would otherwise be optimized, but is a
+        # compromise to avoid essentially disabling optimization in try.
+        # to run tasks that would otherwise be optimized, use try_task_config.
+        'try_option_syntax': True,
+
+        # since no try jobs have been specified, the standard target task will
+        # be applied, and tasks should be optimized out of that.
+        None: True,
+    }[parameters['try_mode']]
+
     return Parameters(parameters)
 
 
 def write_artifact(filename, data):
     logger.info('writing artifact file `{}`'.format(filename))
     if not os.path.isdir(ARTIFACTS_DIR):
         os.mkdir(ARTIFACTS_DIR)
     path = os.path.join(ARTIFACTS_DIR, filename)
--- a/taskcluster/taskgraph/generator.py
+++ b/taskcluster/taskgraph/generator.py
@@ -37,22 +37,16 @@ class Kind(object):
         except KeyError:
             raise KeyError("{!r} does not define `loader`".format(self.path))
         return find_object(loader)
 
     def load_tasks(self, parameters, loaded_tasks):
         loader = self._get_loader()
         config = copy.deepcopy(self.config)
 
-        if 'parse-commit' in self.config:
-            parse_commit = find_object(config['parse-commit'])
-            config['args'] = parse_commit(parameters['message'])
-        else:
-            config['args'] = None
-
         kind_dependencies = config.get('kind-dependencies', [])
         kind_dependencies_tasks = [task for task in loaded_tasks
                                    if task.kind in kind_dependencies]
 
         inputs = loader(self.name, self.path, config, parameters, loaded_tasks)
 
         transforms = TransformSequence()
         for xform_path in config['transforms']:
@@ -61,17 +55,17 @@ class Kind(object):
 
         # perform the transformations on the loaded inputs
         trans_config = TransformConfig(self.name, self.path, config, parameters,
                                        kind_dependencies_tasks)
         tasks = [Task(self.name,
                       label=task_dict['label'],
                       attributes=task_dict['attributes'],
                       task=task_dict['task'],
-                      optimizations=task_dict.get('optimizations'),
+                      optimization=task_dict.get('optimization'),
                       dependencies=task_dict.get('dependencies'))
                  for task_dict in transforms(trans_config, inputs)]
         return tasks
 
 
 class TaskGraphGenerator(object):
     """
     The central controller for taskgraph.  This handles all phases of graph
--- a/taskcluster/taskgraph/graph.py
+++ b/taskcluster/taskgraph/graph.py
@@ -73,39 +73,49 @@ class Graph(object):
             add_edges = set((left, right, name)
                             for (left, right, name) in self.edges
                             if (right if reverse else left) in nodes)
             add_nodes = set((left if reverse else right) for (left, right, _) in add_edges)
             new_nodes = nodes | add_nodes
             new_edges = edges | add_edges
         return Graph(new_nodes, new_edges)
 
-    def visit_postorder(self):
-        """
-        Generate a sequence of nodes in postorder, such that every node is
-        visited *after* any nodes it links to.
-
-        Behavior is undefined (read: it will hang) if the graph contains a
-        cycle.
-        """
+    def _visit(self, reverse):
         queue = collections.deque(sorted(self.nodes))
-        links_by_node = self.links_dict()
+        links_by_node = self.reverse_links_dict() if reverse else self.links_dict()
         seen = set()
         while queue:
             node = queue.popleft()
             if node in seen:
                 continue
             links = links_by_node[node]
             if all((n in seen) for n in links):
                 seen.add(node)
                 yield node
             else:
                 queue.extend(n for n in links if n not in seen)
                 queue.append(node)
 
+    def visit_postorder(self):
+        """
+        Generate a sequence of nodes in postorder, such that every node is
+        visited *after* any nodes it links to.
+
+        Behavior is undefined (read: it will hang) if the graph contains a
+        cycle.
+        """
+        return self._visit(False)
+
+    def visit_preorder(self):
+        """
+        Like visit_postorder, but in reverse: every node is visited *before*
+        any nodes it links to.
+        """
+        return self._visit(True)
+
     def links_dict(self):
         """
         Return a dictionary mapping each node to a set of the nodes it links to
         (omitting edge names)
         """
         links = collections.defaultdict(set)
         for left, right, _ in self.edges:
             links[left].add(right)
--- a/taskcluster/taskgraph/morph.py
+++ b/taskcluster/taskgraph/morph.py
@@ -278,13 +278,15 @@ class apply_jsone_templates(object):
         return taskgraph, label_to_taskid
 
 
 def morph(taskgraph, label_to_taskid, parameters):
     """Apply all morphs"""
     morphs = [
         add_index_tasks,
         add_s3_uploader_task,
-        apply_jsone_templates(parameters.get('morph_templates')),
     ]
+    if parameters['try_mode'] == 'try_task_config':
+        morphs.append(apply_jsone_templates(parameters['try_task_config'].get('templates')))
+
     for m in morphs:
         taskgraph, label_to_taskid = m(taskgraph, label_to_taskid)
     return taskgraph, label_to_taskid
--- a/taskcluster/taskgraph/optimize.py
+++ b/taskcluster/taskgraph/optimize.py
@@ -1,219 +1,382 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+The objective of optimization is to remove as many tasks from the graph as
+possible, as efficiently as possible, thereby delivering useful results as
+quickly as possible.  For example, ideally if only a test script is modified in
+a push, then the resulting graph contains only the corresponding test suite
+task.
+
+See ``taskcluster/docs/optimization.rst`` for more information.
+"""
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import logging
 import os
-import requests
+from collections import defaultdict
 
 from .graph import Graph
 from . import files_changed
 from .taskgraph import TaskGraph
 from .util.seta import is_low_value_task
 from .util.taskcluster import find_task_id
 from .util.parameterization import resolve_task_references
+from mozbuild.util import memoize
 from slugid import nice as slugid
+from mozbuild.base import MozbuildObject
 
 logger = logging.getLogger(__name__)
 
-_optimizations = {}
+TOPSRCDIR = os.path.abspath(os.path.join(__file__, '../../../'))
 
 
-def optimize_task_graph(target_task_graph, params, do_not_optimize, existing_tasks=None):
+def optimize_task_graph(target_task_graph, params, do_not_optimize,
+                        existing_tasks=None, strategies=None):
+    """
+    Perform task optimization, returning a taskgraph and a map from label to
+    assigned taskId, including replacement tasks.
     """
-    Perform task optimization, without optimizing tasks named in
-    do_not_optimize.
-    """
-    named_links_dict = target_task_graph.graph.named_links_dict()
     label_to_taskid = {}
+    if not existing_tasks:
+        existing_tasks = {}
+
+    # instantiate the strategies for this optimization process
+    if not strategies:
+        strategies = _make_default_strategies()
+
+    optimizations = _get_optimizations(target_task_graph, strategies)
 
-    # This proceeds in two phases.  First, mark all optimized tasks (those
-    # which will be removed from the graph) as such, including a replacement
-    # taskId where applicable.  Second, generate a new task graph containing
-    # only the non-optimized tasks, with all task labels resolved to taskIds
-    # and with task['dependencies'] populated.
-    annotate_task_graph(target_task_graph=target_task_graph,
-                        params=params,
-                        do_not_optimize=do_not_optimize,
-                        named_links_dict=named_links_dict,
-                        label_to_taskid=label_to_taskid,
-                        existing_tasks=existing_tasks)
-    return get_subgraph(target_task_graph, named_links_dict, label_to_taskid), label_to_taskid
+    removed_tasks = remove_tasks(
+        target_task_graph=target_task_graph,
+        optimizations=optimizations,
+        params=params,
+        do_not_optimize=do_not_optimize)
+
+    replaced_tasks = replace_tasks(
+        target_task_graph=target_task_graph,
+        optimizations=optimizations,
+        params=params,
+        do_not_optimize=do_not_optimize,
+        label_to_taskid=label_to_taskid,
+        existing_tasks=existing_tasks,
+        removed_tasks=removed_tasks)
+
+    return get_subgraph(
+            target_task_graph, removed_tasks, replaced_tasks,
+            label_to_taskid), label_to_taskid
 
 
-def optimize_task(task, params):
-    """
-    Optimize a single task by running its optimizations in order until one
-    succeeds.
-    """
-    for opt in task.optimizations:
-        opt_type, args = opt[0], opt[1:]
-        opt_fn = _optimizations[opt_type]
-        opt_result = opt_fn(task, params, *args)
-        if opt_result:
-            return opt_result
+def _make_default_strategies():
+    return {
+        'never': OptimizationStrategy(),  # "never" is the default behavior
+        'index-search': IndexSearch(),
+        'seta': SETA(),
+        'skip-unless-changed': SkipUnlessChanged(),
+        'skip-unless-schedules': SkipUnlessSchedules(),
+        'skip-unless-schedules-or-seta': Either(SkipUnlessSchedules(), SETA()),
+        'only-if-dependencies-run': OnlyIfDependenciesRun(),
+    }
+
 
-    return False
+def _get_optimizations(target_task_graph, strategies):
+    def optimizations(label):
+        task = target_task_graph.tasks[label]
+        if task.optimization:
+            opt_by, arg = task.optimization.items()[0]
+            return (opt_by, strategies[opt_by], arg)
+        else:
+            return ('never', strategies['never'], None)
+    return optimizations
+
+
+def _log_optimization(verb, opt_counts):
+    if opt_counts:
+        logger.info(
+            '{} '.format(verb.title()) +
+            ', '.join(
+                '{} tasks by {}'.format(c, b)
+                for b, c in sorted(opt_counts.iteritems())) +
+            ' during optimization.')
+    else:
+        logger.info('No tasks {} during optimization'.format(verb))
 
 
-def annotate_task_graph(target_task_graph, params, do_not_optimize,
-                        named_links_dict, label_to_taskid, existing_tasks):
+def remove_tasks(target_task_graph, params, optimizations, do_not_optimize):
+    """
+    Implement the "Removing Tasks" phase, returning a set of task labels of all removed tasks.
     """
-    Annotate each task in the graph with .optimized (boolean) and .task_id
-    (possibly None), following the rules for optimization and calling the task
-    kinds' `optimize_task` method.
-
-    As a side effect, label_to_taskid is updated with labels for all optimized
-    tasks that are replaced with existing tasks.
-    """
+    opt_counts = defaultdict(int)
+    removed = set()
+    reverse_links_dict = target_task_graph.graph.reverse_links_dict()
 
-    # set .optimized for all tasks, and .task_id for optimized tasks
-    # with replacements
-    for label in target_task_graph.graph.visit_postorder():
-        task = target_task_graph.tasks[label]
-        named_task_dependencies = named_links_dict.get(label, {})
-
-        # check whether any dependencies have been optimized away
-        dependencies = [target_task_graph.tasks[l] for l in named_task_dependencies.itervalues()]
-        for t in dependencies:
-            if t.optimized and not t.task_id:
-                raise Exception(
-                    "task {} was optimized away, but {} depends on it".format(
-                        t.label, label))
-
-        # if this task is blacklisted, don't even consider optimizing
-        replacement_task_id = None
+    for label in target_task_graph.graph.visit_preorder():
+        # if we're not allowed to optimize, that's easy..
         if label in do_not_optimize:
-            optimized = False
-        # Let's check whether this task has been created before
-        elif existing_tasks is not None and label in existing_tasks:
-            optimized = True
-            replacement_task_id = existing_tasks[label]
-        # otherwise, examine the task itself (which may be an expensive operation)
-        else:
-            opt_result = optimize_task(task, params)
+            continue
 
-            # use opt_result to determine values for optimized, replacement_task_id
-            optimized = bool(opt_result)
-            replacement_task_id = opt_result if opt_result and opt_result is not True else None
+        # if there are remaining tasks depending on this one, do not remove..
+        if any(l not in removed for l in reverse_links_dict[label]):
+            continue
 
-        task.optimized = optimized
-        task.task_id = replacement_task_id
-        if replacement_task_id:
-            label_to_taskid[label] = replacement_task_id
+        # call the optimization strategy
+        task = target_task_graph.tasks[label]
+        opt_by, opt, arg = optimizations(label)
+        if opt.should_remove_task(task, params, arg):
+            removed.add(label)
+            opt_counts[opt_by] += 1
+            continue
 
-        if optimized:
-            if replacement_task_id:
-                logger.debug("optimizing `{}`, replacing with task `{}`"
-                             .format(label, replacement_task_id))
-            else:
-                logger.debug("optimizing `{}` away".format(label))
-                # note: any dependent tasks will fail when they see this
-        else:
-            if replacement_task_id:
-                raise Exception("{}: optimize_task returned False with a taskId".format(label))
+    _log_optimization('removed', opt_counts)
+    return removed
 
 
-def get_subgraph(annotated_task_graph, named_links_dict, label_to_taskid):
+def replace_tasks(target_task_graph, params, optimizations, do_not_optimize,
+                  label_to_taskid, removed_tasks, existing_tasks):
+    """
+    Implement the "Replacing Tasks" phase, returning a set of task labels of
+    all replaced tasks. The replacement taskIds are added to label_to_taskid as
+    a side-effect.
     """
-    Return the subgraph of annotated_task_graph consisting only of
+    opt_counts = defaultdict(int)
+    replaced = set()
+    links_dict = target_task_graph.graph.links_dict()
+
+    for label in target_task_graph.graph.visit_postorder():
+        # if we're not allowed to optimize, that's easy..
+        if label in do_not_optimize:
+            continue
+
+        # if this task depends on un-replaced, un-removed tasks, do not replace
+        if any(l not in replaced and l not in removed_tasks for l in links_dict[label]):
+            continue
+
+        # if the task already exists, that's an easy replacement
+        repl = existing_tasks.get(label)
+        if repl:
+            label_to_taskid[label] = repl
+            replaced.add(label)
+            opt_counts['existing_tasks'] += 1
+            continue
+
+        # call the optimization strategy
+        task = target_task_graph.tasks[label]
+        opt_by, opt, arg = optimizations(label)
+        repl = opt.should_replace_task(task, params, arg)
+        if repl:
+            if repl is True:
+                # True means remove this task; get_subgraph will catch any
+                # problems with removed tasks being depended on
+                removed_tasks.add(label)
+            else:
+                label_to_taskid[label] = repl
+                replaced.add(label)
+            opt_counts[opt_by] += 1
+            continue
+
+    _log_optimization('replaced', opt_counts)
+    return replaced
+
+
+def get_subgraph(target_task_graph, removed_tasks, replaced_tasks, label_to_taskid):
+    """
+    Return the subgraph of target_task_graph consisting only of
     non-optimized tasks and edges between them.
 
     To avoid losing track of taskIds for tasks optimized away, this method
     simultaneously substitutes real taskIds for task labels in the graph, and
     populates each task definition's `dependencies` key with the appropriate
     taskIds.  Task references are resolved in the process.
     """
 
+    # check for any dependency edges from included to removed tasks
+    bad_edges = [(l, r, n) for l, r, n in target_task_graph.graph.edges
+                 if l not in removed_tasks and r in removed_tasks]
+    if bad_edges:
+        probs = ', '.join('{} depends on {} as {} but it has been removed'.format(l, r, n)
+                          for l, r, n in bad_edges)
+        raise Exception("Optimization error: " + probs)
+
+    # fill in label_to_taskid for anything not removed or replaced
+    assert replaced_tasks <= set(label_to_taskid)
+    for label in sorted(target_task_graph.graph.nodes - removed_tasks - set(label_to_taskid)):
+        label_to_taskid[label] = slugid()
+
     # resolve labels to taskIds and populate task['dependencies']
     tasks_by_taskid = {}
-    for label in annotated_task_graph.graph.visit_postorder():
-        task = annotated_task_graph.tasks[label]
-        if task.optimized:
+    named_links_dict = target_task_graph.graph.named_links_dict()
+    omit = removed_tasks | replaced_tasks
+    for label, task in target_task_graph.tasks.iteritems():
+        if label in omit:
             continue
-        task.task_id = label_to_taskid[label] = slugid()
+        task.task_id = label_to_taskid[label]
         named_task_dependencies = {
-                name: label_to_taskid[label]
-                for name, label in named_links_dict.get(label, {}).iteritems()}
+            name: label_to_taskid[label]
+            for name, label in named_links_dict.get(label, {}).iteritems()}
         task.task = resolve_task_references(task.label, task.task, named_task_dependencies)
-        task.task.setdefault('dependencies', []).extend(named_task_dependencies.itervalues())
+        deps = task.task.setdefault('dependencies', [])
+        deps.extend(sorted(named_task_dependencies.itervalues()))
         tasks_by_taskid[task.task_id] = task
 
     # resolve edges to taskIds
     edges_by_taskid = (
         (label_to_taskid.get(left), label_to_taskid.get(right), name)
-        for (left, right, name) in annotated_task_graph.graph.edges
-        )
-    # ..and drop edges that are no longer in the task graph
+        for (left, right, name) in target_task_graph.graph.edges
+    )
+    # ..and drop edges that are no longer entirely in the task graph
+    #   (note that this omits edges to replaced tasks, but they are still in task.dependencies)
     edges_by_taskid = set(
         (left, right, name)
         for (left, right, name) in edges_by_taskid
         if left in tasks_by_taskid and right in tasks_by_taskid
-        )
+    )
 
     return TaskGraph(
         tasks_by_taskid,
         Graph(set(tasks_by_taskid), edges_by_taskid))
 
 
-def optimization(name):
-    def wrap(func):
-        if name in _optimizations:
-            raise Exception("multiple optimizations with name {}".format(name))
-        _optimizations[name] = func
-        return func
-    return wrap
-
-
-@optimization('index-search')
-def opt_index_search(task, params, index_path):
-    try:
-        task_id = find_task_id(
-            index_path,
-            use_proxy=bool(os.environ.get('TASK_ID')))
-
-        return task_id or True
-    except requests.exceptions.HTTPError:
-        pass
-
-    return False
+class OptimizationStrategy(object):
+    def should_remove_task(self, task, params, arg):
+        """Determine whether to optimize this task by removing it.  Returns
+        True to remove."""
+        return False
 
-
-@optimization('seta')
-def opt_seta(task, params):
-    bbb_task = False
-
-    # for bbb tasks we need to send in the buildbot buildername
-    if task.task.get('provisionerId', '') == 'buildbot-bridge':
-        label = task.task.get('payload').get('buildername')
-        bbb_task = True
-    else:
-        label = task.label
-
-    # we would like to return 'False, None' while it's high_value_task
-    # and we wouldn't optimize it. Otherwise, it will return 'True, None'
-    if is_low_value_task(label,
-                         params.get('project'),
-                         params.get('pushlog_id'),
-                         params.get('pushdate'),
-                         bbb_task):
-        # Always optimize away low-value tasks
-        return True
-    else:
+    def should_replace_task(self, task, params, arg):
+        """Determine whether to optimize this task by replacing it.  Returns a
+        taskId to replace this task, True to replace with nothing, or False to
+        keep the task."""
         return False
 
 
-@optimization('skip-unless-changed')
-def opt_files_changed(task, params, file_patterns):
-    # pushlog_id == -1 - this is the case when run from a cron.yml job
-    if params.get('pushlog_id') == -1:
+class Either(OptimizationStrategy):
+    """Given one or more optimization strategies, remove a task if any of them
+    says to, and replace with a task if any finds a replacement (preferring the
+    earliest).  By default, each substrategy gets the same arg, but split_args
+    can return a list of args for each strategy, if desired."""
+    def __init__(self, *substrategies, **kwargs):
+        self.substrategies = substrategies
+        self.split_args = kwargs.pop('split_args', None)
+        if not self.split_args:
+            self.split_args = lambda arg: [arg] * len(substrategies)
+        if kwargs:
+            raise TypeError("unexpected keyword args")
+
+    def _for_substrategies(self, arg, fn):
+        for sub, arg in zip(self.substrategies, self.split_args(arg)):
+            rv = fn(sub, arg)
+            if rv:
+                return rv
+        return False
+
+    def should_remove_task(self, task, params, arg):
+        return self._for_substrategies(
+            arg,
+            lambda sub, arg: sub.should_remove_task(task, params, arg))
+
+    def should_replace_task(self, task, params, arg):
+        return self._for_substrategies(
+            arg,
+            lambda sub, arg: sub.should_replace_task(task, params, arg))
+
+
+class OnlyIfDependenciesRun(OptimizationStrategy):
+    """Run this task only if its dependencies run."""
+
+    # This takes advantage of the behavior of the second phase of optimization:
+    # a task can only be replaced if it has no un-optimized dependencies. So if
+    # should_replace_task is called, then a task has no un-optimized
+    # dependencies and can be removed (indicated by returning True)
+
+    def should_replace_task(self, task, params, arg):
+        return True
+
+
+class IndexSearch(OptimizationStrategy):
+    def should_remove_task(self, task, params, index_paths):
+        "If this task has no dependencies, don't run it.."
         return True
 
-    changed = files_changed.check(params, file_patterns)
-    if not changed:
-        logger.debug('no files found matching a pattern in `skip-unless-changed` for ' +
-                     task.label)
+    def should_replace_task(self, task, params, index_paths):
+        "Look for a task with one of the given index paths"
+        for index_path in index_paths:
+            try:
+                task_id = find_task_id(
+                    index_path,
+                    use_proxy=bool(os.environ.get('TASK_ID')))
+                return task_id
+            except KeyError:
+                # 404 will end up here and go on to the next index path
+                pass
+
+        return False
+
+
+class SETA(OptimizationStrategy):
+    def should_remove_task(self, task, params, _):
+        bbb_task = False
+
+        # for bbb tasks we need to send in the buildbot buildername
+        if task.task.get('provisionerId', '') == 'buildbot-bridge':
+            label = task.task.get('payload').get('buildername')
+            bbb_task = True
+        else:
+            label = task.label
+
+        # we would like to return 'False, None' while it's high_value_task
+        # and we wouldn't optimize it. Otherwise, it will return 'True, None'
+        if is_low_value_task(label,
+                             params.get('project'),
+                             params.get('pushlog_id'),
+                             params.get('pushdate'),
+                             bbb_task):
+            # Always optimize away low-value tasks
+            return True
+        else:
+            return False
+
+
+class SkipUnlessChanged(OptimizationStrategy):
+    def should_remove_task(self, task, params, file_patterns):
+        # pushlog_id == -1 - this is the case when run from a cron.yml job
+        if params.get('pushlog_id') == -1:
+            return False
+
+        changed = files_changed.check(params, file_patterns)
+        if not changed:
+            logger.debug('no files found matching a pattern in `skip-unless-changed` for ' +
+                         task.label)
+            return True
+        return False
+
+
+class SkipUnlessSchedules(OptimizationStrategy):
+
+    @memoize
+    def scheduled_by_push(self, repository, revision):
+        changed_files = files_changed.get_changed_files(repository, revision)
+
+        mbo = MozbuildObject.from_environment()
+        # the decision task has a sparse checkout, so, mozbuild_reader will use
+        # a MercurialRevisionFinder with revision '.', which should be the same
+        # as `revision`; in other circumstances, it will use a default reader
+        rdr = mbo.mozbuild_reader(config_mode='empty')
+
+        components = set()
+        for p, m in rdr.files_info(changed_files).items():
+            components |= set(m['SCHEDULES'].components)
+
+        return components
+
+    def should_remove_task(self, task, params, conditions):
+        if params.get('pushlog_id') == -1:
+            return False
+
+        scheduled = self.scheduled_by_push(params['head_repository'], params['head_rev'])
+        conditions = set(conditions)
+        # if *any* of the condition components are scheduled, do not optimize
+        if conditions & scheduled:
+            return False
+
         return True
-    return False
--- a/taskcluster/taskgraph/parameters.py
+++ b/taskcluster/taskgraph/parameters.py
@@ -16,41 +16,37 @@ PARAMETER_NAMES = set([
     'build_date',
     'filters',
     'head_ref',
     'head_repository',
     'head_rev',
     'include_nightly',
     'level',
     'message',
-    'morph_templates',
     'moz_build_date',
     'optimize_target_tasks',
     'owner',
     'project',
     'pushdate',
     'pushlog_id',
     'release_history',
-    'target_task_labels',
     'target_tasks_method',
-])
-
-TRY_ONLY_PARAMETERS = set([
-    'morph_templates',
-    'target_task_labels',
+    'try_mode',
+    'try_options',
+    'try_task_config',
 ])
 
 
 class Parameters(ReadOnlyDict):
     """An immutable dictionary with nicer KeyError messages on failure"""
     def check(self):
         names = set(self)
         msg = []
 
-        missing = PARAMETER_NAMES - TRY_ONLY_PARAMETERS - names
+        missing = PARAMETER_NAMES - names
         if missing:
             msg.append("missing parameters: " + ", ".join(missing))
 
         extra = names - PARAMETER_NAMES
         if extra:
             msg.append("extra parameters: " + ", ".join(extra))
 
         if msg:
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -1,22 +1,19 @@
 # -*- coding: utf-8 -*-
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
-import os
-
 from taskgraph import try_option_syntax
 from taskgraph.util.attributes import match_run_on_projects
 
-here = os.path.abspath(os.path.dirname(__file__))
 _target_task_methods = {}
 
 
 def _target_task(name):
     def wrap(func):
         _target_task_methods[name] = func
         return func
     return wrap
@@ -48,27 +45,24 @@ def filter_upload_symbols(task, paramete
 def standard_filter(task, parameters):
     return all(
         filter_func(task, parameters) for filter_func in
         (filter_on_nightly, filter_for_project, filter_upload_symbols)
     )
 
 
 def _try_task_config(full_task_graph, parameters):
-    if not parameters.get('target_task_labels'):
-        return []
-
-    return [t.label for t in full_task_graph.tasks.itervalues()
-            if t.label in parameters['target_task_labels']]
+    requested_tasks = parameters['try_task_config']['tasks']
+    return list(set(requested_tasks) & full_task_graph.graph.nodes)
 
 
 def _try_option_syntax(full_task_graph, parameters):
     """Generate a list of target tasks based on try syntax in
     parameters['message'] and, for context, the full task graph."""
-    options = try_option_syntax.TryOptionSyntax(parameters['message'], full_task_graph)
+    options = try_option_syntax.TryOptionSyntax(parameters, full_task_graph)
     target_tasks_labels = [t.label for t in full_task_graph.tasks.itervalues()
                            if options.task_matches(t)]
 
     attributes = {
         k: getattr(options, k) for k in [
             'env',
             'no_retry',
             'tag',
@@ -105,29 +99,33 @@ def _try_option_syntax(full_task_graph, 
                 routes.append("notify.email.{}.on-failed".format(owner))
                 routes.append("notify.email.{}.on-exception".format(owner))
 
     return target_tasks_labels
 
 
 @_target_task('try_tasks')
 def target_tasks_try(full_task_graph, parameters):
-    labels = _try_task_config(full_task_graph, parameters)
-
-    if 'try:' in parameters['message'] or not labels:
-        labels.extend(_try_option_syntax(full_task_graph, parameters))
-
-    return labels
+    try_mode = parameters['try_mode']
+    if try_mode == 'try_task_config':
+        return _try_task_config(full_task_graph, parameters)
+    elif try_mode == 'try_option_syntax':
+        return _try_option_syntax(full_task_graph, parameters)
+    else:
+        # With no try mode, we would like to schedule everything (following
+        # run_on_projects) and let optimization trim it down.  But optimization
+        # isn't yet up to the task, so instead we use try_option_syntax with
+        # an empty message (which basically just schedules `-j`objs)
+        return _try_option_syntax(full_task_graph, parameters)
 
 
 @_target_task('default')
 def target_tasks_default(full_task_graph, parameters):
     """Target the tasks which have indicated they should be run on this project
     via the `run_on_projects` attributes."""
-
     return [l for l, t in full_task_graph.tasks.iteritems()
             if standard_filter(t, parameters)]
 
 
 @_target_task('ash_tasks')
 def target_tasks_ash(full_task_graph, parameters):
     """Target tasks that only run on the ash branch."""
     def filter(task):
--- a/taskcluster/taskgraph/task.py
+++ b/taskcluster/taskgraph/task.py
@@ -8,64 +8,62 @@ from __future__ import absolute_import, 
 class Task(object):
     """
     Representation of a task in a TaskGraph.  Each Task has, at creation:
 
     - kind: the name of the task kind
     - label; the label for this task
     - attributes: a dictionary of attributes for this task (used for filtering)
     - task: the task definition (JSON-able dictionary)
-    - optimizations: optimizations to apply to the task (see taskgraph.optimize)
+    - optimization: optimization to apply to the task (see taskgraph.optimize)
     - dependencies: tasks this one depends on, in the form {name: label}, for example
       {'build': 'build-linux64/opt', 'docker-image': 'build-docker-image-desktop-test'}
 
     And later, as the task-graph processing proceeds:
 
     - task_id -- TaskCluster taskId under which this task will be created
-    - optimized -- true if this task need not be performed
 
     This class is just a convenience wraper for the data type and managing
     display, comparison, serialization, etc. It has no functionality of its own.
     """
     def __init__(self, kind, label, attributes, task,
-                 optimizations=None, dependencies=None):
+                 optimization=None, dependencies=None):
         self.kind = kind
         self.label = label
         self.attributes = attributes
         self.task = task
 
         self.task_id = None
-        self.optimized = False
 
         self.attributes['kind'] = kind
 
-        self.optimizations = optimizations or []
+        self.optimization = optimization
         self.dependencies = dependencies or {}
 
     def __eq__(self, other):
         return self.kind == other.kind and \
             self.label == other.label and \
             self.attributes == other.attributes and \
             self.task == other.task and \
             self.task_id == other.task_id and \
-            self.optimizations == other.optimizations and \
+            self.optimization == other.optimization and \
             self.dependencies == other.dependencies
 
     def __repr__(self):
         return ('Task({kind!r}, {label!r}, {attributes!r}, {task!r}, '
-                'optimizations={optimizations!r}, '
+                'optimization={optimization!r}, '
                 'dependencies={dependencies!r})'.format(**self.__dict__))
 
     def to_json(self):
         rv = {
             'kind': self.kind,
             'label': self.label,
             'attributes': self.attributes,
             'dependencies': self.dependencies,
-            'optimizations': self.optimizations,
+            'optimization': self.optimization,
             'task': self.task,
         }
         if self.task_id:
             rv['task_id'] = self.task_id
         return rv
 
     @classmethod
     def from_json(cls, task_dict):
@@ -74,13 +72,13 @@ class Task(object):
         the original Task object.  This is used to "resume" the task-graph
         generation process, for example in Action tasks.
         """
         rv = cls(
             kind=task_dict['kind'],
             label=task_dict['label'],
             attributes=task_dict['attributes'],
             task=task_dict['task'],
-            optimizations=task_dict['optimizations'],
+            optimization=task_dict['optimization'],
             dependencies=task_dict.get('dependencies'))
         if 'task_id' in task_dict:
             rv.task_id = task_dict['task_id']
         return rv
--- a/taskcluster/taskgraph/test/test_decision.py
+++ b/taskcluster/taskgraph/test/test_decision.py
@@ -7,17 +7,17 @@ from __future__ import absolute_import, 
 import os
 import json
 import yaml
 import shutil
 import unittest
 import tempfile
 
 from taskgraph import decision
-from mozunit import main
+from mozunit import main, MockedOpen
 
 
 class TestDecision(unittest.TestCase):
 
     def test_write_artifact_json(self):
         data = [{'some': 'data'}]
         tmpdir = tempfile.mkdtemp()
         try:
@@ -39,10 +39,58 @@ class TestDecision(unittest.TestCase):
             with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.yml")) as f:
                 self.assertEqual(yaml.safe_load(f), data)
         finally:
             if os.path.exists(tmpdir):
                 shutil.rmtree(tmpdir)
             decision.ARTIFACTS_DIR = 'artifacts'
 
 
+class TestGetDecisionParameters(unittest.TestCase):
+
+    def setUp(self):
+        self.options = {
+            'base_repository': 'https://hg.mozilla.org/mozilla-unified',
+            'head_repository': 'https://hg.mozilla.org/mozilla-central',
+            'head_rev': 'abcd',
+            'head_ref': 'ef01',
+            'message': '',
+            'project': 'mozilla-central',
+            'pushlog_id': 143,
+            'pushdate': 1503691511,
+            'owner': 'nobody@mozilla.com',
+            'level': 3,
+        }
+
+    def test_simple_options(self):
+        params = decision.get_decision_parameters(self.options)
+        self.assertEqual(params['pushlog_id'], 143)
+        self.assertEqual(params['build_date'], 1503691511)
+        self.assertEqual(params['moz_build_date'], '20170825200511')
+        self.assertEqual(params['try_mode'], None)
+        self.assertEqual(params['try_options'], None)
+        self.assertEqual(params['try_task_config'], None)
+
+    def test_no_email_owner(self):
+        self.options['owner'] = 'ffxbld'
+        params = decision.get_decision_parameters(self.options)
+        self.assertEqual(params['owner'], 'ffxbld@noreply.mozilla.org')
+
+    def test_try_options(self):
+        self.options['message'] = 'try: -b do -t all'
+        params = decision.get_decision_parameters(self.options)
+        self.assertEqual(params['try_mode'], 'try_option_syntax')
+        self.assertEqual(params['try_options']['build_types'], 'do')
+        self.assertEqual(params['try_options']['unittests'], 'all')
+        self.assertEqual(params['try_task_config'], None)
+
+    def test_try_task_config(self):
+        ttc = {'tasks': ['a', 'b'], 'templates': {}}
+        ttc_file = os.path.join(os.getcwd(), 'try_task_config.json')
+        with MockedOpen({ttc_file: json.dumps(ttc)}):
+            params = decision.get_decision_parameters(self.options)
+            self.assertEqual(params['try_mode'], 'try_task_config')
+            self.assertEqual(params['try_options'], None)
+            self.assertEqual(params['try_task_config'], ttc)
+
+
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_generator.py
+++ b/taskcluster/taskgraph/test/test_generator.py
@@ -54,16 +54,17 @@ class TestGenerator(unittest.TestCase):
         def target_tasks_method(full_task_graph, parameters):
             return self.target_tasks
 
         target_tasks_mod._target_task_methods['test_method'] = target_tasks_method
 
         parameters = {
             '_kinds': kinds,
             'target_tasks_method': 'test_method',
+            'try_mode': None,
         }
 
         return WithFakeKind('/root', parameters)
 
     def test_kind_ordering(self):
         "When task kinds depend on each other, they are loaded in postorder"
         self.tgg = self.maketgg(kinds=[
             ('_fake3', ['_fake2', '_fake1']),
--- a/taskcluster/taskgraph/test/test_graph.py
+++ b/taskcluster/taskgraph/test/test_graph.py
@@ -124,16 +124,41 @@ class TestGraph(unittest.TestCase):
     def test_visit_postorder_multi_edges(self):
         "postorder visit of a graph with duplicate edges satisfies invariant"
         self.assert_postorder(self.multi_edges.visit_postorder(), self.multi_edges.nodes)
 
     def test_visit_postorder_disjoint(self):
         "postorder visit of a disjoint graph satisfies invariant"
         self.assert_postorder(self.disjoint.visit_postorder(), self.disjoint.nodes)
 
+    def assert_preorder(self, seq, all_nodes):
+        seen = set()
+        for e in seq:
+            for l, r, n in self.tree.edges:
+                if r == e:
+                    self.failUnless(l in seen)
+            seen.add(e)
+        self.assertEqual(seen, all_nodes)
+
+    def test_visit_preorder_tree(self):
+        "preorder visit of a tree satisfies invariant"
+        self.assert_preorder(self.tree.visit_preorder(), self.tree.nodes)
+
+    def test_visit_preorder_diamonds(self):
+        "preorder visit of a graph full of diamonds satisfies invariant"
+        self.assert_preorder(self.diamonds.visit_preorder(), self.diamonds.nodes)
+
+    def test_visit_preorder_multi_edges(self):
+        "preorder visit of a graph with duplicate edges satisfies invariant"
+        self.assert_preorder(self.multi_edges.visit_preorder(), self.multi_edges.nodes)
+
+    def test_visit_preorder_disjoint(self):
+        "preorder visit of a disjoint graph satisfies invariant"
+        self.assert_preorder(self.disjoint.visit_preorder(), self.disjoint.nodes)
+
     def test_links_dict(self):
         "link dict for a graph with multiple edges is correct"
         self.assertEqual(self.multi_edges.links_dict(), {
             '2': set(['1']),
             '3': set(['1', '2']),
             '4': set(['3']),
         })
 
--- a/taskcluster/taskgraph/test/test_optimize.py
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -1,249 +1,231 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
-from taskgraph.optimize import optimize_task_graph, resolve_task_references, optimization
-from taskgraph.optimize import annotate_task_graph, get_subgraph
+from taskgraph import optimize
 from taskgraph.taskgraph import TaskGraph
 from taskgraph import graph
 from taskgraph.task import Task
 from mozunit import main
+from slugid import nice as slugid
 
 
-class TestResolveTaskReferences(unittest.TestCase):
-
-    def do(self, input, output):
-        taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
-        self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
+class Remove(optimize.OptimizationStrategy):
 
-    def test_in_list(self):
-        "resolve_task_references resolves task references in a list"
-        self.do({'in-a-list': ['stuff', {'task-reference': '<edge1>'}]},
-                {'in-a-list': ['stuff', 'tid1']})
-
-    def test_in_dict(self):
-        "resolve_task_references resolves task references in a dict"
-        self.do({'in-a-dict': {'stuff': {'task-reference': '<edge2>'}}},
-                {'in-a-dict': {'stuff': 'tid2'}})
+    def should_remove_task(self, task, params, arg):
+        return True
 
-    def test_multiple(self):
-        "resolve_task_references resolves multiple references in the same string"
-        self.do({'multiple': {'task-reference': 'stuff <edge1> stuff <edge2> after'}},
-                {'multiple': 'stuff tid1 stuff tid2 after'})
 
-    def test_embedded(self):
-        "resolve_task_references resolves ebmedded references"
-        self.do({'embedded': {'task-reference': 'stuff before <edge3> stuff after'}},
-                {'embedded': 'stuff before tid3 stuff after'})
+class Replace(optimize.OptimizationStrategy):
 
-    def test_escaping(self):
-        "resolve_task_references resolves escapes in task references"
-        self.do({'escape': {'task-reference': '<<><edge3>>'}},
-                {'escape': '<tid3>'})
-
-    def test_invalid(self):
-        "resolve_task_references raises a KeyError on reference to an invalid task"
-        self.assertRaisesRegexp(
-            KeyError,
-            "task 'subject' has no dependency named 'no-such'",
-            lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
-        )
+    def should_replace_task(self, task, params, taskid):
+        return taskid
 
 
 class TestOptimize(unittest.TestCase):
 
-    kind = None
+    strategies = {
+        'never': optimize.OptimizationStrategy(),
+        'remove': Remove(),
+        'replace': Replace(),
+    }
 
-    @classmethod
-    def setUpClass(cls):
-        # set up some simple optimization functions
-        optimization('no-optimize')(lambda self, params: False)
-        optimization('optimize-away')(lambda self, params: True)
-        optimization('optimize-to-task')(lambda self, params, task: task)
-
-    def make_task(self, label, optimization=None, task_def=None, optimized=None, task_id=None):
+    def make_task(self, label, optimization=None, task_def=None, optimized=None,
+                  task_id=None, dependencies=None):
         task_def = task_def or {'sample': 'task-def'}
         task = Task(kind='test', label=label, attributes={}, task=task_def)
-        task.optimized = optimized
-        if optimization:
-            task.optimizations = [optimization]
-        else:
-            task.optimizations = []
+        task.optimization = optimization
         task.task_id = task_id
+        if dependencies is not None:
+            task.task['dependencies'] = sorted(dependencies)
         return task
 
     def make_graph(self, *tasks_and_edges):
         tasks = {t.label: t for t in tasks_and_edges if isinstance(t, Task)}
         edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
         return TaskGraph(tasks, graph.Graph(set(tasks), edges))
 
-    def assert_annotations(self, graph, **annotations):
-        def repl(task_id):
-            return 'SLUGID' if task_id and len(task_id) == 22 else task_id
-        got_annotations = {
-            t.label: repl(t.task_id) or t.optimized for t in graph.tasks.itervalues()
-        }
-        self.assertEqual(got_annotations, annotations)
+    def make_opt_graph(self, *tasks_and_edges):
+        tasks = {t.task_id: t for t in tasks_and_edges if isinstance(t, Task)}
+        edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
+        return TaskGraph(tasks, graph.Graph(set(tasks), edges))
+
+    def make_triangle(self, **opts):
+        """
+        Make a "triangle" graph like this:
 
-    def test_annotate_task_graph_no_optimize(self):
-        "annotating marks everything as un-optimized if the kind returns that"
-        graph = self.make_graph(
-            self.make_task('task1', ['no-optimize']),
-            self.make_task('task2', ['no-optimize']),
-            self.make_task('task3', ['no-optimize']),
-            ('task2', 'task1', 'build'),
-            ('task2', 'task3', 'image'),
-        )
-        annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
-        self.assert_annotations(
-            graph,
-            task1=False,
-            task2=False,
-            task3=False
-        )
+          t1 <-------- t3
+           `---- t2 --'
+        """
+        return self.make_graph(
+            self.make_task('t1', opts.get('t1')),
+            self.make_task('t2', opts.get('t2')),
+            self.make_task('t3', opts.get('t3')),
+            ('t3', 't2', 'dep'),
+            ('t3', 't1', 'dep2'),
+            ('t2', 't1', 'dep'))
+
+    def assert_remove_tasks(self, graph, exp_removed, do_not_optimize=set()):
+        got_removed = optimize.remove_tasks(
+            target_task_graph=graph,
+            optimizations=optimize._get_optimizations(graph, self.strategies),
+            params={},
+            do_not_optimize=do_not_optimize)
+        self.assertEqual(got_removed, exp_removed)
+
+    def test_remove_tasks_never(self):
+        "A graph full of optimization=never has nothing removed"
+        graph = self.make_triangle()
+        self.assert_remove_tasks(graph, set())
 
-    def test_annotate_task_graph_optimize_away_dependency(self):
-        "raises exception if kind optimizes away a task on which another depends"
-        graph = self.make_graph(
-            self.make_task('task1', ['optimize-away']),
-            self.make_task('task2', ['no-optimize']),
-            ('task2', 'task1', 'build'),
-        )
-        self.assertRaises(
-            Exception,
-            lambda: annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
-        )
+    def test_remove_tasks_all(self):
+        "A graph full of optimization=remove has everything removed"
+        graph = self.make_triangle(
+            t1={'remove': None},
+            t2={'remove': None},
+            t3={'remove': None})
+        self.assert_remove_tasks(graph, {'t1', 't2', 't3'})
+
+    def test_remove_tasks_blocked(self):
+        "Removable tasks that are depended on by non-removable tasks are not removed"
+        graph = self.make_triangle(
+            t1={'remove': None},
+            t3={'remove': None})
+        self.assert_remove_tasks(graph, {'t3'})
+
+    def test_remove_tasks_do_not_optimize(self):
+        "Removable tasks that are marked do_not_optimize are not removed"
+        graph = self.make_triangle(
+            t1={'remove': None},
+            t2={'remove': None},  # but do_not_optimize
+            t3={'remove': None})
+        self.assert_remove_tasks(graph, {'t3'}, do_not_optimize={'t2'})
 
-    def test_annotate_task_graph_do_not_optimize(self):
-        "annotating marks everything as un-optimized if in do_not_optimize"
-        graph = self.make_graph(
-            self.make_task('task1', ['optimize-away']),
-            self.make_task('task2', ['optimize-away']),
-            ('task2', 'task1', 'build'),
-        )
-        label_to_taskid = {}
-        annotate_task_graph(graph, {}, {'task1', 'task2'},
-                            graph.graph.named_links_dict(), label_to_taskid, None)
-        self.assert_annotations(
-            graph,
-            task1=False,
-            task2=False
-        )
-        self.assertEqual
+    def assert_replace_tasks(self, graph, exp_replaced, exp_removed=set(), exp_label_to_taskid={},
+                             do_not_optimize=None, label_to_taskid=None, removed_tasks=None,
+                             existing_tasks=None):
+        do_not_optimize = do_not_optimize or set()
+        label_to_taskid = label_to_taskid or {}
+        removed_tasks = removed_tasks or set()
+        existing_tasks = existing_tasks or {}
 
-    def test_annotate_task_graph_nos_do_not_propagate(self):
-        "a task with a non-optimized dependency can be optimized"
-        graph = self.make_graph(
-            self.make_task('task1', ['no-optimize']),
-            self.make_task('task2', ['optimize-to-task', 'taskid']),
-            self.make_task('task3', ['optimize-to-task', 'taskid']),
-            ('task2', 'task1', 'build'),
-            ('task2', 'task3', 'image'),
-        )
-        annotate_task_graph(graph, {}, set(),
-                            graph.graph.named_links_dict(), {}, None)
-        self.assert_annotations(
-            graph,
-            task1=False,
-            task2='taskid',
-            task3='taskid'
-        )
+        got_replaced = optimize.replace_tasks(
+            target_task_graph=graph,
+            optimizations=optimize._get_optimizations(graph, self.strategies),
+            params={},
+            do_not_optimize=do_not_optimize,
+            label_to_taskid=label_to_taskid,
+            removed_tasks=removed_tasks,
+            existing_tasks=existing_tasks)
+        self.assertEqual(got_replaced, exp_replaced)
+        self.assertEqual(removed_tasks, exp_removed)
+        self.assertEqual(label_to_taskid, exp_label_to_taskid)
+
+    def test_replace_tasks_never(self):
+        "No tasks are replaced when strategy is 'never'"
+        graph = self.make_triangle()
+        self.assert_replace_tasks(graph, set())
 
-    def test_get_subgraph_single_dep(self):
-        "when a single dependency is optimized, it is omitted from the graph"
-        graph = self.make_graph(
-            self.make_task('task1', optimized=True, task_id='dep1'),
-            self.make_task('task2', optimized=False),
-            self.make_task('task3', optimized=False),
-            ('task2', 'task1', 'build'),
-            ('task2', 'task3', 'image'),
-        )
-        label_to_taskid = {'task1': 'dep1'}
-        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
-        task2 = label_to_taskid['task2']
-        task3 = label_to_taskid['task3']
-        self.assertEqual(sub.graph.nodes, {task2, task3})
-        self.assertEqual(sub.graph.edges, {(task2, task3, 'image')})
-        self.assertEqual(sub.tasks[task2].task_id, task2)
-        self.assertEqual(sorted(sub.tasks[task2].task['dependencies']),
-                         sorted([task3, 'dep1']))
-        self.assertEqual(sub.tasks[task3].task_id, task3)
-        self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), [])
+    def test_replace_tasks_all(self):
+        "All replaceable tasks are replaced when strategy is 'replace'"
+        graph = self.make_triangle(
+            t1={'replace': 'e1'},
+            t2={'replace': 'e2'},
+            t3={'replace': 'e3'})
+        self.assert_replace_tasks(
+            graph,
+            exp_replaced={'t1', 't2', 't3'},
+            exp_label_to_taskid={'t1': 'e1', 't2': 'e2', 't3': 'e3'})
+
+    def test_replace_tasks_blocked(self):
+        "A task cannot be replaced if it depends on one that was not replaced"
+        graph = self.make_triangle(
+            t1={'replace': 'e1'},
+            t3={'replace': 'e3'})
+        self.assert_replace_tasks(
+            graph,
+            exp_replaced={'t1'},
+            exp_label_to_taskid={'t1': 'e1'})
 
-    def test_get_subgraph_dep_chain(self):
-        "when a dependency chain is optimized, it is omitted from the graph"
-        graph = self.make_graph(
-            self.make_task('task1', optimized=True, task_id='dep1'),
-            self.make_task('task2', optimized=True, task_id='dep2'),
-            self.make_task('task3', optimized=False),
-            ('task2', 'task1', 'build'),
-            ('task3', 'task2', 'image'),
-        )
-        label_to_taskid = {'task1': 'dep1', 'task2': 'dep2'}
-        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
-        task3 = label_to_taskid['task3']
-        self.assertEqual(sub.graph.nodes, {task3})
-        self.assertEqual(sub.graph.edges, set())
-        self.assertEqual(sub.tasks[task3].task_id, task3)
-        self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), ['dep2'])
+    def test_replace_tasks_do_not_optimize(self):
+        "A task marked do_not_optimize cannot be replaced"
+        graph = self.make_triangle(
+            t1={'replace': 'e1'},
+            t2={'replace': 'xxx'},  # but do_not_optimize
+            t3={'replace': 'e3'})
+        self.assert_replace_tasks(
+            graph,
+            exp_replaced={'t1'},
+            exp_label_to_taskid={'t1': 'e1'},
+            do_not_optimize={'t2'})
+
+    def test_replace_tasks_removed(self):
+        "A task can be replaced with nothing"
+        graph = self.make_triangle(
+            t1={'replace': 'e1'},
+            t2={'replace': True},
+            t3={'replace': True})
+        self.assert_replace_tasks(
+            graph,
+            exp_replaced={'t1'},
+            exp_removed={'t2', 't3'},
+            exp_label_to_taskid={'t1': 'e1'})
 
-    def test_get_subgraph_opt_away(self):
-        "when a leaf task is optimized away, it is omitted from the graph"
-        graph = self.make_graph(
-            self.make_task('task1', optimized=False),
-            self.make_task('task2', optimized=True),
-            ('task2', 'task1', 'build'),
-        )
-        label_to_taskid = {'task2': 'dep2'}
-        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
-        task1 = label_to_taskid['task1']
-        self.assertEqual(sub.graph.nodes, {task1})
-        self.assertEqual(sub.graph.edges, set())
-        self.assertEqual(sub.tasks[task1].task_id, task1)
-        self.assertEqual(sorted(sub.tasks[task1].task['dependencies']), [])
+    def assert_subgraph(self, graph, removed_tasks, replaced_tasks,
+                        label_to_taskid, exp_subgraph, exp_label_to_taskid):
+        self.maxDiff = None
+        optimize.slugid = ('tid{}'.format(i) for i in xrange(1, 10)).next
+        try:
+            got_subgraph = optimize.get_subgraph(graph, removed_tasks,
+                                                 replaced_tasks, label_to_taskid)
+        finally:
+            optimize.slugid = slugid
+        self.assertEqual(got_subgraph.graph, exp_subgraph.graph)
+        self.assertEqual(got_subgraph.tasks, exp_subgraph.tasks)
+        self.assertEqual(label_to_taskid, exp_label_to_taskid)
 
-    def test_get_subgraph_refs_resolved(self):
-        "get_subgraph resolves task references"
-        graph = self.make_graph(
-            self.make_task('task1', optimized=True, task_id='dep1'),
-            self.make_task(
-                'task2',
-                optimized=False,
-                task_def={'payload': {'task-reference': 'http://<build>/<test>'}}
-            ),
-            ('task2', 'task1', 'build'),
-            ('task2', 'task3', 'test'),
-            self.make_task('task3', optimized=False),
-        )
-        label_to_taskid = {'task1': 'dep1'}
-        sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
-        task2 = label_to_taskid['task2']
-        task3 = label_to_taskid['task3']
-        self.assertEqual(sub.graph.nodes, {task2, task3})
-        self.assertEqual(sub.graph.edges, {(task2, task3, 'test')})
-        self.assertEqual(sub.tasks[task2].task_id, task2)
-        self.assertEqual(sorted(sub.tasks[task2].task['dependencies']), sorted([task3, 'dep1']))
-        self.assertEqual(sub.tasks[task2].task['payload'], 'http://dep1/' + task3)
-        self.assertEqual(sub.tasks[task3].task_id, task3)
+    def test_get_subgraph_no_change(self):
+        "get_subgraph returns a similarly-shaped subgraph when nothing is removed"
+        graph = self.make_triangle()
+        self.assert_subgraph(
+            graph, set(), set(), {},
+            self.make_opt_graph(
+                self.make_task('t1', task_id='tid1', dependencies={}),
+                self.make_task('t2', task_id='tid2', dependencies={'tid1'}),
+                self.make_task('t3', task_id='tid3', dependencies={'tid1', 'tid2'}),
+                ('tid3', 'tid2', 'dep'),
+                ('tid3', 'tid1', 'dep2'),
+                ('tid2', 'tid1', 'dep')),
+            {'t1': 'tid1', 't2': 'tid2', 't3': 'tid3'})
 
-    def test_optimize(self):
-        "optimize_task_graph annotates and extracts the subgraph from a simple graph"
-        input = self.make_graph(
-            self.make_task('task1', ['optimize-to-task', 'dep1']),
-            self.make_task('task2', ['no-optimize']),
-            self.make_task('task3', ['no-optimize']),
-            ('task2', 'task1', 'build'),
-            ('task2', 'task3', 'image'),
-        )
-        opt, label_to_taskid = optimize_task_graph(input, {}, set())
-        self.assertEqual(opt.graph, graph.Graph(
-            {label_to_taskid['task2'], label_to_taskid['task3']},
-            {(label_to_taskid['task2'], label_to_taskid['task3'], 'image')}))
+    def test_get_subgraph_removed(self):
+        "get_subgraph returns a smaller subgraph when tasks are removed"
+        graph = self.make_triangle()
+        self.assert_subgraph(
+            graph, {'t2', 't3'}, set(), {},
+            self.make_opt_graph(
+                self.make_task('t1', task_id='tid1', dependencies={})),
+            {'t1': 'tid1'})
+
+    def test_get_subgraph_replaced(self):
+        "get_subgraph returns a smaller subgraph when tasks are replaced"
+        graph = self.make_triangle()
+        self.assert_subgraph(
+            graph, set(), {'t1', 't2'}, {'t1': 'e1', 't2': 'e2'},
+            self.make_opt_graph(
+                self.make_task('t3', task_id='tid1', dependencies={'e1', 'e2'})),
+            {'t1': 'e1', 't2': 'e2', 't3': 'tid1'})
+
+    def test_get_subgraph_removed_dep(self):
+        "get_subgraph raises an Exception when a task depends on a removed task"
+        graph = self.make_triangle()
+        with self.assertRaises(Exception):
+            optimize.get_subgraph(graph, {'t2'}, set(), {})
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_target_tasks.py
+++ b/taskcluster/taskgraph/test/test_target_tasks.py
@@ -1,14 +1,15 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
+import contextlib
 import unittest
 
 from taskgraph import target_tasks
 from taskgraph import try_option_syntax
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
 from mozunit import main
@@ -60,48 +61,61 @@ class TestTargetTasks(unittest.TestCase)
         self.assertFalse(self.default_matches(['release'], 'baobab'))
 
     def test_default_nothing(self):
         """run_on_projects=[] includes nothing"""
         self.assertFalse(self.default_matches([], 'mozilla-central'))
         self.assertFalse(self.default_matches([], 'mozilla-inbound'))
         self.assertFalse(self.default_matches([], 'baobab'))
 
-    def test_try_tasks(self):
+    def make_task_graph(self):
         tasks = {
             'a': Task(kind=None, label='a', attributes={}, task={}),
             'b': Task(kind=None, label='b', attributes={'at-at': 'yep'}, task={}),
-            'c': Task(kind=None, label='c', attributes={}, task={}),
+            'c': Task(kind=None, label='c', attributes={'run_on_projects': ['try']}, task={}),
         }
         graph = Graph(nodes=set('abc'), edges=set())
-        tg = TaskGraph(tasks, graph)
+        return TaskGraph(tasks, graph)
 
-        method = target_tasks.get_method('try_tasks')
-        params = {
-            'message': '',
-            'target_task_labels': [],
-        }
-
+    @contextlib.contextmanager
+    def fake_TryOptionSyntax(self):
         orig_TryOptionSyntax = try_option_syntax.TryOptionSyntax
         try:
             try_option_syntax.TryOptionSyntax = FakeTryOptionSyntax
+            yield
+        finally:
+            try_option_syntax.TryOptionSyntax = orig_TryOptionSyntax
 
-            # no try specifier
-            self.assertEqual(method(tg, params), ['b'])
-
-            # try syntax only
-            params['message'] = 'try: me'
+    def test_just_try_it(self):
+        "try_mode = None runs try option syntax with no options"
+        tg = self.make_task_graph()
+        method = target_tasks.get_method('try_tasks')
+        with self.fake_TryOptionSyntax():
+            params = {
+                'try_mode': None,
+                'message': '',
+            }
             self.assertEqual(method(tg, params), ['b'])
 
-            # try task config only
-            params['message'] = ''
-            params['target_task_labels'] = ['c']
-            self.assertEqual(method(tg, params), ['c'])
+    def test_try_option_syntax(self):
+        "try_mode = try_option_syntax uses TryOptionSyntax"
+        tg = self.make_task_graph()
+        method = target_tasks.get_method('try_tasks')
+        with self.fake_TryOptionSyntax():
+            params = {
+                'try_mode': 'try_option_syntax',
+                'message': 'try: -p all',
+            }
+            self.assertEqual(method(tg, params), ['b'])
 
-            # both syntax and config
-            params['message'] = 'try: me'
-            self.assertEqual(set(method(tg, params)), set(['b', 'c']))
-        finally:
-            try_option_syntax.TryOptionSyntax = orig_TryOptionSyntax
+    def test_try_task_config(self):
+        "try_mode = try_task_config uses the try config"
+        tg = self.make_task_graph()
+        method = target_tasks.get_method('try_tasks')
+        params = {
+            'try_mode': 'try_task_config',
+            'try_task_config': {'tasks': ['a']},
+        }
+        self.assertEqual(method(tg, params), ['a'])
 
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_taskgraph.py
+++ b/taskcluster/taskgraph/test/test_taskgraph.py
@@ -19,60 +19,60 @@ class TestTaskGraph(unittest.TestCase):
     def test_taskgraph_to_json(self):
         tasks = {
             'a': Task(kind='test', label='a',
                       attributes={'attr': 'a-task'},
                       task={'taskdef': True}),
             'b': Task(kind='test', label='b',
                       attributes={},
                       task={'task': 'def'},
-                      optimizations=[['seta']],
+                      optimization={'seta': None},
                       # note that this dep is ignored, superseded by that
                       # from the taskgraph's edges
                       dependencies={'first': 'a'}),
         }
         graph = Graph(nodes=set('ab'), edges={('a', 'b', 'edgelabel')})
         taskgraph = TaskGraph(tasks, graph)
 
         res = taskgraph.to_json()
 
         self.assertEqual(res, {
             'a': {
                 'kind': 'test',
                 'label': 'a',
                 'attributes': {'attr': 'a-task', 'kind': 'test'},
                 'task': {'taskdef': True},
                 'dependencies': {'edgelabel': 'b'},
-                'optimizations': [],
+                'optimization': None,
             },
             'b': {
                 'kind': 'test',
                 'label': 'b',
                 'attributes': {'kind': 'test'},
                 'task': {'task': 'def'},
                 'dependencies': {},
-                'optimizations': [['seta']],
+                'optimization': {'seta': None},
             }
         })
 
     def test_round_trip(self):
         graph = TaskGraph(tasks={
             'a': Task(
                 kind='fancy',
                 label='a',
                 attributes={},
                 dependencies={'prereq': 'b'},  # must match edges, below
-                optimizations=[['seta']],
+                optimization={'seta': None},
                 task={'task': 'def'}),
             'b': Task(
                 kind='pre',
                 label='b',
                 attributes={},
                 dependencies={},
-                optimizations=[['seta']],
+                optimization={'seta': None},
                 task={'task': 'def2'}),
         }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))
 
         tasks, new_graph = TaskGraph.from_json(graph.to_json())
         self.assertEqual(graph, new_graph)
 
 
 if __name__ == '__main__':
--- a/taskcluster/taskgraph/test/test_try_option_syntax.py
+++ b/taskcluster/taskgraph/test/test_try_option_syntax.py
@@ -1,17 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
-from taskgraph.try_option_syntax import TryOptionSyntax
+from taskgraph.try_option_syntax import TryOptionSyntax, parse_message
 from taskgraph.try_option_syntax import RIDEALONG_BUILDS
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
 from mozunit import main
 
 
 def unittest_task(n, tp, bt='opt'):
@@ -58,274 +58,286 @@ unittest_tasks = {k: v for k, v in tasks
                   if 'unittest_try_name' in v.attributes}
 talos_tasks = {k: v for k, v in tasks.iteritems()
                if 'talos_try_name' in v.attributes}
 graph_with_jobs = TaskGraph(tasks, Graph(set(tasks), set()))
 
 
 class TestTryOptionSyntax(unittest.TestCase):
 
-    def test_empty_message(self):
-        "Given an empty message, it should return an empty value"
-        tos = TryOptionSyntax('', graph_with_jobs)
-        self.assertEqual(tos.build_types, [])
-        self.assertEqual(tos.jobs, [])
-        self.assertEqual(tos.unittests, [])
-        self.assertEqual(tos.talos, [])
-        self.assertEqual(tos.platforms, [])
-        self.assertEqual(tos.trigger_tests, 0)
-        self.assertEqual(tos.talos_trigger_tests, 0)
-        self.assertEqual(tos.env, [])
-        self.assertFalse(tos.profile)
-        self.assertIsNone(tos.tag)
-        self.assertFalse(tos.no_retry)
-
-    def test_message_without_try(self):
-        "Given a non-try message, it should return an empty value"
-        tos = TryOptionSyntax('Bug 1234: frobnicte the foo', graph_with_jobs)
-        self.assertEqual(tos.build_types, [])
-        self.assertEqual(tos.jobs, [])
-        self.assertEqual(tos.unittests, [])
-        self.assertEqual(tos.talos, [])
-        self.assertEqual(tos.platforms, [])
-        self.assertEqual(tos.trigger_tests, 0)
-        self.assertEqual(tos.talos_trigger_tests, 0)
-        self.assertEqual(tos.env, [])
-        self.assertFalse(tos.profile)
-        self.assertIsNone(tos.tag)
-        self.assertFalse(tos.no_retry)
-
     def test_unknown_args(self):
         "unknown arguments are ignored"
-        tos = TryOptionSyntax('try: --doubledash -z extra', graph_with_jobs)
+        parameters = {'try_options': parse_message('try: --doubledash -z extra')}
+        tos = TryOptionSyntax(parameters, graph_with_jobs)
         # equilvant to "try:"..
         self.assertEqual(tos.build_types, [])
         self.assertEqual(tos.jobs, None)
 
     def test_apostrophe_in_message(self):
         "apostrophe does not break parsing"
-        tos = TryOptionSyntax('Increase spammy log\'s log level. try: -b do', graph_with_jobs)
+        parameters = {'try_options': parse_message('Increase spammy log\'s log level. try: -b do')}
+        tos = TryOptionSyntax(parameters, graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug', 'opt'])
 
     def test_b_do(self):
         "-b do should produce both build_types"
-        tos = TryOptionSyntax('try: -b do', graph_with_jobs)
+        parameters = {'try_options': parse_message('try: -b do')}
+        tos = TryOptionSyntax(parameters, graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug', 'opt'])
 
     def test_b_d(self):
         "-b d should produce build_types=['debug']"
-        tos = TryOptionSyntax('try: -b d', graph_with_jobs)
+        parameters = {'try_options': parse_message('try: -b d')}
+        tos = TryOptionSyntax(parameters, graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['debug'])
 
     def test_b_o(self):
         "-b o should produce build_types=['opt']"
-        tos = TryOptionSyntax('try: -b o', graph_with_jobs)
+        parameters = {'try_options': parse_message('try: -b o')}
+        tos = TryOptionSyntax(parameters, graph_with_jobs)
         self.assertEqual(sorted(tos.build_types), ['opt'])
 
     def test_build_o(self):
         "--build o should produce build_types=['opt']"