Merge mozilla-central to inbound. a=merge CLOSED TREE
author Gurzau Raul <rgurzau@mozilla.com>
Tue, 02 Jul 2019 01:21:33 +0300
changeset 540522 b7ea2e7247cf4e414db111b644b14104aec8ba19
parent 540521 ec3a228a84f28cbc4049549e44fab77ef227163b (current diff)
parent 540463 0176f11e448f372c7d45dcff967d6773efda9ed5 (diff)
child 540523 d8b7ed5e149f4d4724c3999b3b0fed9bfd3f8f96
push id 11529
push user archaeopteryx@coole-files.de
push date Thu, 04 Jul 2019 15:22:33 +0000
treeherder mozilla-beta@ebb510a784b8
reviewers merge
milestone 69.0a1
first release with: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
js/src/wasm/cranelift/src/wasm2clif.rs
testing/web-platform/meta/mediacapture-record/MediaRecorder-disabled-tracks.https.html.ini
--- a/accessible/ipc/DocAccessibleParent.cpp
+++ b/accessible/ipc/DocAccessibleParent.cpp
@@ -841,22 +841,29 @@ mozilla::ipc::IPCResult DocAccessiblePar
 #endif  // defined(XP_WIN)
 
 #if !defined(XP_WIN)
 mozilla::ipc::IPCResult DocAccessibleParent::RecvBatch(
     const uint64_t& aBatchType, nsTArray<BatchData>&& aData) {
  // Only do something on Android. We can't ifdef the entire protocol out in
   // the ipdl because it doesn't allow preprocessing.
 #  if defined(ANDROID)
+  if (mShutdown) {
+    return IPC_OK();
+  }
   nsTArray<ProxyAccessible*> proxies(aData.Length());
   for (size_t i = 0; i < aData.Length(); i++) {
     DocAccessibleParent* doc = static_cast<DocAccessibleParent*>(
         aData.ElementAt(i).Document().get_PDocAccessibleParent());
     MOZ_ASSERT(doc);
 
+    if (doc->IsShutdown()) {
+      continue;
+    }
+
     ProxyAccessible* proxy = doc->GetAccessible(aData.ElementAt(i).ID());
     if (!proxy) {
       MOZ_ASSERT_UNREACHABLE("No proxy found!");
       continue;
     }
 
     proxies.AppendElement(proxy);
   }
--- a/browser/app/blocklist.xml
+++ b/browser/app/blocklist.xml
@@ -1,10 +1,10 @@
 <?xml version='1.0' encoding='UTF-8'?>
-<blocklist lastupdate="1561554563187" xmlns="http://www.mozilla.org/2006/addons-blocklist">
+<blocklist lastupdate="1561710520557" xmlns="http://www.mozilla.org/2006/addons-blocklist">
   <emItems>
     <emItem blockID="i334" id="{0F827075-B026-42F3-885D-98981EE7B1AE}">
       <prefs/>
       <versionRange minVersion="0" maxVersion="*" severity="3"/>
     </emItem>
     <emItem blockID="i1211" id="flvto@hotger.com">
       <prefs/>
       <versionRange minVersion="0" maxVersion="*" severity="1"/>
@@ -3248,16 +3248,32 @@
     <emItem blockID="2e886e0c-00ab-44c2-bcbe-7c8793d46d89" id="/^((\{4200b565-5c4a-410f-b4fb-478daae2537f\})|(\{a0bba103-21d5-49c8-96f3-4eabbe78ced3\})|(\{ec46fe21-5690-4757-8ebc-1c77f826fe6b\})|(\{ce45d605-3bb6-4fad-8c1b-238ecee0d3df\})|(\{c70bd1fe-1d7d-4ae5-a731-3d513e6c46ba\})|(\{aeec96ca-81b9-405c-bd70-01ea6a50be9d\})|(\{0a1603a8-839f-4890-b1e3-1b8e00a7a0c9\})|(\{45febc8f-eaeb-4cec-90ea-07a7edc22793\})|(\{a7c7febd-6726-4d0e-9947-2edfd8bea35a\})|(\{eda3389e-ae07-4a2c-9b50-ce4e9008f297\})|(\{0e5d1d65-4fbb-4dd9-9042-3b568d9e2489\})|(\{1461f0e5-3c4a-453e-aed2-ca45ff5db747\})|(\{e842e73d-9d8a-45a8-bf0d-ef974ab24767\})|(\{e1d4fa8a-3da0-4fee-8b4f-0c7233fcb49a\}))$/">
       <prefs/>
       <versionRange minVersion="0" maxVersion="*" severity="3"/>
     </emItem>
     <emItem blockID="0c227983-1180-4b4a-b25b-8160738e7238" id="/^((\{f0df6aa3-9792-4476-daa6-4709f93bbce3\})|(\{fe934134-3d0f-462b-d56e-e7187dfa8c98\})|(\{429999c4-1b8b-46fb-863f-ce19a08afc9c\})|(\{b8003074-2123-45be-91cf-654ef9671e1a\})|(\{9712066a-d491-4293-cd31-8ef8ee907d40\})|(\{dcfbb98b-783b-4df0-8427-e269114736cb\})|(\{66c44e3b-2df2-4741-ff07-0067cca4fe95\})|(\{af0a4d96-3403-496f-9d9a-5c766bf44bac\})|(\{82c60958-45da-4e6a-de21-879775c5473a\})|(\{c9118234-5787-488d-b30c-7d0a904fbabb\})|(\{f07d3da6-81ea-464f-9bef-6ff5470b307b\})|(\{c2454a12-7f57-440e-f695-0a9618f48b80\})|(\{f6e1d884-8100-49e7-88b9-bff8d9295cd2\}))$/">
       <prefs/>
       <versionRange minVersion="0" maxVersion="*" severity="3"/>
     </emItem>
+    <emItem blockID="3e88dad8-f640-46dd-8b00-4b955eea7b24" id="/^((\{65b88db7-9c07-4d03-80eb-2e5cf6cd7aa8\})|(\{aa2ef90f-db17-4ece-abab-4f87830457db\})|(\{e50969c9-088c-4978-9ffb-5d78015dabcc\})|(\{15fd1a8e-db53-41fa-9c46-93ec5b7473c1\})|(\{ed84b63e-faa2-4c48-b080-e9612cbc2e49\})|(\{c784f63e-5609-47a8-92ee-33a2bcb3239b\})|(\{1641b1ec-9a3d-4e3c-b52e-bc80671349f9\}))$/">
+      <prefs/>
+      <versionRange minVersion="0" maxVersion="*" severity="3"/>
+    </emItem>
+    <emItem blockID="bea9680c-28c0-48a1-b8d4-e418adeba748" id="/^((@searchincognito)|(@si-defaultsearch)|(@si-defaultsearch-listed)|(@searchassistincognito)|(@searchencrypt)|(@DiscreteSearch)|(@Discrete-Search)|(@searchsafe)|(@SearchSafeOrg)|(ffredirector@discretesearch\.com)|(ffredirector@encryptedsearch\.org)|(ffredirector@searchdefence\.com)|(ffredirector@searchencrypt\.com)|(ffredirector@searchencrypted\.com)|(ffredirector@searchincognito\.com)|(ffredirector@searchsafe\.co)|(ff_redirector@discretesearch\.com)|(ff_redirector@encryptedsearch\.org)|(ff_redirector@searchdefence\.com)|(ff_redirector@searchencrypt\.com)|(ff_redirector@searchencrypted\.com)|(ff_redirector@searchincognito\.com)|(ff_redirector@searchsafe\.co)|(@encryptedsearch)|(@searchdefence)|(@searchencrypted)|(@42e62954-834c-11e7-bb31-be2e44b06b34)|(@DiscreteSearchx)|(@4aec09f1-f1c9-456d-8c40-e0e86f302a0d)|(@566ff1c3-9432-4ed4-bd3d-b43cba47e051)|(@1df4e663-b9f3-4708-9f5d-44265b33397e)|(ff_redirector@searchsafe)|(\{9b62bdf8-a3c7-43d3-ba7f-0970cabffdaa\})|(\{95b48d11-b256-48ad-8ba1-bfe52f0a8bb8\})|(\{9e35a2be-64bd-49e3-aa47-fbeedf1834eb\})|(\{3ba10b5f-d9fa-4b40-8683-034d3dfc71d4\})|(\{20c31601-ebee-4677-a2f0-40e178bf7c77\})|(\{98e02622-f905-434e-9003-6c061b5c11c0\})|(@tabwow)|(gaidpiakchgkapdgbnoglpnbccdepnpk@chrome-store-foxified-258456913)|(@tabwow2)|(\{be8901e4-2a07-4554-aa05-a64351496e29\})|(moviestmpd@mozilla\.com)|(gaidpiakchgkapdgbnoglpnbccdepnpk@chrome-store-foxified-876542484)|(\{4a8ef415-e453-458f-bfbd-ae92569214db\})|(fireaction@mozilla\.com)|(\{bd9c448c-58b3-434f-9bb6-4ed2c155ba8e\})|(\{ebdfa19b-0906-4f78-9e95-7ef74d34c335\})|(websecure-unlisted@mozilla\.com)|(\{2d06d70b-8f32-4007-8f8b-1e0445bcebe7\})|(\{ddbe7345-acf4-4ebb-9baf-cd6d2df73b28\})|(\{b09d5b98-2d65-46fb-990c-69710577efa0\})|(\{3894384e-c719-4a0c-8d24-3816160fc56b\})|(search-encrypt-tab@mozilla\.xpi)|(\{1dafa1da-3894-48b9-ac8f-00bdc4f1868a\})|(\{99cfe634-328a-41a5-9a23-64094e4f4919\})|(inco-plugin@mozilla\.xpi)|(incognito-window@mozilla\.xpi)|(mac-search@mozilla\.xpi)|(fvdplayer@fvd\.com)|(playernewpp@ext\.com)|(\{492936c6-9121-4e54-8d4f-97f544e5bf98\})|(\{108a22ea-f316-4c2f-8427-fe65e02f9e2c\})|(cold@being\.net)|(\{38b99237-6c28-406f-898c-cc89df86051d\})|(search_redirect@mozilla\.xpi)|(\{d2ef4a8d-6ec0-4733-9f3f-2394178ecbf3\})|(tab_plugin@mozilla\.xpi)|(\{ae228e30-f40a-41a3-9e7e-53a094dcb8c6\})|(\{00ee7237-53cb-4036-8d4f-e78d78ca89e7\})|(\{d2f4002c-031b-4ad3-9fb1-afb003e8f932\})|(\{c0f366b3-7b3d-4486-a6f3-4ca1d7045091\})|(\{ccc6cfc4-3832-4d05-bf28-43a9722de93f\})|(\{dd02f638-ce6d-464e-8add-6ea0f314b1d1\})|(\{749ed3ff-4d23-4b32-812e-a35e3cf8c000\})|(tab_cleanup@mozilla\.xpi)|(incognito_tab@mozilla\.xpi)|(\{47c51f55-4f0b-499f-9fdd-c7c66bf4796a\})|(\{cd70c7c8-557d-46fa-9688-399c7c8d3d66\})|(\{681ad8e0-d1df-4cd2-a4cf-b97c1d6502a3\})|(\{0d58e690-bd48-4e3a-baf3-67aa40bc286a\})|(\{77bfbf26-4618-4120-9cb6-1fc7c92b8ddc\})|(\{037c6f6a-71f8-405b-9cff-fadf2ded6c47\})|(\{91cc3274-90d5-4e16-80e3-cd02fc513689\})|(\{2225b2af-0c3c-4345-adac-4f5bd40c2182\})|(\{81ca6b1e-a95b-4b44-9638-3ff3ea1a571d\})|(\{1e32acf8-fc1e-40ae-8783-c501ce50d597\})|(\{19670785-b1db-4d69-9538-2880ad8fdf20\})|(\{0113b4ad-15ca-4215-adeb-f0404f619ca6\})|(\{c7245149-4224-4c5c-91a4-84ea189f2227\})|(\{04dd2232-f1b1-4275-ae74-8bd27f3d850c\})|(prosearch@mozilla\.xpi)|(\{d549a064-98e7-49ed-ba9e-a724e79a004f\})|(\{fddd3bc6-9d4e-4ee7-b490-0d6141ff7d7f\})|(\{122795b5-ae28-4371-9b61-878f5db888ac\})|(\{e3d491de-802a-4f82-91eb-9403c9f43637\}))$/">
+      <prefs/>
+      <versionRange minVersion="0" maxVersion="*" severity="3"/>
+    </emItem>
+    <emItem blockID="3460b6b7-8251-4588-8f12-81ac8d288c88" id="/^((\{b7a0ecf9-212b-49ca-bec1-ead0c6dc2128\})|(\{6e977a6d-b31d-4735-a586-41dc25df2100\})|(\{67155a2a-6538-42b1-bdc9-f48b442f57e7\})|(\{b4d4abc0-5e6e-4a34-a7e3-bfe7319160b8\})|(\{2102c5a9-f3c4-4f71-bb6e-c45c9d07b6c8\})|(\{071c1c7a-cde3-4a22-aefe-7c8f98df6277\})|(\{aa2f3e70-7dcf-4b4e-92c5-54c8280b98de\})|(\{3b376420-b276-4a0c-9e28-340dcc0547ce\})|(\{ed192371-abcc-4520-ab76-d22afbe51dff\})|(\{ad5a457f-59c8-4d90-8e3e-59f13a3bc2b2\})|(\{06aa60ab-91ad-4b8a-bfda-98e33b65fbb5\})|(\{c2875a12-da6a-4f90-a919-1d2bef57fbff\})|(\{b01d1c5b-58b5-4411-86d0-555131c7bd07\})|(\{0a79c7eb-5fe9-4e37-841e-18686bc86a20\})|(\{341ca205-d6e0-4d03-93be-04939c429412\})|(\{855e09d9-ac3a-4885-828d-557734060c1f\})|(\{8ac01eb1-9819-4c41-b2b7-042d8cdb3f2e\}))$/">
+      <prefs/>
+      <versionRange minVersion="0" maxVersion="*" severity="3"/>
+    </emItem>
+    <emItem blockID="9f484302-44da-4b6a-afd8-94113b83c0f6" id="tab-api@mozilla.xpi">
+      <prefs/>
+      <versionRange minVersion="0" maxVersion="*" severity="3"/>
+    </emItem>
   </emItems>
   <pluginItems>
     <pluginItem blockID="p332">
       <match exp="libflashplayer\.so" name="filename"/>
       <match exp="^Shockwave Flash 11.(0|1) r[0-9]{1,3}$" name="description"/>
       <infoURL>https://get.adobe.com/flashplayer/</infoURL>
       <versionRange severity="0" vulnerabilitystatus="1">
         <targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
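The regex-style entries added above encode many add-on IDs in a single blocklist item: an id that starts and ends with a slash is treated as a regular expression that must match the entire add-on ID. A minimal JavaScript sketch of that matching, using one real GUID from the new entries but otherwise invented names (the actual matching logic lives in the blocklist client, not in this file):

// Illustrative sketch only. An entry id is either a literal add-on ID or a
// /^(...)$/ regular expression that must match the whole add-on ID.
function matchesBlocklistEntry(entryId, addonId) {
  if (entryId.startsWith("/") && entryId.endsWith("/")) {
    // Strip the surrounding slashes and compile the pattern.
    return new RegExp(entryId.slice(1, -1)).test(addonId);
  }
  return entryId === addonId;
}

const entryId =
  String.raw`/^((\{65b88db7-9c07-4d03-80eb-2e5cf6cd7aa8\})|(tab-api@mozilla\.xpi))$/`;
console.log(matchesBlocklistEntry(entryId, "{65b88db7-9c07-4d03-80eb-2e5cf6cd7aa8}")); // true
console.log(matchesBlocklistEntry(entryId, "harmless@example.com")); // false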
--- a/browser/app/profile/firefox.js
+++ b/browser/app/profile/firefox.js
@@ -487,16 +487,19 @@ pref("browser.tabs.delayHidingAudioPlayi
 // for about: pages. This pref name did not age well: we will have multiple
 // types of privileged content processes, each with different privileges.
 pref("browser.tabs.remote.separatePrivilegedContentProcess", true);
 // Pref to control whether we use a separate privileged content process
 // for certain mozilla webpages (which are listed in the pref
 // browser.tabs.remote.separatedMozillaDomains).
 pref("browser.tabs.remote.separatePrivilegedMozillaWebContentProcess", false);
+// This pref will cause assertions when a remoteType triggers a process switch
+// to a new remoteType it should not be able to trigger.
+pref("browser.tabs.remote.enforceRemoteTypeRestrictions", true);
 #endif
 
 #ifdef NIGHTLY_BUILD
 pref("browser.tabs.remote.useHTTPResponseProcessSelection", true);
 #else
 // Disabled outside of nightly due to bug 1554217
 pref("browser.tabs.remote.useHTTPResponseProcessSelection", false);
 #endif
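The new browser.tabs.remote.enforceRemoteTypeRestrictions pref is read in ContentParent::RecvCreateWindowInDifferentProcess later in this patch via Preferences::GetBool. For reference, a hedged privileged-JS equivalent of that read; the snippet is illustrative and not part of the patch:

const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");

// Falls back to false when unset, matching the C++ default used further down.
const enforce = Services.prefs.getBoolPref(
  "browser.tabs.remote.enforceRemoteTypeRestrictions",
  false
);
// When true, a remoteType that triggers a process switch it should not be able
// to trigger hits a diagnostic assertion (MOZ_CRASH in the C++ hunk below).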
--- a/browser/components/newtab/lib/BookmarkPanelHub.jsm
+++ b/browser/components/newtab/lib/BookmarkPanelHub.jsm
@@ -110,16 +110,18 @@ class _BookmarkPanelHub {
       this.toggleRecommendation(false);
       return;
     }
 
     const createElement = elem => target.document.createElementNS("http://www.w3.org/1999/xhtml", elem);
 
     if (!target.container.querySelector("#cfrMessageContainer")) {
       const recommendation = createElement("div");
+      const headerContainer = createElement("div");
+      headerContainer.classList.add("cfrMessageHeader");
       recommendation.setAttribute("id", "cfrMessageContainer");
       recommendation.addEventListener("click", async e => {
         target.hidePopup();
         const url = await FxAccounts.config.promiseEmailFirstURI("bookmark");
         win.ownerGlobal.openLinkIn(url, "tabshifted", {
           private: false,
           triggeringPrincipal: Services.scriptSecurityManager.createNullPrincipal({}),
           csp: null,
@@ -152,18 +154,19 @@ class _BookmarkPanelHub {
         this._l10n.setAttributes(cta, message.cta.string_id);
       } else {
         close.setAttribute("title", message.close_button.tooltiptext);
         title.textContent = message.title;
         content.textContent = message.text;
         cta.textContent = message.cta;
       }
 
-      recommendation.appendChild(close);
-      recommendation.appendChild(title);
+      headerContainer.appendChild(title);
+      headerContainer.appendChild(close);
+      recommendation.appendChild(headerContainer);
       recommendation.appendChild(content);
       recommendation.appendChild(cta);
       target.container.appendChild(recommendation);
     }
 
     this.toggleRecommendation(true);
   }
 
--- a/browser/components/newtab/test/browser/browser_asrouter_bookmarkpanel.js
+++ b/browser/components/newtab/test/browser/browser_asrouter_bookmarkpanel.js
@@ -33,18 +33,23 @@ add_task(async function test_fxa_message
   await BrowserTestUtils.waitForCondition(() => BookmarkingUI.status !== BookmarkingUI.STATUS_UPDATING);
 
   BookmarkingUI.star.click();
 
   await popupShownPromise;
 
   await BrowserTestUtils.waitForCondition(() => document.getElementById("cfrMessageContainer"), `Should create a
     container for the message`);
-  Assert.equal(document.getElementById("cfrMessageContainer").childElementCount, 4,
-    `Should attach 4 children elements`);
+  for (const selector of ["#cfrClose",
+      "#editBookmarkPanelRecommendationTitle",
+      "#editBookmarkPanelRecommendationContent",
+      "#editBookmarkPanelRecommendationCta"]) {
+    Assert.ok(document.getElementById("cfrMessageContainer").querySelector(selector),
+      `Should contain ${selector}`);
+  }
 
   const ftlFiles = Array.from(document.querySelectorAll("link"))
     .filter(l => l.getAttribute("href") === "browser/newtab/asrouter.ftl" ||
       l.getAttribute("href") === "browser/branding/sync-brand.ftl");
 
   Assert.equal(ftlFiles.length, 2, "Two fluent files required for translating the message");
 
   const popupHiddenPromise = BrowserTestUtils.waitForEvent(StarUI.panel, "popuphidden");
--- a/browser/extensions/webcompat/injections.js
+++ b/browser/extensions/webcompat/injections.js
@@ -124,16 +124,25 @@ for (const injection of [
     id: "bug1432935-breitbart",
     platform: "desktop",
     domain: "breitbart.com",
     bug: "1432935",
     contentScripts: {
       matches: ["*://*.breitbart.com/*"],
       css: [{file: "injections/css/bug1432935-breitbart.com-webkit-scrollbar.css"}],
     },
+  }, {
+    id: "bug1561371",
+    platform: "android",
+    domain: "mail.google.com",
+    bug: "1561371",
+    contentScripts: {
+      matches: ["*://mail.google.com/*"],
+      css: [{file: "injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css"}],
+    },
   },
 ]) {
   Injections.push(injection);
 }
 
 let port = browser.runtime.connect();
 const ActiveInjections = new Map();
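Entries pushed into Injections are later registered as content scripts when web compat interventions are enabled. A hedged sketch of that registration using the WebExtensions contentScripts API; the add-on's actual enable/disable plumbing lives in files not shown in this diff:

// Illustrative only: how an entry like bug1561371 could be activated.
async function enableInjection(injection) {
  const handle = await browser.contentScripts.register({
    matches: injection.contentScripts.matches,
    css: injection.contentScripts.css,
    runAt: "document_start", // assumption; the real add-on may differ
  });
  ActiveInjections.set(injection.id, handle); // handle.unregister() disables it
}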
 
new file mode 100644
--- /dev/null
+++ b/browser/extensions/webcompat/injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css
@@ -0,0 +1,12 @@
+/**
+ * mail.google.com - The HTML email view does not allow horizontal scrolling
+ * on Fennec due to a missing CSS rule which is only served to Chrome.
+ * Bug #1561371 - https://bugzilla.mozilla.org/show_bug.cgi?id=1561371
+ *
+ * HTML emails may sometimes contain content that does not wrap, yet the
+ * CSS served to Fennec does not permit scrolling horizontally. To prevent
+ * this UX frustration, we enable horizontal scrolling.
+ */
+body > #views > div {
+  overflow: auto;
+}
--- a/browser/extensions/webcompat/manifest.json
+++ b/browser/extensions/webcompat/manifest.json
@@ -1,13 +1,13 @@
 {
   "manifest_version": 2,
   "name": "Web Compat",
   "description": "Urgent post-release fixes for web compatibility.",
-  "version": "4.3.1",
+  "version": "4.3.2",
 
   "applications": {
     "gecko": {
       "id": "webcompat@mozilla.org",
       "strict_min_version": "59.0b5"
     }
   },
 
--- a/browser/extensions/webcompat/moz.build
+++ b/browser/extensions/webcompat/moz.build
@@ -22,17 +22,18 @@ FINAL_TARGET_FILES.features['webcompat@m
 ]
 
 FINAL_TARGET_FILES.features['webcompat@mozilla.org']['injections']['css'] += [
   'injections/css/bug0000000-dummy-css-injection.css',
   'injections/css/bug1305028-gaming.youtube.com-webkit-scrollbar.css',
   'injections/css/bug1432935-breitbart.com-webkit-scrollbar.css',
   'injections/css/bug1432935-discordapp.com-webkit-scorllbar-white-line.css',
   'injections/css/bug1518781-twitch.tv-webkit-scrollbar.css',
-  'injections/css/bug1526977-sreedharscce.in-login-fix.css'
+  'injections/css/bug1526977-sreedharscce.in-login-fix.css',
+  'injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css'
 ]
 
 FINAL_TARGET_FILES.features['webcompat@mozilla.org']['injections']['js'] += [
   'injections/js/bug0000000-dummy-js-injection.js',
   'injections/js/bug1452707-window.controllers-shim-ib.absa.co.za.js',
   'injections/js/bug1457335-histography.io-ua-change.js',
   'injections/js/bug1472075-bankofamerica.com-ua-change.js',
   'injections/js/bug1472081-election.gov.np-window.sidebar-shim.js',
--- a/browser/extensions/webcompat/ua_overrides.js
+++ b/browser/extensions/webcompat/ua_overrides.js
@@ -20,34 +20,16 @@ for (const override of [
     config: {
       matches: ["*://webcompat-addon-testcases.schub.io/*"],
       uaTransformer: (originalUA) => {
         return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1464106 - directvnow.com - Create a UA override for Directvnow.com for playback on desktop
-     * WebCompat issue #3846 - https://webcompat.com/issues/3846
-     *
-     * directvnow.com is blocking Firefox via UA sniffing. Outreach is still going
-     * on, and playback works fine if we spoof as Chrome.
-     */
-    id: "bug1464106",
-    platform: "desktop",
-    domain: "directvnow.com",
-    bug: "1464106",
-    config: {
-      matches: ["*://*.directvnow.com/*"],
-      uaTransformer: (originalUA) => {
-        return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1480710 - m.imgur.com - Build UA override
      * WebCompat issue #13154 - https://webcompat.com/issues/13154
      *
     * imgur returns a 404 for requests to CSS and JS files if requested with a Fennec
     * User Agent. By removing the Fennec identifiers and adding Chrome Mobile's, we
      * receive the correct CSS and JS files.
      */
     id: "bug1480710",
@@ -57,34 +39,16 @@ for (const override of [
     config: {
       matches: ["*://m.imgur.com/*"],
       uaTransformer: (originalUA) => {
         return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.85 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 755590 - sites.google.com - top bar doesn't show up in Firefox for Android
-     *
-     * Google Sites does show a different top bar template based on the User Agent.
-     * For Fennec, this results in a broken top bar. Appending Chrome and Mobile Safari
-     * identifiers to the UA results in a correct rendering.
-     */
-    id: "bug755590",
-    platform: "android",
-    domain: "sites.google.com",
-    bug: "755590",
-    config: {
-      matches: ["*://sites.google.com/*"],
-      uaTransformer: (originalUA) => {
-        return originalUA + " Chrome/68.0.3440.85 Mobile Safari/537.366";
-      },
-    },
-  }, {
-    /*
      * Bug 945963 - tieba.baidu.com serves simplified mobile content to Firefox Android
      * WebCompat issue #18455 - https://webcompat.com/issues/18455
      *
      * tieba.baidu.com and tiebac.baidu.com serve a heavily simplified and less functional
      * mobile experience to Firefox for Android users. Adding the AppleWebKit indicator
      * to the User Agent gets us the same experience.
      */
     id: "bug945963",
@@ -94,60 +58,16 @@ for (const override of [
     config: {
       matches: ["*://tieba.baidu.com/*", "*://tiebac.baidu.com/*"],
       uaTransformer: (originalUA) => {
         return originalUA + " AppleWebKit/537.36 (KHTML, like Gecko)";
       },
     },
   }, {
     /*
-     * Bug 1518625 - rottentomatoes.com - Add UA override for videos on www.rottentomatoes.com
-     *
-     * The video framework loaded in via pdk.theplatform.com fails to
-     * acknowledge that Firefox does support HLS, so it fails to find a
-     * supported video format and shows the loading bar forever. Spoofing as
-     * Chrome works.
-     *
-     * Contrary to other PDK sites, rottentomatoes sometimes uses an iFrame to
-     * player.theplatform.com to show a video, so we need to override that domain
-     * as well.
-     */
-    id: "bug1518625",
-    platform: "android",
-    domain: "rottentomatoes.com",
-    bug: "1518625",
-    config: {
-      matches: [
-        "*://*.rottentomatoes.com/*",
-        "*://player.theplatform.com/*",
-      ],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
-     * Bug 1177298 - Write UA overrides for top Japanese Sites
-     * (Imported from ua-update.json.in)
-     *
-     * To receive the proper mobile version instead of the desktop version or
-     * a lower grade mobile experience, the UA is spoofed.
-     */
-    id: "bug1177298-1",
-    platform: "android",
-    domain: "weather.yahoo.co.jp",
-    bug: "1177298",
-    config: {
-      matches: ["*://weather.yahoo.co.jp/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 5.0.2; Galaxy Nexus Build/IMM76B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.93 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1177298 - Write UA overrides for top Japanese Sites
      * (Imported from ua-update.json.in)
      *
      * To receive the proper mobile version instead of the desktop version or
      * a lower grade mobile experience, the UA is spoofed.
      */
     id: "bug1177298-2",
     platform: "android",
@@ -174,34 +94,16 @@ for (const override of [
     config: {
       matches: ["*://*.nhk.or.jp/*"],
       uaTransformer: (originalUA) => {
         return originalUA + " AppleWebKit";
       },
     },
   }, {
     /*
-     * Bug 1177298 - Write UA overrides for top Japanese Sites
-     * (Imported from ua-update.json.in)
-     *
-     * To receive the proper mobile version instead of the desktop version or
-     * a lower grade mobile experience, the UA is spoofed.
-     */
-    id: "bug1177298-4",
-    platform: "android",
-    domain: "uniqlo.com",
-    bug: "1177298",
-    config: {
-      matches: ["*://*.uniqlo.com/*"],
-      uaTransformer: (originalUA) => {
-        return originalUA + " Mobile Safari";
-      },
-    },
-  }, {
-    /*
      * Bug 1338260 - Add UA override for directTV
      * (Imported from ua-update.json.in)
      *
      * DirectTV has issues with scrolling and cut-off images. Pretending to be
      * Chrome for Android fixes those issues.
      */
     id: "bug1338260",
     platform: "android",
@@ -243,35 +145,16 @@ for (const override of [
     config: {
       matches: ["*://*.mobile.de/*"],
       uaTransformer: (_) => {
         return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1476436 - mobile.bet365.com - add UA override for fennec
-     * WebCompat issue #17010 - https://webcompat.com/issues/17010
-     *
-     * mobile.bet365.com serves fennec an alternative version with less interactive
-     * elements, although they work just fine. Spoofing as Chrome makes the
-     * interactive elements appear.
-     */
-    id: "bug1476436",
-    platform: "android",
-    domain: "mobile.bet365.com",
-    bug: "1476436",
-    config: {
-      matches: ["*://mobile.bet365.com/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1509831 - cc.com - Add UA override for CC.com
      * WebCompat issue #329 - https://webcompat.com/issues/329
      *
      * ComedyCentral blocks Firefox for not being able to play HLS, which was
      * true in previous versions, but no longer is. With a spoofed Chrome UA,
      * the site works just fine.
      */
     id: "bug1509831",
@@ -281,36 +164,16 @@ for (const override of [
     config: {
       matches: ["*://*.cc.com/*"],
       uaTransformer: (_) => {
         return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1508564 - cnbc.com - Add UA override for videos on www.cnbc.com
-     * WebCompat issue #8410 - https://webcompat.com/issues/8410
-     *
-     * The video framework loaded in via pdk.theplatform.com fails to
-     * acknowledge that Firefox does support HLS, so it fails to find a
-     * supported video format and shows the loading bar forever. Spoofing as
-     * Chrome works.
-     */
-    id: "bug1508564",
-    platform: "android",
-    domain: "cnbc.com",
-    bug: "1508564",
-    config: {
-      matches: ["*://*.cnbc.com/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1508516 - cineflix.com.br - Add UA override for cineflix.com.br/m/
      * WebCompat issue #21553 - https://webcompat.com/issues/21553
      *
      * The site renders a blank page with any Firefox snipped in the UA as it
      * is running into an exception. Spoofing as Chrome makes the site work
      * fine.
      */
     id: "bug1508516",
--- a/browser/themes/shared/places/editBookmarkPanel.inc.css
+++ b/browser/themes/shared/places/editBookmarkPanel.inc.css
@@ -63,26 +63,21 @@
   position: relative;
   padding: 0 16px;
 }
 
 #editBookmarkPanelRecommendation > div::-moz-focus-inner {
   border: none;
 }
 
-#editBookmarkPanelRecommendation > div h1 {
-  max-width: 215px;
-}
-
 #editBookmarkPanelRecommendationTitle {
   font-size: 16px;
   font-weight: 400;
   line-height: 1.25;
-  margin-bottom: 6px;
-  padding-top: 2px;
+  margin: 0;
 }
 
 #editBookmarkPanelRecommendationContent {
   font-size: 11px;
   line-height: 1.5;
   margin: 0;
 }
 
@@ -98,21 +93,27 @@
   padding: 0;
   text-align: start;
 }
 
 #editBookmarkPanelRecommendationCta:hover {
   text-decoration: underline;
 }
 
+#editBookmarkPanelRecommendation .cfrMessageHeader {
+  display: flex;
+  justify-content: space-between;
+  align-items: start;
+  margin-bottom: 6px;
+  margin-top: 10px;
+  padding-top: 2px;
+}
+
 #editBookmarkPanelRecommendation #cfrClose {
-  position: absolute;
   padding: 10px;
-  inset-inline-end: 8px;
-  top: 15px;
   width: 12px;
   height: 12px;
   border: none;
   border-radius: var(--toolbarbutton-border-radius);
   background-color: transparent;
   background-image: url(chrome://browser/skin/stop.svg);
   background-size: 12px;
   background-repeat: no-repeat;
--- a/build/mach_bootstrap.py
+++ b/build/mach_bootstrap.py
@@ -42,17 +42,20 @@ MACH_MODULES = [
     'gfx/thebes/mach_commands.py',
     'layout/tools/reftest/mach_commands.py',
     'python/mach_commands.py',
     'python/safety/mach_commands.py',
     'python/mach/mach/commands/commandinfo.py',
     'python/mach/mach/commands/settings.py',
     'python/mozboot/mozboot/mach_commands.py',
     'python/mozbuild/mozbuild/mach_commands.py',
+    'python/mozbuild/mozbuild/artifact_commands.py',
+    'python/mozbuild/mozbuild/build_commands.py',
     'python/mozbuild/mozbuild/backend/mach_commands.py',
+    'python/mozbuild/mozbuild/code-analysis/mach_commands.py',
     'python/mozbuild/mozbuild/compilation/codecomplete.py',
     'python/mozbuild/mozbuild/frontend/mach_commands.py',
     'python/mozrelease/mozrelease/mach_commands.py',
     'taskcluster/mach_commands.py',
     'testing/awsy/mach_commands.py',
     'testing/firefox-ui/mach_commands.py',
     'testing/geckodriver/mach_commands.py',
     'testing/mach_commands.py',
--- a/devtools/client/debugger/src/utils/editor/index.js
+++ b/devtools/client/debugger/src/utils/editor/index.js
@@ -158,42 +158,53 @@ function isVisible(codeMirror: any, top:
     top,
     scrollTop,
     scrollTop + scrollArea.clientHeight - fontHeight
   );
 
   return inXView && inYView;
 }
 
-export function getLocationsInViewport({ codeMirror }: Object) {
+export function getLocationsInViewport(
+  { codeMirror }: Object,
+  // Offset represents an allowance of characters or lines offscreen to improve
+  // perceived performance of column breakpoint rendering
+  offsetHorizontalCharacters: number = 100,
+  offsetVerticalLines: number = 20
+) {
   // Get scroll position
   if (!codeMirror) {
     return {
       start: { line: 0, column: 0 },
       end: { line: 0, column: 0 },
     };
   }
   const charWidth = codeMirror.defaultCharWidth();
   const scrollArea = codeMirror.getScrollInfo();
   const { scrollLeft } = codeMirror.doc;
   const rect = codeMirror.getWrapperElement().getBoundingClientRect();
-  const topVisibleLine = codeMirror.lineAtHeight(rect.top, "window");
-  const bottomVisibleLine = codeMirror.lineAtHeight(rect.bottom, "window");
+  const topVisibleLine =
+    codeMirror.lineAtHeight(rect.top, "window") - offsetVerticalLines;
+  const bottomVisibleLine =
+    codeMirror.lineAtHeight(rect.bottom, "window") + offsetVerticalLines;
 
-  const leftColumn = Math.floor(scrollLeft > 0 ? scrollLeft / charWidth : 0);
+  const leftColumn = Math.floor(
+    scrollLeft > 0 ? scrollLeft / charWidth - offsetHorizontalCharacters : 0
+  );
   const rightPosition = scrollLeft + (scrollArea.clientWidth - 30);
-  const rightCharacter = Math.floor(rightPosition / charWidth);
+  const rightCharacter =
+    Math.floor(rightPosition / charWidth) + offsetHorizontalCharacters;
 
   return {
     start: {
-      line: topVisibleLine,
-      column: leftColumn,
+      line: topVisibleLine || 0,
+      column: leftColumn || 0,
     },
     end: {
-      line: bottomVisibleLine,
+      line: bottomVisibleLine || 0,
       column: rightCharacter,
     },
   };
 }
 
 export function markText(
   { codeMirror }: Object,
   className: string,
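The new offset arguments deliberately over-approximate the visible area so that column breakpoints just outside the viewport are already rendered by the time they scroll into view. A hypothetical caller, where the editor wrapper object and the tightened values are invented for illustration:

// `editor` wraps a CodeMirror instance, i.e. it has a `codeMirror` property.
const { start, end } = getLocationsInViewport(editor);
// The defaults pad by 100 characters horizontally and 20 lines vertically;
// a caller could pass smaller allowances for very large files.
const tight = getLocationsInViewport(editor, 20, 5);
console.log(`render column breakpoints from line ${start.line} to ${end.line}`);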
--- a/devtools/client/inspector/rules/rules.js
+++ b/devtools/client/inspector/rules/rules.js
@@ -134,16 +134,17 @@ function CssRuleView(inspector, document
   this.element = doc.getElementById("ruleview-container-focusable");
   this.addRuleButton = doc.getElementById("ruleview-add-rule-button");
   this.searchField = doc.getElementById("ruleview-searchbox");
   this.searchClearButton = doc.getElementById("ruleview-searchinput-clear");
   this.pseudoClassPanel = doc.getElementById("pseudo-class-panel");
   this.pseudoClassToggle = doc.getElementById("pseudo-class-panel-toggle");
   this.classPanel = doc.getElementById("ruleview-class-panel");
   this.classToggle = doc.getElementById("class-panel-toggle");
+  this.printSimulationButton = doc.getElementById("print-simulation-toggle");
 
   this._initPrintSimulation();
 
   this.searchClearButton.hidden = true;
 
   this.shortcuts = new KeyShortcuts({ window: this.styleWindow });
   this._onShortcut = this._onShortcut.bind(this);
   this.shortcuts.on("Escape", event => this._onShortcut("Escape", event));
@@ -342,18 +343,16 @@ CssRuleView.prototype = {
     // us to use `actorHasMethod`. Please see `getActorDescription` for more information.
     this._emulationFront = await this.target.getFront("emulation");
 
     // Show the toggle button if:
     // - Print simulation is supported for the current target.
     // - Not debugging content document.
     if (await this.target.actorHasMethod("emulation", "getIsPrintSimulationEnabled") &&
         !this.target.chrome) {
-      this.printSimulationButton =
-        this.styleDocument.getElementById("print-simulation-toggle");
       this.printSimulationButton.removeAttribute("hidden");
 
       this.printSimulationButton.addEventListener("click", this._onTogglePrintSimulation);
     }
   },
 
   /**
    * Get the type of a given node in the rule-view
--- a/dom/base/nsDOMWindowUtils.cpp
+++ b/dom/base/nsDOMWindowUtils.cpp
@@ -4181,8 +4181,22 @@ nsDOMWindowUtils::IsCssPropertyRecordedI
     return NS_ERROR_FAILURE;
   }
 
   bool knownProp = false;
   *aRecorded = Servo_IsCssPropertyRecordedInUseCounter(
       doc->GetStyleUseCounters(), &aPropName, &knownProp);
   return knownProp ? NS_OK : NS_ERROR_FAILURE;
 }
+
+NS_IMETHODIMP
+nsDOMWindowUtils::GetLayersId(uint64_t* aOutLayersId) {
+  nsIWidget* widget = GetWidget();
+  if (!widget) {
+    return NS_ERROR_FAILURE;
+  }
+  BrowserChild* child = widget->GetOwningBrowserChild();
+  if (!child) {
+    return NS_ERROR_FAILURE;
+  }
+  *aOutLayersId = (uint64_t)child->GetLayersId();
+  return NS_OK;
+}
--- a/dom/interfaces/base/nsIDOMWindowUtils.idl
+++ b/dom/interfaces/base/nsIDOMWindowUtils.idl
@@ -2005,16 +2005,18 @@ interface nsIDOMWindowUtils : nsISupport
   const long MOUSE_BUTTONS_NOT_SPECIFIED = -1;
 
   // Return values for getDirectionFromText().
   const long DIRECTION_LTR = 0;
   const long DIRECTION_RTL = 1;
   const long DIRECTION_NOT_SET = 2;
 
   void syncFlushCompositor();
+
+  unsigned long long getLayersId();
 };
 
 [scriptable, uuid(c694e359-7227-4392-a138-33c0cc1f15a6)]
 interface nsITranslationNodeList : nsISupports {
   readonly attribute unsigned long length;
   Node item(in unsigned long index);
 
   // A translation root is a block element, or an inline element
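getLayersId() gives test code access to the compositor layers id of the window's widget; the APZ mochitests below use it to assert which layer tree a hit-test landed in. A minimal sketch of the call from privileged test JS, using the SpecialPowers path the tests in this patch rely on:

const utils = SpecialPowers.getDOMWindowUtils(window);
// Throws (NS_ERROR_FAILURE) when there is no widget or owning BrowserChild;
// otherwise returns the layers id as an unsigned 64-bit value.
const layersId = utils.getLayersId();
ok(layersId >= 0, "current window has a layers id");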
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -919,17 +919,17 @@ nsresult ContentChild::ProvideWindowComm
       name = EmptyString();
     }
 
     MOZ_DIAGNOSTIC_ASSERT(!nsContentUtils::IsSpecialName(name));
 
     Unused << SendCreateWindowInDifferentProcess(
         aTabOpener, aChromeFlags, aCalledFromJS, aPositionSpecified,
         aSizeSpecified, uriToLoad, features, fullZoom, name,
-        Principal(triggeringPrincipal), csp, referrerInfo);
+        triggeringPrincipal, csp, referrerInfo);
 
     // We return NS_ERROR_ABORT, so that the caller knows that we've abandoned
     // the window open as far as it is concerned.
     return NS_ERROR_ABORT;
   }
 
   if (aTabOpener) {
     PopupIPCTabContext context;
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -4969,25 +4969,55 @@ mozilla::ipc::IPCResult ContentParent::R
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult ContentParent::RecvCreateWindowInDifferentProcess(
     PBrowserParent* aThisTab, const uint32_t& aChromeFlags,
     const bool& aCalledFromJS, const bool& aPositionSpecified,
     const bool& aSizeSpecified, const Maybe<URIParams>& aURIToLoad,
     const nsCString& aFeatures, const float& aFullZoom, const nsString& aName,
-    const IPC::Principal& aTriggeringPrincipal, nsIContentSecurityPolicy* aCsp,
+    nsIPrincipal* aTriggeringPrincipal, nsIContentSecurityPolicy* aCsp,
     nsIReferrerInfo* aReferrerInfo) {
   MOZ_DIAGNOSTIC_ASSERT(!nsContentUtils::IsSpecialName(aName));
 
   nsCOMPtr<nsIRemoteTab> newRemoteTab;
   bool windowIsNew;
   nsCOMPtr<nsIURI> uriToLoad = DeserializeURI(aURIToLoad);
   int32_t openLocation = nsIBrowserDOMWindow::OPEN_NEWWINDOW;
 
+  // If we have enough data, check the schemes of the loader and loadee
+  // to make sure they make sense.
+  if (uriToLoad && uriToLoad->SchemeIs("file") &&
+      !GetRemoteType().EqualsLiteral(FILE_REMOTE_TYPE) &&
+      Preferences::GetBool("browser.tabs.remote.enforceRemoteTypeRestrictions",
+                           false)) {
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+#  ifdef DEBUG
+    nsAutoCString uriToLoadStr;
+    uriToLoad->GetAsciiSpec(uriToLoadStr);
+
+    nsCOMPtr<nsIURI> triggeringUri;
+    aTriggeringPrincipal->GetURI(getter_AddRefs(triggeringUri));
+    nsAutoCString triggeringUriStr;
+    if (triggeringUri) {
+      triggeringUri->GetAsciiSpec(triggeringUriStr);
+    }
+
+    NS_WARNING(nsPrintfCString(
+                   "RecvCreateWindowInDifferentProcess blocked loading file "
+                   "scheme from non-file remotetype: %s tried to load %s",
+                   triggeringUriStr.get(), uriToLoadStr.get())
+                   .get());
+#  endif
+    MOZ_CRASH(
+        "RecvCreateWindowInDifferentProcess blocked loading improper scheme");
+#endif
+    return IPC_OK();
+  }
+
   nsresult rv;
   mozilla::ipc::IPCResult ipcResult = CommonCreateWindow(
       aThisTab, /* aSetOpener = */ false, aChromeFlags, aCalledFromJS,
       aPositionSpecified, aSizeSpecified, uriToLoad, aFeatures, aFullZoom,
       /* aNextRemoteTabId = */ 0, aName, rv, newRemoteTab, &windowIsNew,
       openLocation, aTriggeringPrincipal, aReferrerInfo,
       /* aLoadUri = */ true, aCsp);
   if (!ipcResult) {
--- a/dom/ipc/ContentParent.h
+++ b/dom/ipc/ContentParent.h
@@ -526,18 +526,18 @@ class ContentParent final : public PCont
       nsIContentSecurityPolicy* aCsp, nsIReferrerInfo* aReferrerInfo,
       CreateWindowResolver&& aResolve);
 
   mozilla::ipc::IPCResult RecvCreateWindowInDifferentProcess(
       PBrowserParent* aThisTab, const uint32_t& aChromeFlags,
       const bool& aCalledFromJS, const bool& aPositionSpecified,
       const bool& aSizeSpecified, const Maybe<URIParams>& aURIToLoad,
       const nsCString& aFeatures, const float& aFullZoom, const nsString& aName,
-      const IPC::Principal& aTriggeringPrincipal,
-      nsIContentSecurityPolicy* aCsp, nsIReferrerInfo* aReferrerInfo);
+      nsIPrincipal* aTriggeringPrincipal, nsIContentSecurityPolicy* aCsp,
+      nsIReferrerInfo* aReferrerInfo);
 
   static void BroadcastBlobURLRegistration(
       const nsACString& aURI, BlobImpl* aBlobImpl, nsIPrincipal* aPrincipal,
       ContentParent* aIgnoreThisCP = nullptr);
 
   static void BroadcastBlobURLUnregistration(
       const nsACString& aURI, ContentParent* aIgnoreThisCP = nullptr);
 
--- a/dom/ipc/PContent.ipdl
+++ b/dom/ipc/PContent.ipdl
@@ -1292,17 +1292,17 @@ parent:
       uint32_t aChromeFlags,
       bool aCalledFromJS,
       bool aPositionSpecified,
       bool aSizeSpecified,
       URIParams? aURIToLoad,
       nsCString aFeatures,
       float aFullZoom,
       nsString aName,
-      Principal aTriggeringPrincipal,
+      nsIPrincipal aTriggeringPrincipal,
       nsIContentSecurityPolicy aCsp,
       nsIReferrerInfo aReferrerInfo);
 
     /**
      * Tells the parent to ungrab the pointer on the default display.
      *
      * This is for GTK platforms where we have to ensure the pointer ungrab happens in the
      * chrome process as that's the process that receives the pointer event.
--- a/dom/media/ipc/RemoteVideoDecoder.cpp
+++ b/dom/media/ipc/RemoteVideoDecoder.cpp
@@ -49,18 +49,25 @@ class KnowsCompositorVideo : public laye
     VideoBridgeChild* child =
         (aIdentifier.mParentProcessType == GeckoProcessType_GPU)
             ? VideoBridgeChild::GetSingletonToGPUProcess()
             : VideoBridgeChild::GetSingletonToParentProcess();
     if (!child) {
       return nullptr;
     }
 
+    // The RDD process will never use hardware decoding since it's
+    // sandboxed, so don't bother trying to create a sync object.
+    TextureFactoryIdentifier ident = aIdentifier;
+    if (XRE_IsRDDProcess()) {
+      ident.mSyncHandle = 0;
+    }
+
     RefPtr<KnowsCompositorVideo> knowsCompositor = new KnowsCompositorVideo();
-    knowsCompositor->IdentifyTextureHost(aIdentifier);
+    knowsCompositor->IdentifyTextureHost(ident);
     return knowsCompositor.forget();
   }
 
  private:
   KnowsCompositorVideo() = default;
   virtual ~KnowsCompositorVideo() = default;
 };
 
@@ -274,21 +281,20 @@ RemoteVideoDecoderParent::RemoteVideoDec
     RemoteDecoderManagerParent* aParent, const VideoInfo& aVideoInfo,
     float aFramerate, const CreateDecoderParams::OptionSet& aOptions,
     const Maybe<layers::TextureFactoryIdentifier>& aIdentifier,
     TaskQueue* aManagerTaskQueue, TaskQueue* aDecodeTaskQueue, bool* aSuccess,
     nsCString* aErrorDescription)
     : RemoteDecoderParent(aParent, aManagerTaskQueue, aDecodeTaskQueue),
       mVideoInfo(aVideoInfo) {
   if (aIdentifier) {
-    // Check to see if we have a direct PVideoBridge connection to the destination
-    // process specified in aIdentifier, and create a KnowsCompositor representing
-    // that connection if so.
-    // If this fails, then we fall back to returning the decoded frames directly
-    // via Output().
+    // Check to see if we have a direct PVideoBridge connection to the
+    // destination process specified in aIdentifier, and create a
+    // KnowsCompositor representing that connection if so. If this fails, then
+    // we fall back to returning the decoded frames directly via Output().
     mKnowsCompositor =
         KnowsCompositorVideo::TryCreateForIdentifier(*aIdentifier);
   }
 
   CreateDecoderParams params(mVideoInfo);
   params.mTaskQueue = mDecodeTaskQueue;
   params.mKnowsCompositor = mKnowsCompositor;
   params.mImageContainer = new layers::ImageContainer();
--- a/dom/webidl/APZTestData.webidl
+++ b/dom/webidl/APZTestData.webidl
@@ -47,16 +47,17 @@ namespace APZHitResultFlags {
   const unsigned short SCROLLBAR_VERTICAL = 0x0400;
   const unsigned short REQUIRES_TARGET_CONFIRMATION = 0x0800;
 };
 
 dictionary APZHitResult {
   float screenX;
   float screenY;
   unsigned short hitResult; // combination of the APZHitResultFlags.* flags
+  unsigned long long layersId;
   unsigned long long scrollId;
 };
 
 dictionary AdditionalDataEntry {
   DOMString key;
   DOMString value;
 };
 
--- a/gfx/gl/GLLibraryEGL.cpp
+++ b/gfx/gl/GLLibraryEGL.cpp
@@ -31,16 +31,17 @@
 #include "GLContext.h"
 #include "GLContextProvider.h"
 #include "ScopedGLHelpers.h"
 #ifdef MOZ_WIDGET_GTK
 #  include <gdk/gdk.h>
 #  ifdef MOZ_WAYLAND
 #    include <gdk/gdkwayland.h>
 #    include <dlfcn.h>
+#    include "mozilla/widget/nsWaylandDisplay.h"
 #  endif  // MOZ_WIDGET_GTK
 #endif    // MOZ_WAYLAND
 
 namespace mozilla {
 namespace gl {
 
 StaticMutex GLLibraryEGL::sMutex;
 StaticRefPtr<GLLibraryEGL> GLLibraryEGL::sEGLLibrary;
@@ -782,20 +783,17 @@ EGLDisplay GLLibraryEGL::CreateDisplay(b
       mIsWARP = true;
     }
   } else {
     void* nativeDisplay = EGL_DEFAULT_DISPLAY;
 #ifdef MOZ_WAYLAND
    // Some drivers don't support EGL_DEFAULT_DISPLAY
     GdkDisplay* gdkDisplay = gdk_display_get_default();
     if (!GDK_IS_X11_DISPLAY(gdkDisplay)) {
-      static auto sGdkWaylandDisplayGetWlDisplay =
-          (wl_display * (*)(GdkDisplay*))
-              dlsym(RTLD_DEFAULT, "gdk_wayland_display_get_wl_display");
-      nativeDisplay = sGdkWaylandDisplayGetWlDisplay(gdkDisplay);
+      nativeDisplay = WaylandDisplayGetWLDisplay(gdkDisplay);
       if (!nativeDisplay) {
         NS_WARNING("Failed to get wl_display.");
         return nullptr;
       }
     }
 #endif
     chosenDisplay = GetAndInitDisplay(*this, nativeDisplay);
   }
--- a/gfx/gl/moz.build
+++ b/gfx/gl/moz.build
@@ -145,12 +145,16 @@ FINAL_LIBRARY = 'xul'
 if CONFIG['MOZ_D3DCOMPILER_VISTA_DLL']:
     DEFINES['MOZ_D3DCOMPILER_VISTA_DLL'] = CONFIG['MOZ_D3DCOMPILER_VISTA_DLL']
 
 CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
 CXXFLAGS += CONFIG['TK_CFLAGS']
 CFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
 CFLAGS += CONFIG['TK_CFLAGS']
 
+if CONFIG['MOZ_WAYLAND']:
+    CXXFLAGS += CONFIG['MOZ_WAYLAND_CFLAGS']
+    CFLAGS += CONFIG['MOZ_WAYLAND_CFLAGS']
+
 LOCAL_INCLUDES += CONFIG['SKIA_INCLUDES']
 
 if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
     CXXFLAGS += ['-Wno-error=shadow']
--- a/gfx/layers/apz/src/APZCTreeManager.cpp
+++ b/gfx/layers/apz/src/APZCTreeManager.cpp
@@ -1309,17 +1309,17 @@ nsEventStatus APZCTreeManager::ReceiveIn
         if (StaticPrefs::apz_test_logging_enabled() &&
             mouseInput.mType == MouseInput::MOUSE_HITTEST) {
           ScrollableLayerGuid guid = apzc->GetGuid();
 
           MutexAutoLock lock(mTestDataLock);
           auto it = mTestData.find(guid.mLayersId);
           MOZ_ASSERT(it != mTestData.end());
           it->second->RecordHitResult(mouseInput.mOrigin, hitResult,
-                                      guid.mScrollId);
+                                      guid.mLayersId, guid.mScrollId);
         }
 
         TargetConfirmationFlags confFlags{hitResult};
         bool apzDragEnabled = StaticPrefs::apz_drag_enabled();
         if (apzDragEnabled && hitScrollbar) {
           // If scrollbar dragging is enabled and we hit a scrollbar, wait
           // for the main-thread confirmation because it contains drag metrics
           // that we need.
--- a/gfx/layers/apz/test/mochitest/apz_test_utils.js
+++ b/gfx/layers/apz/test/mochitest/apz_test_utils.js
@@ -665,17 +665,17 @@ function centerOf(element) {
 //   scrollId: the view-id of the scroll frame that was hit
 function hitTest(point) {
   var utils = getHitTestConfig().utils;
   dump("Hit-testing point (" + point.x + ", " + point.y + ")\n");
   utils.sendMouseEvent("MozMouseHittest", point.x, point.y, 0, 0, 0, true, 0, 0, true, true);
   var data = utils.getCompositorAPZTestData();
   ok(data.hitResults.length >= 1, "Expected at least one hit result in the APZTestData");
   var result = data.hitResults[data.hitResults.length - 1];
-  return { hitInfo: result.hitResult, scrollId: result.scrollId };
+  return { hitInfo: result.hitResult, scrollId: result.scrollId, layersId: result.layersId };
 }
 
 // Returns a canonical stringification of the hitInfo bitfield.
 function hitInfoToString(hitInfo) {
   var strs = [];
   for (var flag in APZHitResultFlags) {
     if ((hitInfo & APZHitResultFlags[flag]) != 0) {
       strs.push(flag);
@@ -688,19 +688,20 @@ function hitInfoToString(hitInfo) {
     return APZHitResultFlags[a] - APZHitResultFlags[b];
   });
   return strs.join(" | ");
 }
 
 // Takes an object returned by hitTest, along with the expected values, and
 // asserts that they match. Notably, it uses hitInfoToString to provide a
 // more useful message for the case that the hit info doesn't match
-function checkHitResult(hitResult, expectedHitInfo, expectedScrollId, desc) {
+function checkHitResult(hitResult, expectedHitInfo, expectedScrollId, expectedLayersId, desc) {
   is(hitInfoToString(hitResult.hitInfo), hitInfoToString(expectedHitInfo), desc + " hit info");
   is(hitResult.scrollId, expectedScrollId, desc + " scrollid");
+  is(hitResult.layersId, expectedLayersId, desc + " layersid");
 }
 
 // Symbolic constants used by hitTestScrollbar().
 var ScrollbarTrackLocation = {
     START: 1,
     END: 2,
 };
 var LayerState = {
@@ -712,16 +713,17 @@ var LayerState = {
 // This function takes a single argument which is expected to be
 // an object with the following fields:
 //   element: The scroll frame to perform the hit test on.
 //   directions: The direction(s) of scrollbars to test.
 //     If directions.vertical is true, the vertical scrollbar will be tested.
 //     If directions.horizontal is true, the horizontal scrollbar will be tested.
 //     Both may be true in a single call (in which case two tests are performed).
 //   expectedScrollId: The scroll id that is expected to be hit.
+//   expectedLayersId: The layers id that is expected to be hit.
 //   trackLocation: One of ScrollbarTrackLocation.{START, END}.
 //     Determines which end of the scrollbar track is targeted.
 //   expectThumb: Whether the scrollbar thumb is expected to be present
 //     at the targeted end of the scrollbar track.
 //   layerState: Whether the scroll frame is active or inactive.
 // The function performs the hit tests and asserts that the returned
 // hit test information is consistent with the passed parameters.
 // There is no return value.
@@ -782,29 +784,31 @@ function hitTestScrollbar(params) {
         x: boundingClientRect.right - (verticalScrollbarWidth / 2),
         y: (params.trackLocation == ScrollbarTrackLocation.START)
              ? (boundingClientRect.y + scrollbarArrowButtonHeight + 5)
              : (boundingClientRect.bottom - horizontalScrollbarHeight - scrollbarArrowButtonHeight - 5),
     };
     checkHitResult(hitTest(verticalScrollbarPoint),
                    expectedHitInfo | APZHitResultFlags.SCROLLBAR_VERTICAL,
                    params.expectedScrollId,
+                   params.expectedLayersId,
                    scrollframeMsg + " - vertical scrollbar");
   }
 
   if (params.directions.horizontal && horizontalScrollbarHeight > 0) {
     var horizontalScrollbarPoint = {
         x: (params.trackLocation == ScrollbarTrackLocation.START)
              ? (boundingClientRect.x + scrollbarArrowButtonWidth + 5)
              : (boundingClientRect.right - verticalScrollbarWidth - scrollbarArrowButtonWidth - 5),
         y: boundingClientRect.bottom - (horizontalScrollbarHeight / 2),
     };
     checkHitResult(hitTest(horizontalScrollbarPoint),
                    expectedHitInfo,
                    params.expectedScrollId,
+                   params.expectedLayersId,
                    scrollframeMsg + " - horizontal scrollbar");
   }
 }
 
 // Return a list of prefs for the given test identifier.
 function getPrefs(ident) {
   switch (ident) {
     case "TOUCH_EVENTS:PAN":
--- a/gfx/layers/apz/test/mochitest/helper_hittest_basic.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_basic.html
@@ -24,31 +24,34 @@ function* test(testDriver) {
   var scroller = document.getElementById("scroller");
   var apzaware = document.getElementById("apzaware");
 
   checkHitResult(hitTest(centerOf(scroller)),
                  APZHitResultFlags.VISIBLE |
                  (config.isWebRender ? APZHitResultFlags.INACTIVE_SCROLLFRAME
                                      : APZHitResultFlags.IRREGULAR_AREA),
                  utils.getViewId(document.scrollingElement),
+                 utils.getLayersId(),
                  "inactive scrollframe");
 
   // The apz-aware div (which has a non-passive wheel listener) is not visible
   // and so the hit-test should just return the root scrollframe area that's
   // covering it
   checkHitResult(hitTest(centerOf(apzaware)),
                  APZHitResultFlags.VISIBLE,
                  utils.getViewId(document.scrollingElement),
+                 utils.getLayersId(),
                  "inactive scrollframe - apzaware block");
 
   // Hit test where the scroll thumbs should be.
   hitTestScrollbar({
     element: scroller,
     directions: { vertical: true, horizontal: true },
     expectedScrollId: utils.getViewId(document.scrollingElement),
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.START,
     expectThumb: true,
     layerState: LayerState.INACTIVE,
   });
 
   // activate the scrollframe but keep the main-thread scroll position at 0.
   // also apply a async scroll offset in the y-direction such that the
   // scrollframe scrolls to the bottom of its range.
@@ -65,64 +68,70 @@ function* test(testDriver) {
   }
 
   var scrollerViewId = utils.getViewId(scroller);
 
   // Now we again test the middle of the scrollframe, which is now active
   checkHitResult(hitTest(centerOf(scroller)),
                  APZHitResultFlags.VISIBLE,
                  scrollerViewId,
+                 utils.getLayersId(),
                  "active scrollframe");
 
   // Test the apz-aware block
   var apzawarePosition = centerOf(apzaware); // main thread position
   apzawarePosition.y -= scrollY; // APZ position
   checkHitResult(hitTest(apzawarePosition),
                  APZHitResultFlags.VISIBLE |
                  (config.isWebRender ? APZHitResultFlags.APZ_AWARE_LISTENERS
                                      : APZHitResultFlags.IRREGULAR_AREA),
                  scrollerViewId,
+                 utils.getLayersId(),
                  "active scrollframe - apzaware block");
 
   // Test the scrollbars. Note that this time the vertical scrollthumb is
   // going to be at the bottom of the track. We'll test both the top and the
   // bottom.
 
   // top of scrollbar track
   hitTestScrollbar({
     element: scroller,
     directions: { vertical: true },
     expectedScrollId: scrollerViewId,
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.START,
     expectThumb: false,
     layerState: LayerState.ACTIVE,
   });
   // bottom of scrollbar track (scrollthumb)
   hitTestScrollbar({
     element: scroller,
     directions: { vertical: true },
     expectedScrollId: scrollerViewId,
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.END,
     expectThumb: true,
     layerState: LayerState.ACTIVE,
   });
   // left part of scrollbar track (has scrollthumb)
   hitTestScrollbar({
     element: scroller,
     directions: { horizontal: true },
     expectedScrollId: scrollerViewId,
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.START,
     expectThumb: true,
     layerState: LayerState.ACTIVE,
   });
   // right part of scrollbar track
   hitTestScrollbar({
     element: scroller,
     directions: { horizontal: true },
     expectedScrollId: scrollerViewId,
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.END,
     expectThumb: false,
     layerState: LayerState.ACTIVE,
   });
 
   subtestDone();
 }
 
--- a/gfx/layers/apz/test/mochitest/helper_hittest_checkerboard.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_checkerboard.html
@@ -42,16 +42,17 @@ function* test(testDriver) {
   var scrollerViewId = utils.getViewId(scroller);
 
   // Hit-test the middle of the scrollframe, which is now inside the
   // checkerboarded region, and check that we hit the scrollframe and
   // not its parent.
   checkHitResult(hitTest(centerOf(scroller)),
                  APZHitResultFlags.VISIBLE,
                  scrollerViewId,
+                 utils.getLayersId(),
                  "active scrollframe");
 
   subtestDone();
 }
 
 waitUntilApzStable().then(runContinuation(test));
 
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_clippath.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_clippath.html
@@ -39,65 +39,77 @@ function* test(testDriver) {
   var subwindow = document.getElementById("sub").contentWindow;
   var subscroller = subwindow.document.scrollingElement;
   var subutils = SpecialPowers.getDOMWindowUtils(subwindow);
   subutils.setDisplayPortForElement(0, 0, 400, 1000, subscroller, 1);
   yield waitForApzFlushedRepaints(testDriver);
 
   var rootViewId = utils.getViewId(document.scrollingElement);
   var iframeViewId = subutils.getViewId(subscroller);
+  var layersId = utils.getLayersId();
+  is(subutils.getLayersId(), layersId, "iframe is not OOP");
 
   checkHitResult(hitTest({ x: 10, y: 10 }),
       APZHitResultFlags.VISIBLE,
       iframeViewId,
+      layersId,
       "(simple) uninteresting point inside the iframe");
   checkHitResult(hitTest({ x: 500, y: 10 }),
       APZHitResultFlags.VISIBLE,
       rootViewId,
+      layersId,
       "(simple) uninteresting point in the root scroller");
   checkHitResult(hitTest({ x: 110, y: 110 }),
       APZHitResultFlags.VISIBLE,
       iframeViewId,
+      layersId,
       "(simple) point in the iframe behind overlaying div, but outside the bounding box of the clip path");
   checkHitResult(hitTest({ x: 160, y: 160 }),
       config.isWebRender ? APZHitResultFlags.VISIBLE
                          : APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
       config.isWebRender ? iframeViewId : rootViewId,
+      layersId,
       "(simple) point in the iframe behind overlaying div, inside the bounding box of the clip path, but outside the actual clip shape");
   checkHitResult(hitTest({ x: 300, y: 200 }),
       config.isWebRender ? APZHitResultFlags.VISIBLE
                          : APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
       rootViewId,
+      layersId,
       "(simple) point inside the clip shape of the overlaying div");
 
   // Now we turn the "simple" clip-path that WR can handle into a more complex
   // one that needs a mask. Then run the checks again; the expected results for
   // WR are slightly different
   document.getElementById("clipped").style.clipPath = "polygon(50px 200px, 75px 75px, 200px 50px, 350px 200px, 200px 350px)";
   yield waitForApzFlushedRepaints(testDriver);
 
   checkHitResult(hitTest({ x: 10, y: 10 }),
       APZHitResultFlags.VISIBLE,
       iframeViewId,
+      layersId,
       "(complex) uninteresting point inside the iframe");
   checkHitResult(hitTest({ x: 500, y: 10 }),
       APZHitResultFlags.VISIBLE,
       rootViewId,
+      layersId,
       "(complex) uninteresting point in the root scroller");
   checkHitResult(hitTest({ x: 110, y: 110 }),
       APZHitResultFlags.VISIBLE,
       iframeViewId,
+      layersId,
       "(complex) point in the iframe behind overlaying div, but outside the bounding box of the clip path");
   checkHitResult(hitTest({ x: 160, y: 160 }),
       APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
       rootViewId,
+      layersId,
       "(complex) point in the iframe behind overlaying div, inside the bounding box of the clip path, but outside the actual clip shape");
   checkHitResult(hitTest({ x: 300, y: 200 }),
       APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
       rootViewId,
+      layersId,
       "(complex) point inside the clip shape of the overlaying div");
 }
 
 waitUntilApzStable()
     .then(runContinuation(test))
     .then(subtestDone);
 </script>
 </body></html>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_clipped_fixed_modal.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_clipped_fixed_modal.html
@@ -68,16 +68,17 @@ function* test(testDriver) {
   var subframe = document.querySelector(".content");
   utils.setDisplayPortForElement(0, 0, 800, 2000, subframe, 1);
   yield waitForApzFlushedRepaints(testDriver);
 
   var target = document.querySelector(".content");
   checkHitResult(hitTest(centerOf(target)),
                  APZHitResultFlags.VISIBLE,
                  utils.getViewId(subframe),
+                 utils.getLayersId(),
                  "content covered by a clipped fixed div");
 
   subtestDone();
 }
 
 waitUntilApzStable().then(runContinuation(test));
 
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_fixed_in_scrolled_transform.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_fixed_in_scrolled_transform.html
@@ -71,16 +71,17 @@ function* test(testDriver) {
   var subframe = document.querySelector(".subframe");
   utils.setDisplayPortForElement(0, 0, 800, 2000, subframe, 1);
   yield waitForApzFlushedRepaints(testDriver);
 
   var target = document.querySelector(".absoluteClip");
   checkHitResult(hitTest(centerOf(target)),
                  APZHitResultFlags.VISIBLE,
                  utils.getViewId(subframe),
+                 utils.getLayersId(),
                  "fixed item inside a scrolling transform");
 
   subtestDone();
 }
 
 waitUntilApzStable().then(runContinuation(test));
 
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_float_bug1434846.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_float_bug1434846.html
@@ -36,16 +36,17 @@
 
 function* test(testDriver) {
   var utils = getHitTestConfig().utils;
 
   hitTestScrollbar({
     element: document.getElementById("subframe"),
     directions: { vertical: true },
     expectedScrollId: utils.getViewId(document.scrollingElement),
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.START,
     expectThumb: true,
     layerState: LayerState.INACTIVE,
   });
 
   subtestDone();
 }
 
--- a/gfx/layers/apz/test/mochitest/helper_hittest_float_bug1443518.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_float_bug1443518.html
@@ -36,16 +36,17 @@
 
 function* test(testDriver) {
   var utils = getHitTestConfig().utils;
 
   hitTestScrollbar({
     element: document.getElementById("subframe"),
     directions: { horizontal: true },
     expectedScrollId: utils.getViewId(document.scrollingElement),
+    expectedLayersId: utils.getLayersId(),
     trackLocation: ScrollbarTrackLocation.START,
     expectThumb: true,
     layerState: LayerState.INACTIVE,
   });
 
   subtestDone();
 }
 
--- a/gfx/layers/apz/test/mochitest/helper_hittest_nested_transforms_bug1459696.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_nested_transforms_bug1459696.html
@@ -63,16 +63,17 @@ function* test(testDriver) {
   var utils = getHitTestConfig().utils;
 
   var leftPane = document.getElementById("left-pane");
 
   checkHitResult(
     hitTest(centerOf(leftPane)),
     APZHitResultFlags.VISIBLE,
     utils.getViewId(leftPane),
+    utils.getLayersId(),
     "left pane was successfully hit");
 
   subtestDone();
 }
 
 waitUntilApzStable().then(runContinuation(test));
 
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_pointerevents_svg.html
@@ -96,71 +96,81 @@ function* test(testDriver) {
 
   var rootViewId = utils.getViewId(document.scrollingElement);
   for (var testId = 1; testId <= 4; testId++) {
     var target = document.querySelector(`#testcase${testId} .scroller`);
     var scrollerViewId = utils.getViewId(target);
     checkHitResult(hitTest(centerOf(target)),
                    APZHitResultFlags.VISIBLE,
                    scrollerViewId,
+                   utils.getLayersId(),
                    `center of scroller in testcase ${testId}`);
 
     var bounds = target.getBoundingClientRect();
     var verticalScrollbarWidth = bounds.width - target.clientWidth;
     var horizontalScrollbarHeight = bounds.height - target.clientHeight;
 
     // these points should all hit the target scroller
     checkHitResult(hitTest({x: bounds.x + 1, y: bounds.y + 1}),
                    APZHitResultFlags.VISIBLE,
                    scrollerViewId,
+                   utils.getLayersId(),
                    `top left of scroller in testcase ${testId}`);
     checkHitResult(hitTest({x: bounds.x + 1, y: bounds.y + (bounds.height / 2)}),
                    APZHitResultFlags.VISIBLE,
                    scrollerViewId,
+                   utils.getLayersId(),
                    `middle left of scroller in testcase ${testId}`);
     checkHitResult(hitTest({x: bounds.x + 1, y: bounds.bottom - horizontalScrollbarHeight - 1}),
                    APZHitResultFlags.VISIBLE,
                    scrollerViewId,
+                   utils.getLayersId(),
                    `bottom left (excluding scrollbar) of scroller in testcase ${testId}`);
     if (horizontalScrollbarHeight > 0) {
       checkHitResult(hitTest({x: bounds.x + 1, y: bounds.bottom - 1}),
                      APZHitResultFlags.VISIBLE | APZHitResultFlags.SCROLLBAR,
                      scrollerViewId,
+                     utils.getLayersId(),
                      `bottom left of scroller in testcase ${testId}`);
     }
 
     // With the first two cases (circle masks) both WebRender and non-WebRender
     // emit dispatch-to-content regions for the right side, so for now we just
     // test for that. Eventually WebRender should be able to stop emitting DTC
     // and we can update this test to be more precise in that case.
     // For the two rectangular test cases we get precise results rather than
     // dispatch-to-content.
     if (testId == 1 || testId == 2) {
       checkHitResult(hitTest({x: bounds.right - verticalScrollbarWidth - 1, y: bounds.y + 1}),
                      APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
                      rootViewId,
+                     utils.getLayersId(),
                      `top right of scroller in testcase ${testId}`);
       checkHitResult(hitTest({x: bounds.right - verticalScrollbarWidth - 1, y: bounds.bottom - horizontalScrollbarHeight - 1}),
                      APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
                      rootViewId,
+                     utils.getLayersId(),
                      `bottom right of scroller in testcase ${testId}`);
     } else {
       checkHitResult(hitTest({x: bounds.right - verticalScrollbarWidth - 1, y: bounds.y + 1}),
                      APZHitResultFlags.VISIBLE,
                      scrollerViewId,
+                     utils.getLayersId(),
                      `top right of scroller in testcase ${testId}`);
       checkHitResult(hitTest({x: bounds.right - verticalScrollbarWidth - 1, y: bounds.bottom - horizontalScrollbarHeight - 1}),
                      APZHitResultFlags.VISIBLE,
                      scrollerViewId,
+                     utils.getLayersId(),
                      `bottom right of scroller in testcase ${testId}`);
     }
 
     checkHitResult(hitTest({x: bounds.right - 1, y: bounds.y + (bounds.height / 2)}),
                    APZHitResultFlags.VISIBLE | APZHitResultFlags.IRREGULAR_AREA,
                    rootViewId,
+                   utils.getLayersId(),
                    `middle right of scroller in testcase ${testId}`);
   }
 }
 
 waitUntilApzStable()
     .then(runContinuation(test))
     .then(subtestDone);
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_sticky_bug1478304.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_sticky_bug1478304.html
@@ -41,16 +41,17 @@ function* test(testDriver) {
 
   var subframe = document.getElementById("subframe");
   var sticky = document.getElementById("sticky");
 
   checkHitResult(
     hitTest(centerOf(sticky)),
     APZHitResultFlags.VISIBLE,
     utils.getViewId(subframe),
+    utils.getLayersId(),
     "subframe was successfully hit");
 
   subtestDone();
 }
 
 waitUntilApzStable().then(runContinuation(test));
 
 </script>
--- a/gfx/layers/apz/test/mochitest/helper_hittest_touchaction.html
+++ b/gfx/layers/apz/test/mochitest/helper_hittest_touchaction.html
@@ -108,204 +108,227 @@ var config = getHitTestConfig();
 function* test(testDriver) {
   for (var scroller of document.querySelectorAll(".taBigBox > div")) {
     // layerize all the scrollable divs
     config.utils.setDisplayPortForElement(0, 0, 100, 100, scroller, 1);
   }
   yield waitForApzFlushedRepaints(testDriver);
 
   var scrollId = config.utils.getViewId(document.scrollingElement);
+  var layersId = config.utils.getLayersId();
 
   // Elements with APZ-aware listeners round-trip through the dispatch-to-content
   // region and end up as IRREGULAR_AREA when WebRender is disabled.
   var touchListenerFlag = config.isWebRender
         ? APZHitResultFlags.APZ_AWARE_LISTENERS
         : APZHitResultFlags.IRREGULAR_AREA;
 
   checkHitResult(
       hitTest(centerOf("taNone")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: none");
   checkHitResult(
       hitTest(centerOf("taInnerNonePanX")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x inside touch-action: none");
   checkHitResult(
       hitTest(centerOf("taInnerNoneManip")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: manipulation inside touch-action: none");
 
   checkHitResult(
       hitTest(centerOf("taPanX")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x");
   checkHitResult(
       hitTest(centerOf("taInnerPanXY")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-y inside touch-action: pan-x");
   checkHitResult(
       hitTest(centerOf("taInnerPanXManip")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: manipulation inside touch-action: pan-x");
 
   checkHitResult(
       hitTest(centerOf("taPanY")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-y");
   checkHitResult(
       hitTest(centerOf("taInnerPanYX")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x inside touch-action: pan-y");
   checkHitResult(
       hitTest(centerOf("taInnerPanYY")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-y inside touch-action: pan-y");
 
   checkHitResult(
       hitTest(centerOf("taPanXY")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x pan-y");
   checkHitResult(
       hitTest(centerOf("taInnerPanXYNone")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: none inside touch-action: pan-x pan-y");
 
   checkHitResult(
       hitTest(centerOf("taManip")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: manipulation");
   checkHitResult(
       hitTest(centerOf("taInnerManipPanX")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x inside touch-action: manipulation");
   checkHitResult(
       hitTest(centerOf("taInnerManipNone")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: none inside touch-action: manipulation");
   checkHitResult(
       hitTest(centerOf("taInnerManipListener")),
       APZHitResultFlags.VISIBLE |
       touchListenerFlag |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "div with touch listener inside touch-action: manipulation");
 
   checkHitResult(
       hitTest(centerOf("taListener")),
       APZHitResultFlags.VISIBLE |
       touchListenerFlag,
       scrollId,
+      layersId,
       "div with touch listener");
   checkHitResult(
       hitTest(centerOf("taInnerListenerPanX")),
       APZHitResultFlags.VISIBLE |
       touchListenerFlag |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       scrollId,
+      layersId,
       "touch-action: pan-x inside div with touch listener");
 
   checkHitResult(
       hitTest(centerOf("taScrollerPanY")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       config.utils.getViewId(document.getElementById("taScrollerPanY")),
+      layersId,
       "touch-action: pan-y on scroller");
   checkHitResult(
       hitTest(centerOf("taScroller")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_X_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       config.utils.getViewId(document.getElementById("taScroller")),
+      layersId,
       "touch-action: pan-y on div inside scroller");
   checkHitResult(
       hitTest(centerOf("taScroller2")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       config.utils.getViewId(document.getElementById("taScroller2")),
+      layersId,
       "zooming restrictions from pan-x outside scroller get inherited in");
 
   checkHitResult(
       hitTest(centerOf("taScrollerPanX")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.PAN_Y_DISABLED |
       APZHitResultFlags.PINCH_ZOOM_DISABLED |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       config.utils.getViewId(document.getElementById("taScrollerPanX")),
+      layersId,
       "touch-action: pan-x on scroller inside manipulation");
   checkHitResult(
       hitTest(centerOf("taScroller3")),
       APZHitResultFlags.VISIBLE |
       APZHitResultFlags.DOUBLE_TAP_ZOOM_DISABLED,
       config.utils.getViewId(document.getElementById("taScroller3")),
+      layersId,
       "touch-action: manipulation outside scroller gets inherited in");
 }
 
 if (!config.isWebRender) {
   ok(true, "This test is WebRender-only because we get a bunch of dispatch-to-content regions without it and the test isn't very interesting.");
   subtestDone();
 } else {
   waitUntilApzStable()
--- a/gfx/layers/apz/testutil/APZTestData.cpp
+++ b/gfx/layers/apz/testutil/APZTestData.cpp
@@ -82,16 +82,17 @@ struct APZTestDataToJSConverter {
     aOutHitResult.mScreenX.Construct() = aResult.point.x;
     aOutHitResult.mScreenY.Construct() = aResult.point.y;
     static_assert(MaxEnumValue<gfx::CompositorHitTestInfo::valueType>::value <
                       std::numeric_limits<uint16_t>::digits,
                   "CompositorHitTestFlags MAX value has to be less than "
                   "number of bits in uint16_t");
     aOutHitResult.mHitResult.Construct() =
         static_cast<uint16_t>(aResult.result.serialize());
+    aOutHitResult.mLayersId.Construct() = aResult.layersId.mId;
     aOutHitResult.mScrollId.Construct() = aResult.scrollId;
   }
 };
 
 bool APZTestData::ToJS(JS::MutableHandleValue aOutValue,
                        JSContext* aContext) const {
   dom::APZTestData result;
   APZTestDataToJSConverter::ConvertAPZTestData(*this, result);
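
For context on the static_assert above: the hit-result flag set serializes to a
plain uint16_t bitmask, so the largest flag index must stay strictly below the
16 value bits of the carrier integer. A minimal, hedged sketch of that
bit-capacity invariant, assuming an EnumSet-style flag set; the names are
illustrative, not the real CompositorHitTestFlags definition.

#include <cstdint>
#include <limits>

// Hypothetical flag enumeration; each enumerator is a bit index.
enum class HitFlagsSketch : uint8_t {
  eVisible = 0,
  eScrollbar = 1,
  eIrregularArea = 2,
  eMax = eIrregularArea,
};

// serialize() packs the set as (1 << index) bits, so every index must be
// strictly less than the number of value bits in the carrier integer.
static_assert(static_cast<unsigned>(HitFlagsSketch::eMax) <
                  std::numeric_limits<uint16_t>::digits,
              "flag indices must fit in a uint16_t bitmask");

uint16_t SerializeSketch(HitFlagsSketch aFlag) {
  return static_cast<uint16_t>(1u << static_cast<unsigned>(aFlag));
}
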
--- a/gfx/layers/apz/testutil/APZTestData.h
+++ b/gfx/layers/apz/testutil/APZTestData.h
@@ -70,18 +70,19 @@ class APZTestData {
   }
   void LogTestDataForRepaintRequest(SequenceNumber aSequenceNumber,
                                     ViewID aScrollId, const std::string& aKey,
                                     const std::string& aValue) {
     LogTestDataImpl(mRepaintRequests, aSequenceNumber, aScrollId, aKey, aValue);
   }
   void RecordHitResult(const ScreenPoint& aPoint,
                        const mozilla::gfx::CompositorHitTestInfo& aResult,
+                       const LayersId& aLayersId,
                        const ViewID& aScrollId) {
-    mHitResults.AppendElement(HitResult{aPoint, aResult, aScrollId});
+    mHitResults.AppendElement(HitResult{aPoint, aResult, aLayersId, aScrollId});
   }
   void RecordAdditionalData(const std::string& aKey,
                             const std::string& aValue) {
     mAdditionalData[aKey] = aValue;
   }
 
   // Convert this object to a JS representation.
   bool ToJS(JS::MutableHandleValue aOutValue, JSContext* aContext) const;
@@ -92,16 +93,17 @@ class APZTestData {
   struct ScrollFrameData : ScrollFrameDataBase {};
   typedef std::map<ViewID, ScrollFrameData> BucketBase;
   struct Bucket : BucketBase {};
   typedef std::map<SequenceNumber, Bucket> DataStoreBase;
   struct DataStore : DataStoreBase {};
   struct HitResult {
     ScreenPoint point;
     mozilla::gfx::CompositorHitTestInfo result;
+    LayersId layersId;
     ViewID scrollId;
   };
 
  private:
   DataStore mPaints;
   DataStore mRepaintRequests;
   nsTArray<HitResult> mHitResults;
   // Additional free-form data that's not grouped by paint or scroll frame.
@@ -192,22 +194,24 @@ struct ParamTraits<mozilla::layers::APZT
 
 template <>
 struct ParamTraits<mozilla::layers::APZTestData::HitResult> {
   typedef mozilla::layers::APZTestData::HitResult paramType;
 
   static void Write(Message* aMsg, const paramType& aParam) {
     WriteParam(aMsg, aParam.point);
     WriteParam(aMsg, aParam.result);
+    WriteParam(aMsg, aParam.layersId);
     WriteParam(aMsg, aParam.scrollId);
   }
 
   static bool Read(const Message* aMsg, PickleIterator* aIter,
                    paramType* aResult) {
     return (ReadParam(aMsg, aIter, &aResult->point) &&
             ReadParam(aMsg, aIter, &aResult->result) &&
+            ReadParam(aMsg, aIter, &aResult->layersId) &&
             ReadParam(aMsg, aIter, &aResult->scrollId));
   }
 };
 
 }  // namespace IPC
 
 #endif /* mozilla_layers_APZTestData_h */
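
The ParamTraits specialization above stays correct only because Write and Read
enumerate the fields in identical order; adding layersId mid-stream is safe
precisely because both sides were updated together. A hedged, self-contained
sketch of the same order-symmetry pattern against a plain byte buffer; Record
and the helpers are hypothetical, not the Gecko IPC glue.

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical three-field record mirroring HitResult's shape.
struct Record {
  float x;
  uint64_t layersId;
  uint64_t scrollId;
};

// Serialization order defines the wire format; Read must consume fields in
// exactly the order Write produced them, or every later field is garbage.
void Write(std::vector<uint8_t>& aMsg, const Record& aParam) {
  auto append = [&](const void* p, size_t n) {
    const uint8_t* b = static_cast<const uint8_t*>(p);
    aMsg.insert(aMsg.end(), b, b + n);
  };
  append(&aParam.x, sizeof aParam.x);
  append(&aParam.layersId, sizeof aParam.layersId);  // new field, mid-stream
  append(&aParam.scrollId, sizeof aParam.scrollId);
}

bool Read(const std::vector<uint8_t>& aMsg, size_t& aIter, Record* aResult) {
  auto take = [&](void* p, size_t n) {
    if (aIter + n > aMsg.size()) return false;
    std::memcpy(p, aMsg.data() + aIter, n);
    aIter += n;
    return true;
  };
  return take(&aResult->x, sizeof aResult->x) &&
         take(&aResult->layersId, sizeof aResult->layersId) &&
         take(&aResult->scrollId, sizeof aResult->scrollId);
}
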
--- a/gfx/skia/skia/src/utils/SkPolyUtils.cpp
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.cpp
@@ -1046,26 +1046,26 @@ private:
 // Then as we pop the vertices from the queue we generate events which indicate that an edge
 // should be added to or removed from an edge list. If any intersections are detected in the edge
 // list, then we know the polygon is self-intersecting and hence not simple.
 bool SkIsSimplePolygon(const SkPoint* polygon, int polygonSize) {
     if (polygonSize < 3) {
         return false;
     }
 
-    // need to be able to represent all the vertices in the 16-bit indices
-    if (polygonSize > std::numeric_limits<uint16_t>::max()) {
-        return false;
-    }
-
     // If it's convex, it's simple
     if (SkIsConvexPolygon(polygon, polygonSize)) {
         return true;
     }
 
+    // practically speaking, it takes too long to process large polygons
+    if (polygonSize > 2048) {
+        return false;
+    }
+
     SkTDPQueue <Vertex, Vertex::Left> vertexQueue(polygonSize);
     for (int i = 0; i < polygonSize; ++i) {
         Vertex newVertex;
         if (!polygon[i].isFinite()) {
             return false;
         }
         newVertex.fPosition = polygon[i];
         newVertex.fIndex = i;
@@ -1140,31 +1140,38 @@ static bool is_reflex_vertex(const SkPoi
                              uint16_t prevIndex, uint16_t currIndex, uint16_t nextIndex) {
     int side = compute_side(inputPolygonVerts[prevIndex],
                             inputPolygonVerts[currIndex] - inputPolygonVerts[prevIndex],
                             inputPolygonVerts[nextIndex]);
     // if reflex point, we need to add extra edges
     return (side*winding*offset < 0);
 }
 
-bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize, SkScalar offset,
+bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+                           const SkRect& bounds, SkScalar offset,
                            SkTDArray<SkPoint>* offsetPolygon, SkTDArray<int>* polygonIndices) {
     if (inputPolygonSize < 3) {
         return false;
     }
 
     // need to be able to represent all the vertices in the 16-bit indices
     if (inputPolygonSize >= std::numeric_limits<uint16_t>::max()) {
         return false;
     }
 
     if (!SkScalarIsFinite(offset)) {
         return false;
     }
 
+    // can't inset more than the half bounds of the polygon
+    if (offset > SkTMin(SkTAbs(SK_ScalarHalf*bounds.width()),
+                        SkTAbs(SK_ScalarHalf*bounds.height()))) {
+        return false;
+    }
+
     // offsetting close to zero just returns the original poly
     if (SkScalarNearlyZero(offset)) {
         for (int i = 0; i < inputPolygonSize; ++i) {
             *offsetPolygon->push() = inputPolygonVerts[i];
             *polygonIndices->push() = i;
         }
         return true;
     }
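
Two guards change in this file: the vertex-count limit now runs after the
convexity early-out, so an arbitrarily large convex polygon is still accepted
cheaply, and it tightens from the 16-bit-index bound to a practical 2048
because the sweep is too slow beyond that; separately, SkOffsetSimplePolygon
gains a feasibility check against the polygon's bounds. A hedged sketch of
both rules, with stand-in parameters rather than the Skia signatures.

#include <algorithm>
#include <cmath>

// Hedged sketch of the reordered guards in SkIsSimplePolygon; IsConvex() is
// elided, and the sweep itself is represented by the final return.
bool IsSimplePolygonSketch(int vertexCount, bool isConvex) {
  if (vertexCount < 3) return false;     // degenerate input
  if (isConvex) return true;             // convex implies simple, any size
  if (vertexCount > 2048) return false;  // sweep-line test would be too slow
  return true;  // ...the O(n log n) sweep-line self-intersection test runs here
}

// The new feasibility rule in SkOffsetSimplePolygon: insetting by more than
// half of the narrower bounds dimension collapses the polygon entirely.
bool InsetIsFeasible(float boundsW, float boundsH, float offset) {
  float maxInset = 0.5f * std::min(std::fabs(boundsW), std::fabs(boundsH));
  return offset <= maxInset;  // e.g. a 100x40 rect allows insets up to 20
}
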
--- a/gfx/skia/skia/src/utils/SkPolyUtils.h
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.h
@@ -8,16 +8,18 @@
 #ifndef SkOffsetPolygon_DEFINED
 #define SkOffsetPolygon_DEFINED
 
 #include <functional>
 
 #include "SkTDArray.h"
 #include "SkPoint.h"
 
+struct SkRect;
+
 /**
  * Generates a polygon that is inset a constant from the boundary of a given convex polygon.
  *
  * @param inputPolygonVerts  Array of points representing the vertices of the original polygon.
  *  It should be convex and have no coincident points.
  * @param inputPolygonSize  Number of vertices in the original polygon.
  * @param inset  How far we wish to inset the polygon. This should be a positive value.
  * @param insetPolygon  The resulting inset polygon, if any.
@@ -28,24 +30,25 @@ bool SkInsetConvexPolygon(const SkPoint*
 
 /**
  * Generates a simple polygon (if possible) that is offset a constant distance from the boundary
  * of a given simple polygon.
  * The input polygon must be simple and have no coincident vertices or collinear edges.
  *
  * @param inputPolygonVerts  Array of points representing the vertices of the original polygon.
  * @param inputPolygonSize  Number of vertices in the original polygon.
+ * @param bounds Bounding rectangle for the original polygon.
  * @param offset How far we wish to offset the polygon.
  *   Positive values indicate insetting, negative values outsetting.
  * @param offsetPolygon  The resulting offset polygon, if any.
  * @param polygonIndices  The indices of the original polygon that map to the new one.
  * @return true if an offset simple polygon exists, false otherwise.
  */
 bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
-                           SkScalar offset, SkTDArray<SkPoint>* offsetPolygon,
+                           const SkRect& bounds, SkScalar offset, SkTDArray<SkPoint>* offsetPolygon,
                            SkTDArray<int>* polygonIndices = nullptr);
 
 /**
  * Compute the number of points needed for a circular join when offsetting a vertex.
  * The lengths of offset0 and offset1 don't have to equal |offset| -- only the direction matters.
  * The segment lengths will be approximately four pixels.
  *
  * @param offset0  Starting offset vector direction.
--- a/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
+++ b/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
@@ -20,17 +20,17 @@
 #endif
 
 
 /**
  * Base class
  */
 class SkBaseShadowTessellator {
 public:
-    SkBaseShadowTessellator(const SkPoint3& zPlaneParams, bool transparent);
+    SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds, bool transparent);
     virtual ~SkBaseShadowTessellator() {}
 
     sk_sp<SkVertices> releaseVertices() {
         if (!fSucceeded) {
             return nullptr;
         }
         return SkVertices::MakeCopy(SkVertices::kTriangles_VertexMode, this->vertexCount(),
                                     fPositions.begin(), nullptr, fColors.begin(),
@@ -94,16 +94,17 @@ protected:
     SkTDArray<SkPoint>  fPositions;
     SkTDArray<SkColor>  fColors;
     SkTDArray<uint16_t> fIndices;
 
     SkTDArray<SkPoint>   fPathPolygon;
     SkTDArray<SkPoint>   fClipPolygon;
     SkTDArray<SkVector>  fClipVectors;
 
+    SkRect              fPathBounds;
     SkPoint             fCentroid;
     SkScalar            fArea;
     SkScalar            fLastArea;
     SkScalar            fLastCross;
 
     int                 fFirstVertexIndex;
     SkVector            fFirstOutset;
     SkPoint             fFirstPoint;
@@ -150,18 +151,20 @@ static bool duplicate_pt(const SkPoint& 
 }
 
 static SkScalar perp_dot(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
     SkVector v0 = p1 - p0;
     SkVector v1 = p2 - p1;
     return v0.cross(v1);
 }
 
-SkBaseShadowTessellator::SkBaseShadowTessellator(const SkPoint3& zPlaneParams, bool transparent)
+SkBaseShadowTessellator::SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds,
+                                                 bool transparent)
         : fZPlaneParams(zPlaneParams)
+        , fPathBounds(bounds)
         , fCentroid({0, 0})
         , fArea(0)
         , fLastArea(0)
         , fLastCross(0)
         , fFirstVertexIndex(-1)
         , fSucceeded(false)
         , fTransparent(transparent)
         , fIsConvex(true)
@@ -554,28 +557,28 @@ bool SkBaseShadowTessellator::computeCon
     if (!SkIsSimplePolygon(&fPathPolygon[0], fPathPolygon.count())) {
         return false;
     }
 
     // generate inner ring
     SkTDArray<SkPoint> umbraPolygon;
     SkTDArray<int> umbraIndices;
     umbraIndices.setReserve(fPathPolygon.count());
-    if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), inset,
-                               &umbraPolygon, &umbraIndices)) {
+    if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), fPathBounds, inset,
+                               &umbraPolygon, &umbraIndices)) {
         // TODO: figure out how to handle this case
         return false;
     }
 
     // generate outer ring
     SkTDArray<SkPoint> penumbraPolygon;
     SkTDArray<int> penumbraIndices;
     penumbraPolygon.setReserve(umbraPolygon.count());
     penumbraIndices.setReserve(umbraPolygon.count());
-    if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), -outset,
+    if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.count(), fPathBounds, -outset,
                                &penumbraPolygon, &penumbraIndices)) {
         // TODO: figure out how to handle this case
         return false;
     }
 
     if (!umbraPolygon.count() || !penumbraPolygon.count()) {
         return false;
     }
@@ -899,22 +902,24 @@ private:
 
     typedef SkBaseShadowTessellator INHERITED;
 };
 
 SkAmbientShadowTessellator::SkAmbientShadowTessellator(const SkPath& path,
                                                        const SkMatrix& ctm,
                                                        const SkPoint3& zPlaneParams,
                                                        bool transparent)
-        : INHERITED(zPlaneParams, transparent) {
+        : INHERITED(zPlaneParams, path.getBounds(), transparent) {
     // Set base colors
-    auto baseZ = heightFunc(path.getBounds().centerX(), path.getBounds().centerY());
+    auto baseZ = heightFunc(fPathBounds.centerX(), fPathBounds.centerY());
     // umbraColor is the interior value, penumbraColor the exterior value.
     auto outset = SkDrawShadowMetrics::AmbientBlurRadius(baseZ);
     auto inset = outset * SkDrawShadowMetrics::AmbientRecipAlpha(baseZ) - outset;
+    inset = SkScalarPin(inset, 0, SkTMin(path.getBounds().width(),
+                                         path.getBounds().height()));
 
     if (!this->computePathPolygon(path, ctm)) {
         return;
     }
     if (fPathPolygon.count() < 3 || !SkScalarIsFinite(fArea)) {
         fSucceeded = true; // We don't want to try to blur these cases, so we will
                            // return an empty SkVertices instead.
         return;
@@ -994,17 +999,17 @@ private:
 
     typedef SkBaseShadowTessellator INHERITED;
 };
 
 SkSpotShadowTessellator::SkSpotShadowTessellator(const SkPath& path, const SkMatrix& ctm,
                                                  const SkPoint3& zPlaneParams,
                                                  const SkPoint3& lightPos, SkScalar lightRadius,
                                                  bool transparent)
-    : INHERITED(zPlaneParams, transparent) {
+    : INHERITED(zPlaneParams, path.getBounds(), transparent) {
 
     // Compute the blur radius, scale and translation for the spot shadow.
     SkMatrix shadowTransform;
     SkScalar outset;
     if (!SkDrawShadowMetrics::GetSpotShadowTransform(lightPos, lightRadius,
                                                      ctm, zPlaneParams, path.getBounds(),
                                                      &shadowTransform, &outset)) {
         return;
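
The ambient tessellator above now pins its computed inset into
[0, min(width, height)] of the path bounds before requesting the umbra ring,
which keeps a large height value from asking SkOffsetSimplePolygon for an
impossible inset. A hedged numeric sketch of the clamp; the two metric
functions are invented stand-ins for SkDrawShadowMetrics, and only the
pinning arithmetic is the point.

#include <algorithm>

// Illustrative stand-ins; the real values come from SkDrawShadowMetrics.
float AmbientBlurRadiusSketch(float z) { return 0.1f * z; }
float AmbientRecipAlphaSketch(float z) { return 1.0f + 0.05f * z; }

float ClampedAmbientInset(float z, float boundsW, float boundsH) {
  float outset = AmbientBlurRadiusSketch(z);
  float inset = outset * AmbientRecipAlphaSketch(z) - outset;
  // Pin as the patch does: a huge z can't request an inset wider than the
  // path's own bounds, which would make the offset polygon degenerate.
  return std::max(0.0f, std::min(inset, std::min(boundsW, boundsH)));
}
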
--- a/gfx/webrender_bindings/Moz2DImageRenderer.cpp
+++ b/gfx/webrender_bindings/Moz2DImageRenderer.cpp
@@ -307,17 +307,17 @@ static RefPtr<ScaledFont> GetScaledFont(
 
 static bool Moz2DRenderCallback(const Range<const uint8_t> aBlob,
                                 gfx::IntSize aSize, gfx::SurfaceFormat aFormat,
                                 const uint16_t* aTileSize,
                                 const mozilla::wr::TileOffset* aTileOffset,
                                 const mozilla::wr::LayoutIntRect* aDirtyRect,
                                 Range<uint8_t> aOutput) {
   AUTO_PROFILER_TRACING("WebRender", "RasterizeSingleBlob", GRAPHICS);
-  MOZ_ASSERT(aSize.width > 0 && aSize.height > 0);
+  MOZ_RELEASE_ASSERT(aSize.width > 0 && aSize.height > 0);
   if (aSize.width <= 0 || aSize.height <= 0) {
     return false;
   }
 
   auto stride = aSize.width * gfx::BytesPerPixel(aFormat);
 
   if (aOutput.length() < static_cast<size_t>(aSize.height * stride)) {
     return false;
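
The hunk above promotes the size check to MOZ_RELEASE_ASSERT while keeping the
early return, so the invariant is loud in all builds yet the function still
fails gracefully where the assert is compiled away. A hedged sketch of that
belt-and-braces pattern using standard C++ in place of the MFBT macro.

#include <cstdio>
#include <cstdlib>

// Stand-in for MOZ_RELEASE_ASSERT: fires in release builds too.
#define RELEASE_ASSERT(cond)                       \
  do {                                             \
    if (!(cond)) {                                 \
      std::fprintf(stderr, "assert: %s\n", #cond); \
      std::abort();                                \
    }                                              \
  } while (0)

bool RenderSketch(int width, int height) {
  // Crash loudly in all builds if the caller hands us an empty surface...
  RELEASE_ASSERT(width > 0 && height > 0);
  // ...and still fail gracefully if the assert is ever compiled out.
  if (width <= 0 || height <= 0) {
    return false;
  }
  return true;  // would rasterize here
}
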
--- a/gfx/webrender_bindings/src/moz2d_renderer.rs
+++ b/gfx/webrender_bindings/src/moz2d_renderer.rs
@@ -472,16 +472,17 @@ impl AsyncBlobImageRasterizer for Moz2dB
 
     fn rasterize(&mut self, requests: &[BlobImageParams], low_priority: bool) -> Vec<(BlobImageRequest, BlobImageResult)> {
         // All we do here is spin up our workers to call back into gecko to replay the drawing commands.
         let _marker = GeckoProfilerMarker::new(b"BlobRasterization\0");
 
         let requests: Vec<Job> = requests.into_iter().map(|params| {
             let command = &self.blob_commands[&params.request.key];
             let blob = Arc::clone(&command.data);
+            assert!(params.descriptor.rect.size.width > 0 && params.descriptor.rect.size.height > 0);
             Job {
                 request: params.request,
                 descriptor: params.descriptor,
                 commands: blob,
                 dirty_rect: params.dirty_rect,
                 tile_size: command.tile_size,
             }
         }).collect();
@@ -517,16 +518,17 @@ fn rasterize_blob(job: Job) -> (BlobImag
         * descriptor.format.bytes_per_pixel()) as usize;
 
     let mut output = vec![0u8; buf_size];
 
     let dirty_rect = match job.dirty_rect {
         DirtyRect::Partial(rect) => Some(rect),
         DirtyRect::All => None,
     };
+    assert!(descriptor.rect.size.width > 0 && descriptor.rect.size.height > 0);
 
     let result = unsafe {
         if wr_moz2d_render_cb(
             ByteSlice::new(&job.commands[..]),
             descriptor.rect.size.width,
             descriptor.rect.size.height,
             descriptor.format,
             job.tile_size.as_ref(),
--- a/gfx/wr/webrender/src/resource_cache.rs
+++ b/gfx/wr/webrender/src/resource_cache.rs
@@ -166,17 +166,16 @@ struct BlobImageTemplate {
     dirty_rect: BlobDirtyRect,
     viewport_tiles: Option<TileRange>,
 }
 
 struct ImageResource {
     data: CachedImageData,
     descriptor: ImageDescriptor,
     tiling: Option<TileSize>,
-    viewport_tiles: Option<TileRange>,
 }
 
 #[derive(Clone, Debug)]
 pub struct ImageTiling {
     pub image_size: DeviceIntSize,
     pub tile_size: TileSize,
 }
 
@@ -846,17 +845,16 @@ impl ResourceCache {
             // if tiling was not requested.
             tiling = Some(DEFAULT_TILE_SIZE);
         }
 
         let resource = ImageResource {
             descriptor,
             data,
             tiling,
-            viewport_tiles: None,
         };
 
         self.resources.image_templates.insert(image_key, resource);
     }
 
     pub fn update_image_template(
         &mut self,
         image_key: ImageKey,
@@ -904,17 +902,16 @@ impl ResourceCache {
             }
             _ => {}
         }
 
         *image = ImageResource {
             descriptor,
             data,
             tiling,
-            viewport_tiles: image.viewport_tiles,
         };
     }
 
     // Happens before scene building.
     pub fn add_blob_image(
         &mut self,
         key: BlobImageKey,
         descriptor: &ImageDescriptor,
@@ -1240,16 +1237,17 @@ impl ResourceCache {
                                 &template.descriptor.size.into(),
                                 tile_size,
                                 tile,
                             )),
                         },
                         format: template.descriptor.format,
                     };
 
+                    assert!(descriptor.rect.size.width > 0 && descriptor.rect.size.height > 0);
                     // TODO: We only track dirty rects for non-tiled blobs but we
                     // should also do it with tiled ones unless we settle for a small
                     // tile size.
                     blob_request_params.push(
                         BlobImageParams {
                             request: BlobImageRequest {
                                 key: *key,
                                 tile: Some(tile),
@@ -1288,16 +1286,17 @@ impl ResourceCache {
 
                 let dirty_rect = if needs_upload {
                     // The texture cache entry has been evicted, treat it as all dirty.
                     DirtyRect::All
                 } else {
                     template.dirty_rect
                 };
 
+                assert!(template.descriptor.size.width > 0 && template.descriptor.size.height > 0);
                 blob_request_params.push(
                     BlobImageParams {
                         request: BlobImageRequest {
                             key: *key,
                             tile: None,
                         },
                         descriptor: BlobImageDescriptor {
                             rect: blob_size(template.descriptor.size).into(),
@@ -2261,17 +2260,16 @@ impl ResourceCache {
                     CachedImageData::Raw(arc)
                 }
             };
 
             res.image_templates.images.insert(key, ImageResource {
                 data,
                 descriptor: template.descriptor,
                 tiling: template.tiling,
-                viewport_tiles: None,
             });
         }
 
         external_images
     }
 }
 
 /// For now the blob's coordinate space has the same pixel sizes as the
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -67,16 +67,17 @@ struct NurseryChunk {
 static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
               "Nursery chunk size must match gc::Chunk size.");
 
 } /* namespace js */
 
 inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t extent) {
   MOZ_ASSERT(extent <= ChunkSize);
   MOZ_MAKE_MEM_UNDEFINED(this, extent);
+  MOZ_MAKE_MEM_UNDEFINED(&trailer, sizeof(trailer));
 
   Poison(this, JS_FRESH_NURSERY_PATTERN, extent, MemCheckKind::MakeUndefined);
 
   new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer());
 }
 
 inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
   MOZ_ASSERT(extent <= ChunkSize);
@@ -185,18 +186,19 @@ bool js::Nursery::init(uint32_t maxNurse
 
   if (!allocateNextChunk(0, lock)) {
     return false;
   }
   capacity_ = roundSize(tunables().gcMinNurseryBytes());
   MOZ_ASSERT(capacity_ >= ArenaSize);
   /* After this point the Nursery has been enabled */
 
-  setCurrentChunk(0, true);
+  setCurrentChunk(0);
   setStartPosition();
+  poisonAndInitCurrentChunk(true);
 
   char* env = getenv("JS_GC_PROFILE_NURSERY");
   if (env) {
     if (0 == strcmp(env, "help")) {
       fprintf(stderr,
               "JS_GC_PROFILE_NURSERY=N\n"
               "\tReport minor GCs taking at least N microseconds.\n");
       exit(0);
@@ -238,18 +240,19 @@ void js::Nursery::enable() {
     AutoLockGCBgAlloc lock(runtime());
     if (!allocateNextChunk(0, lock)) {
       return;
     }
     capacity_ = roundSize(tunables().gcMinNurseryBytes());
     MOZ_ASSERT(capacity_ >= ArenaSize);
   }
 
-  setCurrentChunk(0, true);
+  setCurrentChunk(0);
   setStartPosition();
+  poisonAndInitCurrentChunk(true);
 #ifdef JS_GC_ZEAL
   if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
     enterZealMode();
   }
 #endif
 
   MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer().enable());
 }
@@ -303,18 +306,19 @@ void js::Nursery::enterZealMode() {
     capacity_ = chunkCountLimit() * ChunkSize;
     setCurrentEnd();
   }
 }
 
 void js::Nursery::leaveZealMode() {
   if (isEnabled()) {
     MOZ_ASSERT(isEmpty());
-    setCurrentChunk(0, true);
+    setCurrentChunk(0);
     setStartPosition();
+    poisonAndInitCurrentChunk(true);
   }
 }
 #endif  // JS_GC_ZEAL
 
 JSObject* js::Nursery::allocateObject(JSContext* cx, size_t size,
                                       size_t nDynamicSlots,
                                       const js::Class* clasp) {
   // Ensure there's enough space to replace the contents with a
@@ -405,16 +409,17 @@ void* js::Nursery::allocate(size_t size)
         if (!allocateNextChunk(chunkno, lock)) {
           return nullptr;
         }
       }
       timeInChunkAlloc_ += ReallyNow() - start;
       MOZ_ASSERT(chunkno < allocatedChunkCount());
     }
     setCurrentChunk(chunkno);
+    poisonAndInitCurrentChunk();
   }
 
   void* thing = (void*)position();
   position_ = position() + size;
   // We count this regardless of the profiler's state, assuming that it costs
   // just as much to count it as to check the profiler's state and decide not
   // to count it.
   stats().noteNurseryAlloc();
@@ -827,16 +832,17 @@ void js::Nursery::collect(JS::GCReason r
   // it is only used here, and ObjectGroup pointers are never
   // nursery-allocated.
   MOZ_ASSERT(!IsNurseryAllocable(AllocKind::OBJECT_GROUP));
 
   TenureCountCache tenureCounts;
   previousGC.reason = JS::GCReason::NO_REASON;
   if (!isEmpty()) {
     doCollection(reason, tenureCounts);
+    poisonAndInitCurrentChunk();
   } else {
     previousGC.nurseryUsedBytes = 0;
     previousGC.nurseryCapacity = capacity();
     previousGC.nurseryCommitted = committed();
     previousGC.tenuredBytes = 0;
     previousGC.tenuredCells = 0;
   }
 
@@ -1127,26 +1133,23 @@ void js::Nursery::clear() {
   for (unsigned i = currentStartChunk_; i < currentChunk_; ++i) {
     chunk(i).poisonAfterEvict();
   }
   MOZ_ASSERT(maxChunkCount() > 0);
   chunk(currentChunk_)
       .poisonAfterEvict(position() - chunk(currentChunk_).start());
 #endif
 
-  if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
-    /* Only reset the alloc point when we are close to the end. */
-    if (currentChunk_ + 1 == maxChunkCount()) {
-      setCurrentChunk(0, true);
-    } else {
-      // poisonAfterSweep poisons the chunk trailer. Ensure it's
-      // initialized.
-      chunk(currentChunk_).poisonAndInit(runtime());
-    }
-  } else {
+  /*
+   * Reset the start chunk & position if we're not in the GenerationalGC zeal
+   * mode, or if we're close to the end of the nursery.
+   */
+  if (!runtime()->hasZealMode(ZealMode::GenerationalGC) ||
+      currentChunk_ + 1 == maxChunkCount()) {
     setCurrentChunk(0);
   }
 
   /* Set current start position for isEmpty checks. */
   setStartPosition();
 }
 
 size_t js::Nursery::spaceToEnd(unsigned chunkCount) const {
@@ -1177,40 +1180,35 @@ size_t js::Nursery::spaceToEnd(unsigned 
     bytes = currentEnd_ - currentStartPosition_;
   }
 
   MOZ_ASSERT(bytes <= maxChunkCount() * ChunkSize);
 
   return bytes;
 }
 
-MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno,
-                                                    bool fullPoison) {
+MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno) {
   MOZ_ASSERT(chunkno < chunkCountLimit());
   MOZ_ASSERT(chunkno < allocatedChunkCount());
 
-  if (!fullPoison && chunkno == currentChunk_ &&
-      position_ < chunk(chunkno).end() && position_ >= chunk(chunkno).start()) {
-    // When we setup a new chunk the whole chunk must be poisoned with the
-    // correct value (JS_FRESH_NURSERY_PATTERN).
-    //  1. The first time it was used it was fully poisoned with the
-    //     correct value.
-    //  2. When it is swept, only the used part is poisoned with the swept
-    //     value.
-    //  3. We repoison the swept part here, with the correct value.
-    chunk(chunkno).poisonAndInit(runtime(), position_ - chunk(chunkno).start());
-  } else {
-    chunk(chunkno).poisonAndInit(runtime());
-  }
-
   currentChunk_ = chunkno;
   position_ = chunk(chunkno).start();
   setCurrentEnd();
 }
 
+void js::Nursery::poisonAndInitCurrentChunk(bool fullPoison) {
+  if (fullPoison || runtime()->hasZealMode(ZealMode::GenerationalGC) ||
+      !isSubChunkMode()) {
+    chunk(currentChunk_).poisonAndInit(runtime());
+  } else {
+    MOZ_ASSERT(isSubChunkMode());
+    chunk(currentChunk_).poisonAndInit(runtime(), capacity_);
+  }
+}
+
 MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
   MOZ_ASSERT_IF(isSubChunkMode(),
                 currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
   currentEnd_ =
       chunk(currentChunk_).start() + Min(capacity_, NurseryChunkUsableSize);
   if (canAllocateStrings_) {
     currentStringEnd_ = currentEnd_;
   }
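
One small but load-bearing detail in poisonAndInit above is the new
MOZ_MAKE_MEM_UNDEFINED over the trailer before it is re-constructed: the chunk
may carry poison state from a previous cycle, and the annotation tells the
sanitizer those bytes are about to be legitimately rewritten. A hedged sketch
of the pattern, with a no-op macro standing in for the MFBT/Valgrind hook.

#include <new>

// Stand-in for MOZ_MAKE_MEM_UNDEFINED: under Valgrind/MSan this would be the
// real annotation; in a plain build it compiles away, as here.
#define MAKE_MEM_UNDEFINED(addr, size) ((void)(addr), (void)(size))

struct TrailerSketch {
  void* runtime;
  explicit TrailerSketch(void* rt) : runtime(rt) {}
};

struct ChunkSketch {
  alignas(TrailerSketch) unsigned char storage[sizeof(TrailerSketch)];

  void poisonAndInit(void* rt) {
    // Declare the bytes "about to be rewritten" first, so stale poison from
    // a previous GC cycle can't trigger a false positive...
    MAKE_MEM_UNDEFINED(storage, sizeof storage);
    // ...then construct the trailer in place over the annotated bytes.
    new (storage) TrailerSketch(rt);
  }
};
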
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -579,17 +579,18 @@ class Nursery {
 
   /*
    * Set the current chunk. This updates the currentChunk_, position_,
    * currentEnd_ and currentStringEnd_ values as appropriate. Poisoning and
    * trailer initialization are now handled separately, by
    * poisonAndInitCurrentChunk().
    */
-  void setCurrentChunk(unsigned chunkno, bool fullPoison = false);
+  void setCurrentChunk(unsigned chunkno);
+  void poisonAndInitCurrentChunk(bool fullPoison = false);
   void setCurrentEnd();
   void setStartPosition();
 
   /*
    * Allocate the next chunk, or the first chunk for initialization.
    * Callers will probably want to call setCurrentChunk(0) next.
    */
   MOZ_MUST_USE bool allocateNextChunk(unsigned chunkno,
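
With the poisoning split out, every (re)initialization site in Nursery.cpp
above follows the same three-step order, with poisoning last so it observes
the final chunk choice. A hedged sketch of that sequence; NurserySketch only
mimics the methods' responsibilities, not the real implementation.

struct NurserySketch {
  unsigned currentChunk_ = 0;

  void setCurrentChunk(unsigned chunkno) { currentChunk_ = chunkno; }
  void setStartPosition() { /* record start position for isEmpty() */ }
  void poisonAndInitCurrentChunk(bool /*fullPoison*/) { /* poison + trailer */ }

  // Call order used by init(), enable() and leaveZealMode():
  void resetForReuse() {
    setCurrentChunk(0);               // pick chunk, set position_/currentEnd_
    setStartPosition();               // record the start for isEmpty() checks
    poisonAndInitCurrentChunk(true);  // poison last: it needs the final layout
  }
};
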
--- a/js/src/wasm/WasmAST.h
+++ b/js/src/wasm/WasmAST.h
@@ -397,31 +397,27 @@ enum class AstExprKind {
   Block,
   Branch,
   BranchTable,
   Call,
   CallIndirect,
   ComparisonOperator,
   Const,
   ConversionOperator,
-#ifdef ENABLE_WASM_BULKMEM_OPS
   DataOrElemDrop,
-#endif
   Drop,
   ExtraConversionOperator,
   First,
   GetGlobal,
   GetLocal,
   If,
   Load,
-#ifdef ENABLE_WASM_BULKMEM_OPS
   MemFill,
   MemOrTableCopy,
   MemOrTableInit,
-#endif
   MemoryGrow,
   MemorySize,
   Nop,
   Pop,
   RefNull,
   Return,
   SetGlobal,
   SetLocal,
@@ -816,17 +812,16 @@ class AstWake : public AstExpr {
   static const AstExprKind Kind = AstExprKind::Wake;
   explicit AstWake(const AstLoadStoreAddress& address, AstExpr* count)
       : AstExpr(Kind, ExprType::I32), address_(address), count_(count) {}
 
   const AstLoadStoreAddress& address() const { return address_; }
   AstExpr& count() const { return *count_; }
 };
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 class AstMemOrTableCopy : public AstExpr {
   bool isMem_;
   AstRef destTable_;
   AstExpr* dest_;
   AstRef srcTable_;
   AstExpr* src_;
   AstExpr* len_;
 
@@ -914,17 +909,16 @@ class AstMemOrTableInit : public AstExpr
   AstRef& targetMemory() {
     MOZ_ASSERT(isMem());
     return target_;
   }
   AstExpr& dst() const { return *dst_; }
   AstExpr& src() const { return *src_; }
   AstExpr& len() const { return *len_; }
 };
-#endif
 
 #ifdef ENABLE_WASM_REFTYPES
 class AstTableFill : public AstExpr {
   AstRef targetTable_;
   AstExpr* start_;
   AstExpr* val_;
   AstExpr* len_;
 
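The AST hunks above drop the ENABLE_WASM_BULKMEM_OPS guards entirely: the tree
can now always represent the bulk-memory ops, and availability is decided
where the opcode is compiled (see the compiler hunks below). A hedged sketch
of that "always represent, gate at acceptance" split; the names are
illustrative, not the SpiderMonkey types.

// Node kinds are unconditional: the AST can always represent the ops...
enum class ExprKindSketch { MemCopy, MemFill, MemInit, DataDrop, Nop };

// ...and feature gating happens where the expression is processed instead.
bool AcceptExprSketch(ExprKindSketch kind, bool bulkMemAvailable) {
  switch (kind) {
    case ExprKindSketch::MemCopy:
    case ExprKindSketch::MemFill:
    case ExprKindSketch::MemInit:
    case ExprKindSketch::DataDrop:
      return bulkMemAvailable;  // rejected at compile time when disabled
    default:
      return true;  // everything else is unaffected
  }
}
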
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -6850,27 +6850,28 @@ class BaseCompiler final : public BaseCo
   MOZ_MUST_USE bool emitAtomicRMW(ValType type, Scalar::Type viewType,
                                   AtomicOp op);
   MOZ_MUST_USE bool emitAtomicStore(ValType type, Scalar::Type viewType);
   MOZ_MUST_USE bool emitWait(ValType type, uint32_t byteSize);
   MOZ_MUST_USE bool emitWake();
   MOZ_MUST_USE bool emitAtomicXchg(ValType type, Scalar::Type viewType);
   void emitAtomicXchg64(MemoryAccessDesc* access, ValType type,
                         WantResult wantResult);
-#ifdef ENABLE_WASM_BULKMEM_OPS
+  MOZ_MUST_USE bool bulkmemOpsEnabled();
   MOZ_MUST_USE bool emitMemOrTableCopy(bool isMem);
   MOZ_MUST_USE bool emitDataOrElemDrop(bool isData);
   MOZ_MUST_USE bool emitMemFill();
   MOZ_MUST_USE bool emitMemOrTableInit(bool isMem);
-#endif
+#ifdef ENABLE_WASM_REFTYPES
   MOZ_MUST_USE bool emitTableFill();
   MOZ_MUST_USE bool emitTableGet();
   MOZ_MUST_USE bool emitTableGrow();
   MOZ_MUST_USE bool emitTableSet();
   MOZ_MUST_USE bool emitTableSize();
+#endif
   MOZ_MUST_USE bool emitStructNew();
   MOZ_MUST_USE bool emitStructGet();
   MOZ_MUST_USE bool emitStructSet();
   MOZ_MUST_USE bool emitStructNarrow();
 };
 
 void BaseCompiler::emitAddI32() {
   int32_t c;
@@ -10193,18 +10194,31 @@ bool BaseCompiler::emitWake() {
 
   if (deadCode_) {
     return true;
   }
 
   return emitInstanceCall(lineOrBytecode, SASigWake);
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
+// Bulk memory must be available if shared memory is enabled.
+bool BaseCompiler::bulkmemOpsEnabled() {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  if (!env_.sharedMemoryEnabled) {
+    return iter_.fail("bulk memory ops disabled");
+  }
+#endif
+  return true;
+}
+
 bool BaseCompiler::emitMemOrTableCopy(bool isMem) {
+  if (!bulkmemOpsEnabled()) {
+    return false;
+  }
+
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   uint32_t dstMemOrTableIndex = 0;
   uint32_t srcMemOrTableIndex = 0;
   Nothing nothing;
   if (!iter_.readMemOrTableCopy(isMem, &dstMemOrTableIndex, &nothing,
                                 &srcMemOrTableIndex, &nothing, &nothing)) {
     return false;
@@ -10229,16 +10243,20 @@ bool BaseCompiler::emitMemOrTableCopy(bo
       return false;
     }
   }
 
   return true;
 }
 
 bool BaseCompiler::emitDataOrElemDrop(bool isData) {
+  if (!bulkmemOpsEnabled()) {
+    return false;
+  }
+
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   uint32_t segIndex = 0;
   if (!iter_.readDataOrElemDrop(isData, &segIndex)) {
     return false;
   }
 
   if (deadCode_) {
@@ -10249,32 +10267,40 @@ bool BaseCompiler::emitDataOrElemDrop(bo
   pushI32(int32_t(segIndex));
 
   return emitInstanceCall(lineOrBytecode,
                           isData ? SASigDataDrop : SASigElemDrop,
                           /*pushReturnedValue=*/false);
 }
 
 bool BaseCompiler::emitMemFill() {
+  if (!bulkmemOpsEnabled()) {
+    return false;
+  }
+
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   Nothing nothing;
   if (!iter_.readMemFill(&nothing, &nothing, &nothing)) {
     return false;
   }
 
   if (deadCode_) {
     return true;
   }
 
   return emitInstanceCall(lineOrBytecode, SASigMemFill,
                           /*pushReturnedValue=*/false);
 }
 
 bool BaseCompiler::emitMemOrTableInit(bool isMem) {
+  if (!bulkmemOpsEnabled()) {
+    return false;
+  }
+
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   uint32_t segIndex = 0;
   uint32_t dstTableIndex = 0;
   Nothing nothing;
   if (!iter_.readMemOrTableInit(isMem, &segIndex, &dstTableIndex, &nothing,
                                 &nothing, &nothing)) {
     return false;
@@ -10295,18 +10321,18 @@ bool BaseCompiler::emitMemOrTableInit(bo
     if (!emitInstanceCall(lineOrBytecode, SASigTableInit,
                           /*pushReturnedValue=*/false)) {
       return false;
     }
   }
 
   return true;
 }
-#endif
-
+
+#ifdef ENABLE_WASM_REFTYPES
 MOZ_MUST_USE
 bool BaseCompiler::emitTableFill() {
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   Nothing nothing;
   uint32_t tableIndex;
   if (!iter_.readTableFill(&tableIndex, &nothing, &nothing, &nothing)) {
     return false;
@@ -10392,16 +10418,17 @@ bool BaseCompiler::emitTableSize() {
   }
   if (deadCode_) {
     return true;
   }
   // size(table:u32) -> u32
   pushI32(tableIndex);
   return emitInstanceCall(lineOrBytecode, SASigTableSize);
 }
+#endif
 
 bool BaseCompiler::emitStructNew() {
   uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
   uint32_t typeIndex;
   BaseOpIter::ValueVector args;
   if (!iter_.readStructNew(&typeIndex, &args)) {
     return false;
@@ -11510,32 +11537,30 @@ bool BaseCompiler::emitBody() {
                 emitConvertFloatingToInt64Callout,
                 SymbolicAddress::SaturatingTruncateDoubleToUint64, ValType::F64,
                 ValType::I64));
 #else
             CHECK_NEXT(emitConversionOOM(
                 emitTruncateF64ToI64<TRUNC_UNSIGNED | TRUNC_SATURATING>,
                 ValType::F64, ValType::I64));
 #endif
-#ifdef ENABLE_WASM_BULKMEM_OPS
           case uint32_t(MiscOp::MemCopy):
             CHECK_NEXT(emitMemOrTableCopy(/*isMem=*/true));
           case uint32_t(MiscOp::DataDrop):
             CHECK_NEXT(emitDataOrElemDrop(/*isData=*/true));
           case uint32_t(MiscOp::MemFill):
             CHECK_NEXT(emitMemFill());
           case uint32_t(MiscOp::MemInit):
             CHECK_NEXT(emitMemOrTableInit(/*isMem=*/true));
           case uint32_t(MiscOp::TableCopy):
             CHECK_NEXT(emitMemOrTableCopy(/*isMem=*/false));
           case uint32_t(MiscOp::ElemDrop):
             CHECK_NEXT(emitDataOrElemDrop(/*isData=*/false));
           case uint32_t(MiscOp::TableInit):
             CHECK_NEXT(emitMemOrTableInit(/*isMem=*/false));
-#endif  // ENABLE_WASM_BULKMEM_OPS
 #ifdef ENABLE_WASM_REFTYPES
           case uint32_t(MiscOp::TableFill):
             CHECK_NEXT(emitTableFill());
           case uint32_t(MiscOp::TableGrow):
             CHECK_NEXT(emitTableGrow());
           case uint32_t(MiscOp::TableSize):
             CHECK_NEXT(emitTableSize());
 #endif
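
A hedged distillation of the gate added above: even when
ENABLE_WASM_BULKMEM_OPS is compiled out, the patch's comment notes that bulk
memory must still be available wherever shared memory is enabled, so the check
combines a compile-time condition with a runtime one. FEATURE_BULK_MEM and
EnvSketch are illustrative stand-ins, not the SpiderMonkey build flags.

#include <string>

struct EnvSketch {
  bool sharedMemoryEnabled;
};

// #define FEATURE_BULK_MEM  // build-time toggle, standing in for
//                           // ENABLE_WASM_BULKMEM_OPS

bool BulkMemOpsEnabled(const EnvSketch& env, std::string* error) {
#ifndef FEATURE_BULK_MEM
  // Feature compiled out: only a shared-memory configuration may still use
  // the ops, matching the baseline compiler's bulkmemOpsEnabled() above.
  if (!env.sharedMemoryEnabled) {
    *error = "bulk memory ops disabled";
    return false;
  }
#endif
  return true;
}
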
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -2857,18 +2857,24 @@ static bool EmitAtomicXchg(FunctionCompi
   if (!f.inDeadCode() && !ins) {
     return false;
   }
 
   f.iter().setResult(ins);
   return true;
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool EmitMemOrTableCopy(FunctionCompiler& f, bool isMem) {
+  // Bulk memory must be available if shared memory is enabled.
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  if (!f.env().sharedMemoryEnabled) {
+    return f.iter().fail("bulk memory ops disabled");
+  }
+#endif
+
   MDefinition *dst, *src, *len;
   uint32_t dstTableIndex;
   uint32_t srcTableIndex;
   if (!f.iter().readMemOrTableCopy(isMem, &dstTableIndex, &dst, &srcTableIndex,
                                    &src, &len)) {
     return false;
   }
 
@@ -2913,16 +2919,23 @@ static bool EmitMemOrTableCopy(FunctionC
   if (!f.finishCall(&args)) {
     return false;
   }
 
   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
 }
 
 static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
+  // Bulk memory must be available if shared memory is enabled.
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  if (!f.env().sharedMemoryEnabled) {
+    return f.iter().fail("bulk memory ops disabled");
+  }
+#endif
+
   uint32_t segIndexVal = 0;
   if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
     return false;
   }
 
   if (f.inDeadCode()) {
     return true;
   }
@@ -2945,16 +2958,23 @@ static bool EmitDataOrElemDrop(FunctionC
   if (!f.finishCall(&args)) {
     return false;
   }
 
   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
 }
 
 static bool EmitMemFill(FunctionCompiler& f) {
+  // Bulk memory must be available if shared memory is enabled.
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  if (!f.env().sharedMemoryEnabled) {
+    return f.iter().fail("bulk memory ops disabled");
+  }
+#endif
+
   MDefinition *start, *val, *len;
   if (!f.iter().readMemFill(&start, &val, &len)) {
     return false;
   }
 
   if (f.inDeadCode()) {
     return true;
   }
@@ -2980,16 +3000,23 @@ static bool EmitMemFill(FunctionCompiler
   if (!f.finishCall(&args)) {
     return false;
   }
 
   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
 }
 
 static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
+  // Bulk memory must be available if shared memory is enabled.
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  if (!f.env().sharedMemoryEnabled) {
+    return f.iter().fail("bulk memory ops disabled");
+  }
+#endif
+
   uint32_t segIndexVal = 0, dstTableIndex = 0;
   MDefinition *dstOff, *srcOff, *len;
   if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstTableIndex, &dstOff,
                                    &srcOff, &len)) {
     return false;
   }
 
   if (f.inDeadCode()) {
@@ -3030,17 +3057,16 @@ static bool EmitMemOrTableInit(FunctionC
     }
   }
   if (!f.finishCall(&args)) {
     return false;
   }
 
   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
 }
-#endif  // ENABLE_WASM_BULKMEM_OPS
 
 #ifdef ENABLE_WASM_REFTYPES
 // Note, table.{get,grow,set} on table(funcref) are currently rejected by the
 // verifier.
 
 static bool EmitTableFill(FunctionCompiler& f) {
   uint32_t tableIndex;
   MDefinition *start, *val, *len;
@@ -3777,32 +3803,30 @@ static bool EmitBodyExprs(FunctionCompil
           case uint32_t(MiscOp::I64TruncSSatF32):
           case uint32_t(MiscOp::I64TruncUSatF32):
             CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
                                MiscOp(op.b1) == MiscOp::I64TruncUSatF32, true));
           case uint32_t(MiscOp::I64TruncSSatF64):
           case uint32_t(MiscOp::I64TruncUSatF64):
             CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
                                MiscOp(op.b1) == MiscOp::I64TruncUSatF64, true));
-#ifdef ENABLE_WASM_BULKMEM_OPS
           case uint32_t(MiscOp::MemCopy):
             CHECK(EmitMemOrTableCopy(f, /*isMem=*/true));
           case uint32_t(MiscOp::DataDrop):
             CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
           case uint32_t(MiscOp::MemFill):
             CHECK(EmitMemFill(f));
           case uint32_t(MiscOp::MemInit):
             CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
           case uint32_t(MiscOp::TableCopy):
             CHECK(EmitMemOrTableCopy(f, /*isMem=*/false));
           case uint32_t(MiscOp::ElemDrop):
             CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
           case uint32_t(MiscOp::TableInit):
             CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
-#endif
 #ifdef ENABLE_WASM_REFTYPES
           case uint32_t(MiscOp::TableFill):
             CHECK(EmitTableFill(f));
           case uint32_t(MiscOp::TableGrow):
             CHECK(EmitTableGrow(f));
           case uint32_t(MiscOp::TableSize):
             CHECK(EmitTableSize(f));
 #endif
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -430,22 +430,21 @@ void Module::addSizeOfMisc(MallocSizeOf 
   if (debugUnlinkedCode_) {
     *data += debugUnlinkedCode_->sizeOfExcludingThis(mallocSizeOf);
   }
 }
 
 void Module::initGCMallocBytesExcludingCode() {
   // The size doesn't have to be exact, so use the serialization framework to
   // calculate a value.
-  gcMallocBytesExcludingCode_ = sizeof(*this) +
-      SerializedVectorSize(imports_) +
-      SerializedVectorSize(exports_) +
-      SerializedVectorSize(dataSegments_) +
-      SerializedVectorSize(elemSegments_) +
-      SerializedVectorSize(customSections_);
+  gcMallocBytesExcludingCode_ = sizeof(*this) + SerializedVectorSize(imports_) +
+                                SerializedVectorSize(exports_) +
+                                SerializedVectorSize(dataSegments_) +
+                                SerializedVectorSize(elemSegments_) +
+                                SerializedVectorSize(customSections_);
 }
 
 // Extracting machine code as a JS object. The result has a "code" property, a
 // Uint8Array, and a "segments" property, an array of objects. Each object
 // holds offsets into the "code" array and basic information about a code
 // segment/function body.
 bool Module::extractCode(JSContext* cx, Tier tier,
                          MutableHandleValue vp) const {
@@ -564,121 +563,122 @@ bool Module::initSegments(JSContext* cx,
                           const JSFunctionVector& funcImports,
                           HandleWasmMemoryObject memoryObj,
                           const ValVector& globalImportValues) const {
   MOZ_ASSERT_IF(!memoryObj, AllSegmentsArePassive(dataSegments_));
 
   Instance& instance = instanceObj->instance();
   const SharedTableVector& tables = instance.tables();
 
-#ifndef ENABLE_WASM_BULKMEM_OPS
   // Bulk memory changes the error checking behavior: we may write partial data.
+  // We enable bulk memory semantics if shared memory is enabled.
+#ifdef ENABLE_WASM_BULKMEM_OPS
+  const bool eagerBoundsCheck = false;
+#else
+  // Bulk memory must be available if shared memory is enabled.
+  const bool eagerBoundsCheck =
+      !cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled();
+#endif
+
+  if (eagerBoundsCheck) {
+    // Perform all error checks up front so that this function does not perform
+    // partial initialization if an error is reported.
+
+    for (const ElemSegment* seg : elemSegments_) {
+      if (!seg->active()) {
+        continue;
+      }
+
+      uint32_t tableLength = tables[seg->tableIndex]->length();
+      uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
 
-  // Perform all error checks up front so that this function does not perform
-  // partial initialization if an error is reported.
+      if (offset > tableLength || tableLength - offset < seg->length()) {
+        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+                                 JSMSG_WASM_BAD_FIT, "elem", "table");
+        return false;
+      }
+    }
+
+    if (memoryObj) {
+      uint32_t memoryLength = memoryObj->volatileMemoryLength();
+      for (const DataSegment* seg : dataSegments_) {
+        if (!seg->active()) {
+          continue;
+        }
+
+        uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
+
+        if (offset > memoryLength ||
+            memoryLength - offset < seg->bytes.length()) {
+          JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+                                   JSMSG_WASM_BAD_FIT, "data", "memory");
+          return false;
+        }
+      }
+    }
+  }
+
+  // Write data/elem segments into memories/tables.
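+  //
+  // With the lazy (bulk-memory) checks below, a segment that extends past the
+  // end of its memory/table is written up to the boundary before the error is
+  // reported; e.g. a 10-byte data segment at offset memoryLength - 4 writes 4
+  // bytes and then fails.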
 
   for (const ElemSegment* seg : elemSegments_) {
-    if (!seg->active()) {
-      continue;
-    }
-
-    uint32_t tableLength = tables[seg->tableIndex]->length();
-    uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
-
-    if (offset > tableLength || tableLength - offset < seg->length()) {
-      JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_FIT,
-                               "elem", "table");
-      return false;
+    if (seg->active()) {
+      uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
+      uint32_t count = seg->length();
+      bool fail = false;
+      if (!eagerBoundsCheck) {
+        uint32_t tableLength = tables[seg->tableIndex]->length();
+        if (offset > tableLength) {
+          fail = true;
+          count = 0;
+        } else if (tableLength - offset < count) {
+          fail = true;
+          count = tableLength - offset;
+        }
+      }
+      if (count) {
+        instance.initElems(seg->tableIndex, *seg, offset, 0, count);
+      }
+      if (fail) {
+        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+                                 JSMSG_WASM_BAD_FIT, "elem", "table");
+        return false;
+      }
     }
   }
 
   if (memoryObj) {
     uint32_t memoryLength = memoryObj->volatileMemoryLength();
-    for (const DataSegment* seg : dataSegments_) {
-      if (!seg->active()) {
-        continue;
-      }
-
-      uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
-
-      if (offset > memoryLength ||
-          memoryLength - offset < seg->bytes.length()) {
-        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
-                                 JSMSG_WASM_BAD_FIT, "data", "memory");
-        return false;
-      }
-    }
-  }
-
-  // Now that initialization can't fail partway through, write data/elem
-  // segments into memories/tables.
-#endif
-
-  for (const ElemSegment* seg : elemSegments_) {
-    if (seg->active()) {
-      uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
-      uint32_t count = seg->length();
-#ifdef ENABLE_WASM_BULKMEM_OPS
-      uint32_t tableLength = tables[seg->tableIndex]->length();
-      bool fail = false;
-      if (offset > tableLength) {
-        fail = true;
-        count = 0;
-      } else if (tableLength - offset < count) {
-        fail = true;
-        count = tableLength - offset;
-      }
-#endif
-      if (count) {
-        instance.initElems(seg->tableIndex, *seg, offset, 0, count);
-      }
-#ifdef ENABLE_WASM_BULKMEM_OPS
-      if (fail) {
-        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
-                                 JSMSG_WASM_BAD_FIT, "elem", "table");
-        return false;
-      }
-#endif
-    }
-  }
-
-  if (memoryObj) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
-    uint32_t memoryLength = memoryObj->volatileMemoryLength();
-#endif
     uint8_t* memoryBase =
         memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
 
     for (const DataSegment* seg : dataSegments_) {
       if (!seg->active()) {
         continue;
       }
 
       uint32_t offset = EvaluateInitExpr(globalImportValues, seg->offset());
       uint32_t count = seg->bytes.length();
-#ifdef ENABLE_WASM_BULKMEM_OPS
       bool fail = false;
-      if (offset > memoryLength) {
-        fail = true;
-        count = 0;
-      } else if (memoryLength - offset < count) {
-        fail = true;
-        count = memoryLength - offset;
+      if (!eagerBoundsCheck) {
+        if (offset > memoryLength) {
+          fail = true;
+          count = 0;
+        } else if (memoryLength - offset < count) {
+          fail = true;
+          count = memoryLength - offset;
+        }
       }
-#endif
       if (count) {
         memcpy(memoryBase + offset, seg->bytes.begin(), count);
       }
-#ifdef ENABLE_WASM_BULKMEM_OPS
       if (fail) {
         JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
                                  JSMSG_WASM_BAD_FIT, "data", "memory");
         return false;
       }
-#endif
     }
   }
 
   return true;
 }
 
 static const Import& FindImportForFuncImport(const ImportVector& imports,
                                              uint32_t funcImportIndex) {
--- a/js/src/wasm/WasmOpIter.cpp
+++ b/js/src/wasm/WasmOpIter.cpp
@@ -35,21 +35,16 @@ using namespace js::wasm;
 #  else
 #    define WASM_REF_OP(code) break
 #  endif
 #  ifdef ENABLE_WASM_GC
 #    define WASM_GC_OP(code) return code
 #  else
 #    define WASM_GC_OP(code) break
 #  endif
-#  ifdef ENABLE_WASM_BULKMEM_OPS
-#    define WASM_BULK_OP(code) return code
-#  else
-#    define WASM_BULK_OP(code) break
-#  endif
 
 OpKind wasm::Classify(OpBytes op) {
   switch (Op(op.b0)) {
     case Op::Block:
       return OpKind::Block;
     case Op::Loop:
       return OpKind::Loop;
     case Op::Unreachable:
@@ -282,25 +277,25 @@ OpKind wasm::Classify(OpBytes op) {
         case MiscOp::I32TruncUSatF64:
         case MiscOp::I64TruncSSatF32:
         case MiscOp::I64TruncUSatF32:
         case MiscOp::I64TruncSSatF64:
         case MiscOp::I64TruncUSatF64:
           return OpKind::Conversion;
         case MiscOp::MemCopy:
         case MiscOp::TableCopy:
-          WASM_BULK_OP(OpKind::MemOrTableCopy);
+          return OpKind::MemOrTableCopy;
         case MiscOp::DataDrop:
         case MiscOp::ElemDrop:
-          WASM_BULK_OP(OpKind::DataOrElemDrop);
+          return OpKind::DataOrElemDrop;
         case MiscOp::MemFill:
-          WASM_BULK_OP(OpKind::MemFill);
+          return OpKind::MemFill;
         case MiscOp::MemInit:
         case MiscOp::TableInit:
-          WASM_BULK_OP(OpKind::MemOrTableInit);
+          return OpKind::MemOrTableInit;
         case MiscOp::TableFill:
           WASM_REF_OP(OpKind::TableFill);
         case MiscOp::TableGrow:
           WASM_REF_OP(OpKind::TableGrow);
         case MiscOp::TableSize:
           WASM_REF_OP(OpKind::TableSize);
         case MiscOp::StructNew:
           WASM_GC_OP(OpKind::StructNew);
@@ -440,12 +435,11 @@ OpKind wasm::Classify(OpBytes op) {
       }
       break;
     }
   }
   MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
 }
 
 #  undef WASM_GC_OP
-#  undef WASM_BULK_OP
 #  undef WASM_REF_OP
 
 #endif
--- a/js/src/wasm/WasmTextToBinary.cpp
+++ b/js/src/wasm/WasmTextToBinary.cpp
@@ -69,20 +69,18 @@ class WasmToken {
     BrTable,
     Call,
     CallIndirect,
     CloseParen,
     ComparisonOpcode,
     Const,
     ConversionOpcode,
     Data,
-#ifdef ENABLE_WASM_BULKMEM_OPS
     DataCount,
     DataDrop,
-#endif
     Drop,
     Elem,
     Else,
     End,
     EndOfFile,
     Equal,
     Error,
     Export,
@@ -99,56 +97,50 @@ class WasmToken {
     If,
     Import,
     Index,
     Memory,
     NegativeZero,
     Load,
     Local,
     Loop,
-#ifdef ENABLE_WASM_BULKMEM_OPS
     MemCopy,
     MemFill,
     MemInit,
-#endif
     MemoryGrow,
     MemorySize,
     Module,
     Mutable,
     Name,
 #ifdef ENABLE_WASM_GC
     StructNew,
     StructGet,
     StructSet,
     StructNarrow,
 #endif
     Nop,
     Offset,
     OpenParen,
     Param,
-#ifdef ENABLE_WASM_BULKMEM_OPS
     Passive,
-#endif
     Ref,
     RefNull,
     Result,
     Return,
     SetGlobal,
     SetLocal,
     Shared,
     SignedInteger,
     Start,
     Struct,
     Store,
     Table,
-#ifdef ENABLE_WASM_BULKMEM_OPS
     TableCopy,
     ElemDrop,
     TableInit,
-#endif
 #ifdef ENABLE_WASM_REFTYPES
     TableFill,
     TableGet,
     TableGrow,
     TableSet,
     TableSize,
 #endif
     TeeLocal,
@@ -305,51 +297,43 @@ class WasmToken {
       case BrIf:
       case BrTable:
       case Call:
       case CallIndirect:
       case ComparisonOpcode:
       case Const:
       case ConversionOpcode:
       case ExtraConversionOpcode:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case DataDrop:
-#endif
       case Drop:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case ElemDrop:
-#endif
       case GetGlobal:
       case GetLocal:
       case If:
       case Load:
       case Loop:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case MemCopy:
       case MemFill:
       case MemInit:
-#endif
       case MemoryGrow:
       case MemorySize:
 #ifdef ENABLE_WASM_GC
       case StructNew:
       case StructGet:
       case StructSet:
       case StructNarrow:
 #endif
       case Nop:
       case RefNull:
       case Return:
       case SetGlobal:
       case SetLocal:
       case Store:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case TableCopy:
       case TableInit:
-#endif
 #ifdef ENABLE_WASM_REFTYPES
       case TableFill:
       case TableGet:
       case TableGrow:
       case TableSet:
       case TableSize:
 #endif
       case TeeLocal:
@@ -357,19 +341,17 @@ class WasmToken {
       case UnaryOpcode:
       case Unreachable:
       case Wait:
       case Wake:
         return true;
       case Align:
       case CloseParen:
       case Data:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case DataCount:
-#endif
       case Elem:
       case Else:
       case EndOfFile:
       case Equal:
       case End:
       case Error:
       case Export:
       case Field:
@@ -385,19 +367,17 @@ class WasmToken {
       case Memory:
       case NegativeZero:
       case Local:
       case Module:
       case Name:
       case Offset:
       case OpenParen:
       case Param:
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case Passive:
-#endif
       case Ref:
       case Result:
       case Shared:
       case SignedInteger:
       case Start:
       case Struct:
       case Table:
       case Text:
@@ -989,38 +969,34 @@ WasmToken WasmTokenStream::next() {
       }
       if (consume(u"current_memory")) {
         return WasmToken(WasmToken::MemorySize, begin, cur_);
       }
       break;
 
     case 'd':
       if (consume(u"data")) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
         if (consume(u"count")) {
           return WasmToken(WasmToken::DataCount, begin, cur_);
         }
         if (consume(u".drop")) {
           return WasmToken(WasmToken::DataDrop, begin, cur_);
         }
-#endif
         return WasmToken(WasmToken::Data, begin, cur_);
       }
       if (consume(u"drop")) {
         return WasmToken(WasmToken::Drop, begin, cur_);
       }
       break;
 
     case 'e':
       if (consume(u"elem")) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
         if (consume(u".drop")) {
           return WasmToken(WasmToken::ElemDrop, begin, cur_);
         }
-#endif
         return WasmToken(WasmToken::Elem, begin, cur_);
       }
       if (consume(u"else")) {
         return WasmToken(WasmToken::Else, begin, cur_);
       }
       if (consume(u"end")) {
         return WasmToken(WasmToken::End, begin, cur_);
       }
@@ -2132,27 +2108,25 @@ WasmToken WasmTokenStream::next() {
       }
       if (consume(u"loop")) {
         return WasmToken(WasmToken::Loop, begin, cur_);
       }
       break;
 
     case 'm':
       if (consume(u"memory.")) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
         if (consume(u"copy")) {
           return WasmToken(WasmToken::MemCopy, begin, cur_);
         }
         if (consume(u"fill")) {
           return WasmToken(WasmToken::MemFill, begin, cur_);
         }
         if (consume(u"init")) {
           return WasmToken(WasmToken::MemInit, begin, cur_);
         }
-#endif
         if (consume(u"grow")) {
           return WasmToken(WasmToken::MemoryGrow, begin, cur_);
         }
         if (consume(u"size")) {
           return WasmToken(WasmToken::MemorySize, begin, cur_);
         }
         break;
       }
@@ -2181,21 +2155,19 @@ WasmToken WasmTokenStream::next() {
         return WasmToken(WasmToken::Offset, begin, cur_);
       }
       break;
 
     case 'p':
       if (consume(u"param")) {
         return WasmToken(WasmToken::Param, begin, cur_);
       }
-#ifdef ENABLE_WASM_BULKMEM_OPS
       if (consume(u"passive")) {
         return WasmToken(WasmToken::Passive, begin, cur_);
       }
-#endif
       break;
 
     case 'r':
       if (consume(u"result")) {
         return WasmToken(WasmToken::Result, begin, cur_);
       }
       if (consume(u"return")) {
         return WasmToken(WasmToken::Return, begin, cur_);
@@ -2246,24 +2218,22 @@ WasmToken WasmTokenStream::next() {
         }
 #endif
         return WasmToken(WasmToken::Struct, begin, cur_);
       }
       break;
 
     case 't':
       if (consume(u"table.")) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
         if (consume(u"copy")) {
           return WasmToken(WasmToken::TableCopy, begin, cur_);
         }
         if (consume(u"init")) {
           return WasmToken(WasmToken::TableInit, begin, cur_);
         }
-#endif
 #ifdef ENABLE_WASM_REFTYPES
         if (consume(u"fill")) {
           return WasmToken(WasmToken::TableFill, begin, cur_);
         }
         if (consume(u"get")) {
           return WasmToken(WasmToken::TableGet, begin, cur_);
         }
         if (consume(u"grow")) {
@@ -3668,17 +3638,16 @@ static AstMemoryGrow* ParseMemoryGrow(Wa
   AstExpr* operand = ParseExpr(c, inParens);
   if (!operand) {
     return nullptr;
   }
 
   return new (c.lifo) AstMemoryGrow(operand);
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static AstMemOrTableCopy* ParseMemOrTableCopy(WasmParseContext& c,
                                               bool inParens, bool isMem) {
   // (table.copy dest-table dest src-table src len)
   // (table.copy dest src len)
   // (memory.copy dest src len)
   //
   // Note that while the instruction *encoding* has src-table before dest-table,
   // we use the normal (dest, src) order in text.
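+  //
+  // For example (hypothetical table indices):
+  //   (table.copy 1 (i32.const 0) 2 (i32.const 16) (i32.const 8))
+  // copies 8 elements from table 2 at offset 16 into table 1 at offset 0.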
@@ -3802,17 +3771,16 @@ static AstMemOrTableInit* ParseMemOrTabl
   AstExpr* len = ParseExpr(c, inParens);
   if (!len) {
     return nullptr;
   }
 
   return new (c.lifo)
       AstMemOrTableInit(isMem, segIndex, targetMemOrTable, dst, src, len);
 }
-#endif
 
 #ifdef ENABLE_WASM_REFTYPES
 static AstTableFill* ParseTableFill(WasmParseContext& c, bool inParens) {
   // (table.fill table start val len)
   // (table.fill start val len)
 
   AstRef targetTable = AstRef(0);
   c.ts.getIfRef(&targetTable);
@@ -4068,32 +4036,30 @@ static AstExpr* ParseExprBody(WasmParseC
     case WasmToken::UnaryOpcode:
       return ParseUnaryOperator(c, token.op(), inParens);
     case WasmToken::Nop:
       return new (c.lifo) AstNop();
     case WasmToken::MemorySize:
       return new (c.lifo) AstMemorySize();
     case WasmToken::MemoryGrow:
       return ParseMemoryGrow(c, inParens);
-#ifdef ENABLE_WASM_BULKMEM_OPS
     case WasmToken::MemCopy:
       return ParseMemOrTableCopy(c, inParens, /*isMem=*/true);
     case WasmToken::DataDrop:
       return ParseDataOrElemDrop(c, /*isData=*/true);
     case WasmToken::MemFill:
       return ParseMemFill(c, inParens);
     case WasmToken::MemInit:
       return ParseMemOrTableInit(c, inParens, /*isMem=*/true);
     case WasmToken::TableCopy:
       return ParseMemOrTableCopy(c, inParens, /*isMem=*/false);
     case WasmToken::ElemDrop:
       return ParseDataOrElemDrop(c, /*isData=*/false);
     case WasmToken::TableInit:
       return ParseMemOrTableInit(c, inParens, /*isMem=*/false);
-#endif
 #ifdef ENABLE_WASM_REFTYPES
     case WasmToken::TableFill:
       return ParseTableFill(c, inParens);
     case WasmToken::TableGet:
       return ParseTableGet(c, inParens);
     case WasmToken::TableGrow:
       return ParseTableGrow(c, inParens);
     case WasmToken::TableSet:
@@ -4469,22 +4435,20 @@ static AstExpr* ParseInitializerConstExp
     return nullptr;
   }
 
   return initExpr;
 }
 
 static bool ParseInitializerExpressionOrPassive(WasmParseContext& c,
                                                 AstExpr** maybeInitExpr) {
-#ifdef ENABLE_WASM_BULKMEM_OPS
   if (c.ts.getIf(WasmToken::Passive)) {
     *maybeInitExpr = nullptr;
     return true;
   }
-#endif
 
   if (!c.ts.match(WasmToken::OpenParen, c.error)) {
     return false;
   }
 
   AstExpr* initExpr = ParseExprInsideParens(c);
   if (!initExpr) {
     return false;
@@ -4515,27 +4479,25 @@ static AstDataSegment* ParseDataSegment(
     if (!fragments.append(text.text())) {
       return nullptr;
     }
   }
 
   return new (c.lifo) AstDataSegment(offsetIfActive, std::move(fragments));
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool ParseDataCount(WasmParseContext& c, AstModule* module) {
   WasmToken token;
   if (!c.ts.getIf(WasmToken::Index, &token)) {
     c.ts.generateError(token, "Literal data segment count required", c.error);
     return false;
   }
 
   return module->initDataCount(token.index());
 }
-#endif
 
 static bool ParseLimits(WasmParseContext& c, Limits* limits,
                         Shareable allowShared) {
   WasmToken initial;
   if (!c.ts.match(WasmToken::Index, &initial, c.error)) {
     return false;
   }
 
@@ -5222,24 +5184,22 @@ static AstModule* ParseModule(const char
       }
       case WasmToken::Data: {
         AstDataSegment* segment = ParseDataSegment(c);
         if (!segment || !module->append(segment)) {
           return nullptr;
         }
         break;
       }
-#ifdef ENABLE_WASM_BULKMEM_OPS
       case WasmToken::DataCount: {
         if (!ParseDataCount(c, module)) {
           return nullptr;
         }
         break;
       }
-#endif
       case WasmToken::Import: {
         AstImport* imp = ParseImport(c, module);
         if (!imp || !module->append(imp)) {
           return nullptr;
         }
         break;
       }
       case WasmToken::Export: {
@@ -5688,17 +5648,16 @@ static bool ResolveWait(Resolver& r, Ast
   return ResolveLoadStoreAddress(r, s.address()) &&
          ResolveExpr(r, s.expected()) && ResolveExpr(r, s.timeout());
 }
 
 static bool ResolveWake(Resolver& r, AstWake& s) {
   return ResolveLoadStoreAddress(r, s.address()) && ResolveExpr(r, s.count());
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool ResolveMemOrTableCopy(Resolver& r, AstMemOrTableCopy& s) {
   return ResolveExpr(r, s.dest()) && ResolveExpr(r, s.src()) &&
          ResolveExpr(r, s.len()) &&
          (s.isMem() || r.resolveTable(s.destTable())) &&
          (s.isMem() || r.resolveTable(s.srcTable()));
 }
 
 static bool ResolveMemFill(Resolver& r, AstMemFill& s) {
@@ -5707,17 +5666,16 @@ static bool ResolveMemFill(Resolver& r, 
 }
 
 static bool ResolveMemOrTableInit(Resolver& r, AstMemOrTableInit& s) {
   return ResolveExpr(r, s.dst()) && ResolveExpr(r, s.src()) &&
          ResolveExpr(r, s.len()) &&
          (s.isMem() ? r.resolveMemory(s.targetMemory())
                     : r.resolveTable(s.targetTable()));
 }
-#endif
 
 #ifdef ENABLE_WASM_REFTYPES
 static bool ResolveTableFill(Resolver& r, AstTableFill& s) {
   return ResolveExpr(r, s.start()) && ResolveExpr(r, s.val()) &&
          ResolveExpr(r, s.len()) && r.resolveTable(s.targetTable());
 }
 
 static bool ResolveTableGet(Resolver& r, AstTableGet& s) {
@@ -5856,26 +5814,24 @@ static bool ResolveExpr(Resolver& r, Ast
     case AstExprKind::AtomicRMW:
       return ResolveAtomicRMW(r, expr.as<AstAtomicRMW>());
     case AstExprKind::AtomicStore:
       return ResolveAtomicStore(r, expr.as<AstAtomicStore>());
     case AstExprKind::Wait:
       return ResolveWait(r, expr.as<AstWait>());
     case AstExprKind::Wake:
       return ResolveWake(r, expr.as<AstWake>());
-#ifdef ENABLE_WASM_BULKMEM_OPS
     case AstExprKind::MemOrTableCopy:
       return ResolveMemOrTableCopy(r, expr.as<AstMemOrTableCopy>());
     case AstExprKind::DataOrElemDrop:
       return true;
     case AstExprKind::MemFill:
       return ResolveMemFill(r, expr.as<AstMemFill>());
     case AstExprKind::MemOrTableInit:
       return ResolveMemOrTableInit(r, expr.as<AstMemOrTableInit>());
-#endif
 #ifdef ENABLE_WASM_REFTYPES
     case AstExprKind::TableFill:
       return ResolveTableFill(r, expr.as<AstTableFill>());
     case AstExprKind::TableGet:
       return ResolveTableGet(r, expr.as<AstTableGet>());
     case AstExprKind::TableGrow:
       return ResolveTableGrow(r, expr.as<AstTableGrow>());
     case AstExprKind::TableSet:
@@ -6462,17 +6418,16 @@ static bool EncodeWait(Encoder& e, AstWa
          e.writeOp(s.op()) && EncodeLoadStoreFlags(e, s.address());
 }
 
 static bool EncodeWake(Encoder& e, AstWake& s) {
   return EncodeLoadStoreAddress(e, s.address()) && EncodeExpr(e, s.count()) &&
          e.writeOp(ThreadOp::Wake) && EncodeLoadStoreFlags(e, s.address());
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool EncodeMemOrTableCopy(Encoder& e, AstMemOrTableCopy& s) {
   return EncodeExpr(e, s.dest()) && EncodeExpr(e, s.src()) &&
          EncodeExpr(e, s.len()) &&
          e.writeOp(s.isMem() ? MiscOp::MemCopy : MiscOp::TableCopy) &&
          e.writeVarU32(s.isMem() ? 0 : s.srcTable().index()) &&
          e.writeVarU32(s.isMem() ? 0 : s.destTable().index());
 }
 
@@ -6488,17 +6443,16 @@ static bool EncodeMemFill(Encoder& e, As
 }
 
 static bool EncodeMemOrTableInit(Encoder& e, AstMemOrTableInit& s) {
   return EncodeExpr(e, s.dst()) && EncodeExpr(e, s.src()) &&
          EncodeExpr(e, s.len()) &&
          e.writeOp(s.isMem() ? MiscOp::MemInit : MiscOp::TableInit) &&
          e.writeVarU32(s.segIndex()) && e.writeVarU32(s.target().index());
 }
-#endif
 
 #ifdef ENABLE_WASM_REFTYPES
 static bool EncodeTableFill(Encoder& e, AstTableFill& s) {
   return EncodeExpr(e, s.start()) && EncodeExpr(e, s.val()) &&
          EncodeExpr(e, s.len()) && e.writeOp(MiscOp::TableFill) &&
          e.writeVarU32(s.targetTable().index());
 }
 
@@ -6663,26 +6617,24 @@ static bool EncodeExpr(Encoder& e, AstEx
     case AstExprKind::AtomicRMW:
       return EncodeAtomicRMW(e, expr.as<AstAtomicRMW>());
     case AstExprKind::AtomicStore:
       return EncodeAtomicStore(e, expr.as<AstAtomicStore>());
     case AstExprKind::Wait:
       return EncodeWait(e, expr.as<AstWait>());
     case AstExprKind::Wake:
       return EncodeWake(e, expr.as<AstWake>());
-#ifdef ENABLE_WASM_BULKMEM_OPS
     case AstExprKind::MemOrTableCopy:
       return EncodeMemOrTableCopy(e, expr.as<AstMemOrTableCopy>());
     case AstExprKind::DataOrElemDrop:
       return EncodeDataOrElemDrop(e, expr.as<AstDataOrElemDrop>());
     case AstExprKind::MemFill:
       return EncodeMemFill(e, expr.as<AstMemFill>());
     case AstExprKind::MemOrTableInit:
       return EncodeMemOrTableInit(e, expr.as<AstMemOrTableInit>());
-#endif
 #ifdef ENABLE_WASM_REFTYPES
     case AstExprKind::TableFill:
       return EncodeTableFill(e, expr.as<AstTableFill>());
     case AstExprKind::TableGet:
       return EncodeTableGet(e, expr.as<AstTableGet>());
     case AstExprKind::TableGrow:
       return EncodeTableGrow(e, expr.as<AstTableGrow>());
     case AstExprKind::TableSet:
@@ -7245,17 +7197,16 @@ static bool EncodeDataSection(Encoder& e
       return false;
     }
   }
 
   e.finishSection(offset);
   return true;
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool EncodeDataCountSection(Encoder& e, AstModule& module) {
   if (module.dataCount().isNothing()) {
     return true;
   }
 
   size_t offset;
   if (!e.startSection(SectionId::DataCount, &offset)) {
     return false;
@@ -7263,17 +7214,16 @@ static bool EncodeDataCountSection(Encod
 
   if (!e.writeVarU32(*module.dataCount())) {
     return false;
   }
 
   e.finishSection(offset);
   return true;
 }
-#endif
 
 static bool EncodeElemSegment(Encoder& e, AstElemSegment& segment) {
   if (!EncodeDestinationOffsetOrFlags(e, segment.targetTable().index(),
                                       segment.offsetIfActive())) {
     return false;
   }
 
   if (segment.isPassive()) {
@@ -7387,21 +7337,19 @@ static bool EncodeModule(AstModule& modu
   if (!EncodeStartSection(e, module)) {
     return false;
   }
 
   if (!EncodeElemSection(e, module)) {
     return false;
   }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
   if (!EncodeDataCountSection(e, module)) {
     return false;
   }
-#endif
 
   if (!EncodeCodeSection(e, offsets, module)) {
     return false;
   }
 
   if (!EncodeDataSection(e, module)) {
     return false;
   }
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -824,56 +824,96 @@ static bool DecodeFunctionBodyExprs(cons
           case uint32_t(MiscOp::I32TruncUSatF64):
             CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
           case uint32_t(MiscOp::I64TruncSSatF32):
           case uint32_t(MiscOp::I64TruncUSatF32):
             CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
           case uint32_t(MiscOp::I64TruncSSatF64):
           case uint32_t(MiscOp::I64TruncUSatF64):
             CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
-#ifdef ENABLE_WASM_BULKMEM_OPS
           case uint32_t(MiscOp::MemCopy): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedDestMemIndex;
             uint32_t unusedSrcMemIndex;
             CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
                                           &nothing, &unusedSrcMemIndex,
                                           &nothing, &nothing));
           }
           case uint32_t(MiscOp::DataDrop): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedSegIndex;
             CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
           }
           case uint32_t(MiscOp::MemFill):
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             CHECK(iter.readMemFill(&nothing, &nothing, &nothing));
           case uint32_t(MiscOp::MemInit): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedSegIndex;
             uint32_t unusedTableIndex;
             CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
                                           &unusedTableIndex, &nothing, &nothing,
                                           &nothing));
           }
           case uint32_t(MiscOp::TableCopy): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedDestTableIndex;
             uint32_t unusedSrcTableIndex;
             CHECK(iter.readMemOrTableCopy(
                 /*isMem=*/false, &unusedDestTableIndex, &nothing,
                 &unusedSrcTableIndex, &nothing, &nothing));
           }
           case uint32_t(MiscOp::ElemDrop): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedSegIndex;
             CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
           }
           case uint32_t(MiscOp::TableInit): {
+#ifndef ENABLE_WASM_BULKMEM_OPS
+            // Bulk memory must be available if shared memory is enabled.
+            if (!env.sharedMemoryEnabled) {
+              return iter.fail("bulk memory ops disabled");
+            }
+#endif
             uint32_t unusedSegIndex;
             uint32_t unusedTableIndex;
             CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
                                           &unusedTableIndex, &nothing, &nothing,
                                           &nothing));
           }
-#endif
 #ifdef ENABLE_WASM_REFTYPES
           case uint32_t(MiscOp::TableFill): {
             uint32_t unusedTableIndex;
             CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
                                      &nothing));
           }
           case uint32_t(MiscOp::TableGrow): {
             uint32_t unusedTableIndex;
@@ -2368,36 +2408,41 @@ static bool DecodeElemSection(Decoder& d
     }
 
     env->elemSegments.infallibleAppend(std::move(seg));
   }
 
   return d.finishSection(*range, "elem");
 }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
 static bool DecodeDataCountSection(Decoder& d, ModuleEnvironment* env) {
   MaybeSectionRange range;
   if (!d.startSection(SectionId::DataCount, env, &range, "datacount")) {
     return false;
   }
   if (!range) {
     return true;
   }
 
+#ifndef ENABLE_WASM_BULKMEM_OPS
+  // Bulk memory must be available if shared memory is enabled.
+  if (!env->sharedMemoryEnabled) {
+    return d.fail("bulk memory ops disabled");
+  }
+#endif
+
   uint32_t dataCount;
   if (!d.readVarU32(&dataCount)) {
     return d.fail("expected data segment count");
   }
 
   env->dataCount.emplace(dataCount);
 
   return d.finishSection(*range, "datacount");
 }
-#endif
 
 bool wasm::StartsCodeSection(const uint8_t* begin, const uint8_t* end,
                              SectionRange* codeSection) {
   UniqueChars unused;
   Decoder d(begin, end, 0, &unused);
 
   if (!DecodePreamble(d)) {
     return false;
@@ -2470,21 +2515,19 @@ bool wasm::DecodeModuleEnvironment(Decod
   if (!DecodeStartSection(d, env)) {
     return false;
   }
 
   if (!DecodeElemSection(d, env)) {
     return false;
   }
 
-#ifdef ENABLE_WASM_BULKMEM_OPS
   if (!DecodeDataCountSection(d, env)) {
     return false;
   }
-#endif
 
   if (!d.startSection(SectionId::Code, env, &env->codeSection, "code")) {
     return false;
   }
 
   if (env->codeSection && env->codeSection->size > MaxCodeSectionBytes) {
     return d.fail("code section too big");
   }
--- a/js/src/wasm/cranelift/src/wasm2clif.rs
+++ b/js/src/wasm/cranelift/src/wasm2clif.rs
@@ -24,17 +24,17 @@ use cranelift_codegen::cursor::{Cursor, 
 use cranelift_codegen::entity::{EntityRef, PrimaryMap, SecondaryMap};
 use cranelift_codegen::ir;
 use cranelift_codegen::ir::condcodes::IntCC;
 use cranelift_codegen::ir::InstBuilder;
 use cranelift_codegen::isa::{CallConv, TargetFrontendConfig, TargetIsa};
 use cranelift_codegen::packed_option::PackedOption;
 use cranelift_wasm::{
     FuncEnvironment, FuncIndex, GlobalIndex, GlobalVariable, MemoryIndex, ReturnMode,
-    SignatureIndex, TableIndex, WasmResult,
+    SignatureIndex, TableIndex, WasmError, WasmResult,
 };
 
 use crate::bindings;
 use crate::compile::{symbolic_function_name, wasm_function_name};
 
 /// Get the integer type used for representing pointers on this platform.
 fn native_pointer_type() -> ir::Type {
     if cfg!(target_pointer_width = "64") {
@@ -431,17 +431,21 @@ impl<'a, 'b, 'c> FuncEnvironment for Tra
         Ok(GlobalVariable::Memory {
             gv: base_gv,
             ty: mem_ty,
             offset,
         })
     }
 
     fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<ir::Heap> {
-        assert_eq!(index.index(), 0, "Only one WebAssembly memory supported");
+        // Currently, Baldrdash doesn't support multiple memories.
+        if index.index() != 0 {
+            return Err(WasmError::Unsupported("only one wasm memory supported"));
+        }
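+        // Returning a validation error here (rather than asserting, as this
+        // code used to) lets a module that declares multiple memories fail
+        // gracefully at compile time.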
+
         // Get the address of the `TlsData::memoryBase` field.
         let base_addr = self.get_vmctx_gv(func);
         // Get the `TlsData::memoryBase` field. We assume this is never modified during execution
         // of the function.
         let base = func.create_global_value(ir::GlobalValueData::Load {
             base: base_addr,
             offset: offset32(0),
             global_type: native_pointer_type(),
@@ -547,18 +551,20 @@ impl<'a, 'b, 'c> FuncEnvironment for Tra
         table: ir::Table,
         sig_index: SignatureIndex,
         sig_ref: ir::SigRef,
         callee: ir::Value,
         call_args: &[ir::Value],
     ) -> WasmResult<ir::Inst> {
         let wsig = self.env.signature(sig_index);
 
-        // Currently, WebAssembly doesn't support multiple tables. That may change.
-        assert_eq!(table_index.index(), 0);
+        // Currently, Baldrdash doesn't support multiple tables.
+        if table_index.index() != 0 {
+            return Err(WasmError::Unsupported("only one wasm table supported"));
+        }
         let wtable = self.get_table(pos.func, table_index);
 
         // Follows `MacroAssembler::wasmCallIndirect`:
 
         // 1. Materialize the signature ID.
         let sigid_value = match wsig.id_kind() {
             bindings::FuncTypeIdDescKind::None => None,
             bindings::FuncTypeIdDescKind::Immediate => {
--- a/mobile/android/app/mobile.js
+++ b/mobile/android/app/mobile.js
@@ -838,12 +838,13 @@ pref("media.navigator.permission.device"
 
 // Allow system add-on updates
 pref("extensions.systemAddon.update.url", "https://aus5.mozilla.org/update/3/SystemAddons/%VERSION%/%BUILD_ID%/%BUILD_TARGET%/%LOCALE%/%CHANNEL%/%OS_VERSION%/%DISTRIBUTION%/%DISTRIBUTION_VERSION%/update.xml");
 
 // E10s stuff. We don't support 'file' or 'privileged' process types.
 pref("browser.tabs.remote.separateFileUriProcess", false);
 pref("browser.tabs.remote.allowLinkedWebInFileUriProcess", true);
 pref("browser.tabs.remote.separatePrivilegedContentProcess", false);
+pref("browser.tabs.remote.enforceRemoteTypeRestrictions", false);
 
 // Allow Web Authentication
 pref("security.webauth.webauthn_enable_android_fido2", true);
 pref("browser.tabs.remote.separatePrivilegedMozillaWebContentProcess", false);
--- a/mobile/android/base/java/org/mozilla/gecko/home/TabMenuStripLayout.java
+++ b/mobile/android/base/java/org/mozilla/gecko/home/TabMenuStripLayout.java
@@ -124,16 +124,19 @@ class TabMenuStripLayout extends ThemedL
 
     void onPageSelected(final int position) {
         // Callback to measure and draw the strip after the view is visible.
         ViewTreeObserver vto = getViewTreeObserver();
         if (vto.isAlive()) {
             vto.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
                 @Override
                 public void onGlobalLayout() {
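+                    // The observer may have died between registration and
+                    // this callback; removeOnGlobalLayoutListener throws
+                    // IllegalStateException on a dead ViewTreeObserver.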
+                    if (!vto.isAlive()) {
+                        return;
+                    }
                     // let's ensure that we are calling this only once
                     vto.removeOnGlobalLayoutListener(this);
                     if (selectedView != null) {
                         selectedView.setTextColor(inactiveTextColor);
                     }
 
                     selectedView = (TextView) getChildAt(position);
                     selectedView.setTextColor(activeTextColor);
--- a/mobile/android/extensions/webcompat/injections.js
+++ b/mobile/android/extensions/webcompat/injections.js
@@ -124,16 +124,25 @@ for (const injection of [
     id: "bug1432935-breitbart",
     platform: "desktop",
     domain: "breitbart.com",
     bug: "1432935",
     contentScripts: {
       matches: ["*://*.breitbart.com/*"],
       css: [{file: "injections/css/bug1432935-breitbart.com-webkit-scrollbar.css"}],
     },
+  }, {
+    id: "bug1561371",
+    platform: "android",
+    domain: "mail.google.com",
+    bug: "1561371",
+    contentScripts: {
+      matches: ["*://mail.google.com/*"],
+      css: [{file: "injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css"}],
+    },
   },
 ]) {
   Injections.push(injection);
 }
 
 let port = browser.runtime.connect();
 const ActiveInjections = new Map();
 
new file mode 100644
--- /dev/null
+++ b/mobile/android/extensions/webcompat/injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css
@@ -0,0 +1,12 @@
+/**
+ * mail.google.com - The HTML email view does not allow horizontal scrolling
+ * on Fennec because the CSS rule that permits it is served only to Chrome.
+ * Bug #1561371 - https://bugzilla.mozilla.org/show_bug.cgi?id=1561371
+ *
+ * HTML emails may sometimes contain content that does not wrap, yet the
+ * CSS served to Fennec does not permit scrolling horizontally. To prevent
+ * this UX frustration, we enable horizontal scrolling.
+ */
+body > #views > div {
+  overflow: auto;
+}
--- a/mobile/android/extensions/webcompat/manifest.json
+++ b/mobile/android/extensions/webcompat/manifest.json
@@ -1,13 +1,13 @@
 {
   "manifest_version": 2,
   "name": "Web Compat",
   "description": "Urgent post-release fixes for web compatibility.",
-  "version": "4.3.1",
+  "version": "4.3.2",
 
   "applications": {
     "gecko": {
       "id": "webcompat@mozilla.org",
       "strict_min_version": "59.0b5"
     }
   },
 
--- a/mobile/android/extensions/webcompat/moz.build
+++ b/mobile/android/extensions/webcompat/moz.build
@@ -22,17 +22,18 @@ FINAL_TARGET_FILES.features['webcompat@m
 ]
 
 FINAL_TARGET_FILES.features['webcompat@mozilla.org']['injections']['css'] += [
   'injections/css/bug0000000-dummy-css-injection.css',
   'injections/css/bug1305028-gaming.youtube.com-webkit-scrollbar.css',
   'injections/css/bug1432935-breitbart.com-webkit-scrollbar.css',
   'injections/css/bug1432935-discordapp.com-webkit-scorllbar-white-line.css',
   'injections/css/bug1518781-twitch.tv-webkit-scrollbar.css',
-  'injections/css/bug1526977-sreedharscce.in-login-fix.css'
+  'injections/css/bug1526977-sreedharscce.in-login-fix.css',
+  'injections/css/bug1561371-mail.google.com-allow-horizontal-scrolling.css'
 ]
 
 FINAL_TARGET_FILES.features['webcompat@mozilla.org']['injections']['js'] += [
   'injections/js/bug0000000-dummy-js-injection.js',
   'injections/js/bug1452707-window.controllers-shim-ib.absa.co.za.js',
   'injections/js/bug1457335-histography.io-ua-change.js',
   'injections/js/bug1472075-bankofamerica.com-ua-change.js',
   'injections/js/bug1472081-election.gov.np-window.sidebar-shim.js',
--- a/mobile/android/extensions/webcompat/ua_overrides.js
+++ b/mobile/android/extensions/webcompat/ua_overrides.js
@@ -20,34 +20,16 @@ for (const override of [
     config: {
       matches: ["*://webcompat-addon-testcases.schub.io/*"],
       uaTransformer: (originalUA) => {
         return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1464106 - directvnow.com - Create a UA override for Directvnow.com for playback on desktop
-     * WebCompat issue #3846 - https://webcompat.com/issues/3846
-     *
-     * directvnow.com is blocking Firefox via UA sniffing. Outreach is still going
-     * on, and playback works fine if we spoof as Chrome.
-     */
-    id: "bug1464106",
-    platform: "desktop",
-    domain: "directvnow.com",
-    bug: "1464106",
-    config: {
-      matches: ["*://*.directvnow.com/*"],
-      uaTransformer: (originalUA) => {
-        return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1480710 - m.imgur.com - Build UA override
      * WebCompat issue #13154 - https://webcompat.com/issues/13154
      *
      * imgur returns a 404 for requests to CSS and JS files if requested with a Fennec
      * User Agent. By removing the Fennec identifiers and adding Chrome Mobile's, we
      * receive the correct CSS and JS files.
      */
     id: "bug1480710",
@@ -57,34 +39,16 @@ for (const override of [
     config: {
       matches: ["*://m.imgur.com/*"],
       uaTransformer: (originalUA) => {
         return UAHelpers.getPrefix(originalUA) + " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.85 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 755590 - sites.google.com - top bar doesn't show up in Firefox for Android
-     *
-     * Google Sites does show a different top bar template based on the User Agent.
-     * For Fennec, this results in a broken top bar. Appending Chrome and Mobile Safari
-     * identifiers to the UA results in a correct rendering.
-     */
-    id: "bug755590",
-    platform: "android",
-    domain: "sites.google.com",
-    bug: "755590",
-    config: {
-      matches: ["*://sites.google.com/*"],
-      uaTransformer: (originalUA) => {
-        return originalUA + " Chrome/68.0.3440.85 Mobile Safari/537.366";
-      },
-    },
-  }, {
-    /*
      * Bug 945963 - tieba.baidu.com serves simplified mobile content to Firefox Android
      * WebCompat issue #18455 - https://webcompat.com/issues/18455
      *
      * tieba.baidu.com and tiebac.baidu.com serve a heavily simplified and less functional
      * mobile experience to Firefox for Android users. Adding the AppleWebKit indicator
      * to the User Agent gets us the same experience.
      */
     id: "bug945963",
@@ -94,60 +58,16 @@ for (const override of [
     config: {
       matches: ["*://tieba.baidu.com/*", "*://tiebac.baidu.com/*"],
       uaTransformer: (originalUA) => {
         return originalUA + " AppleWebKit/537.36 (KHTML, like Gecko)";
       },
     },
   }, {
     /*
-     * Bug 1518625 - rottentomatoes.com - Add UA override for videos on www.rottentomatoes.com
-     *
-     * The video framework loaded in via pdk.theplatform.com fails to
-     * acknowledge that Firefox does support HLS, so it fails to find a
-     * supported video format and shows the loading bar forever. Spoofing as
-     * Chrome works.
-     *
-     * Contrary to other PDK sites, rottentomatoes sometimes uses an iFrame to
-     * player.theplatform.com to show a video, so we need to override that domain
-     * as well.
-     */
-    id: "bug1518625",
-    platform: "android",
-    domain: "rottentomatoes.com",
-    bug: "1518625",
-    config: {
-      matches: [
-        "*://*.rottentomatoes.com/*",
-        "*://player.theplatform.com/*",
-      ],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
-     * Bug 1177298 - Write UA overrides for top Japanese Sites
-     * (Imported from ua-update.json.in)
-     *
-     * To receive the proper mobile version instead of the desktop version or
-     * a lower grade mobile experience, the UA is spoofed.
-     */
-    id: "bug1177298-1",
-    platform: "android",
-    domain: "weather.yahoo.co.jp",
-    bug: "1177298",
-    config: {
-      matches: ["*://weather.yahoo.co.jp/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 5.0.2; Galaxy Nexus Build/IMM76B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.93 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1177298 - Write UA overrides for top Japanese Sites
      * (Imported from ua-update.json.in)
      *
      * To receive the proper mobile version instead of the desktop version or
      * a lower grade mobile experience, the UA is spoofed.
      */
     id: "bug1177298-2",
     platform: "android",
@@ -174,34 +94,16 @@ for (const override of [
     config: {
       matches: ["*://*.nhk.or.jp/*"],
       uaTransformer: (originalUA) => {
         return originalUA + " AppleWebKit";
       },
     },
   }, {
     /*
-     * Bug 1177298 - Write UA overrides for top Japanese Sites
-     * (Imported from ua-update.json.in)
-     *
-     * To receive the proper mobile version instead of the desktop version or
-     * a lower grade mobile experience, the UA is spoofed.
-     */
-    id: "bug1177298-4",
-    platform: "android",
-    domain: "uniqlo.com",
-    bug: "1177298",
-    config: {
-      matches: ["*://*.uniqlo.com/*"],
-      uaTransformer: (originalUA) => {
-        return originalUA + " Mobile Safari";
-      },
-    },
-  }, {
-    /*
      * Bug 1338260 - Add UA override for directTV
      * (Imported from ua-update.json.in)
      *
      * DirectTV has issues with scrolling and cut-off images. Pretending to be
      * Chrome for Android fixes those issues.
      */
     id: "bug1338260",
     platform: "android",
@@ -243,35 +145,16 @@ for (const override of [
     config: {
       matches: ["*://*.mobile.de/*"],
       uaTransformer: (_) => {
         return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1476436 - mobile.bet365.com - add UA override for fennec
-     * WebCompat issue #17010 - https://webcompat.com/issues/17010
-     *
-     * mobile.bet365.com serves fennec an alternative version with less interactive
-     * elements, although they work just fine. Spoofing as Chrome makes the
-     * interactive elements appear.
-     */
-    id: "bug1476436",
-    platform: "android",
-    domain: "mobile.bet365.com",
-    bug: "1476436",
-    config: {
-      matches: ["*://mobile.bet365.com/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1509831 - cc.com - Add UA override for CC.com
      * WebCompat issue #329 - https://webcompat.com/issues/329
      *
      * ComedyCentral blocks Firefox for not being able to play HLS, which was
      * true in previous versions, but no longer is. With a spoofed Chrome UA,
      * the site works just fine.
      */
     id: "bug1509831",
@@ -281,36 +164,16 @@ for (const override of [
     config: {
       matches: ["*://*.cc.com/*"],
       uaTransformer: (_) => {
         return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
       },
     },
   }, {
     /*
-     * Bug 1508564 - cnbc.com - Add UA override for videos on www.cnbc.com
-     * WebCompat issue #8410 - https://webcompat.com/issues/8410
-     *
-     * The video framework loaded in via pdk.theplatform.com fails to
-     * acknowledge that Firefox does support HLS, so it fails to find a
-     * supported video format and shows the loading bar forever. Spoofing as
-     * Chrome works.
-     */
-    id: "bug1508564",
-    platform: "android",
-    domain: "cnbc.com",
-    bug: "1508564",
-    config: {
-      matches: ["*://*.cnbc.com/*"],
-      uaTransformer: (_) => {
-        return "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36";
-      },
-    },
-  }, {
-    /*
      * Bug 1508516 - cineflix.com.br - Add UA override for cineflix.com.br/m/
      * WebCompat issue #21553 - https://webcompat.com/issues/21553
      *
      * The site renders a blank page with any Firefox snippet in the UA as it
      * is running into an exception. Spoofing as Chrome makes the site work
      * fine.
      */
     id: "bug1508516",
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -3172,16 +3172,20 @@ pref("browser.tabs.remote.separateFileUr
 
 // Pref that allows top-level web content pages opened from file:// URI pages
 // to run in the file content process.
 // This has been added in case breaking window references between these
 // sorts of pages (which we have to do when running them in the normal web
 // content process) causes compatibility issues.
 pref("browser.tabs.remote.allowLinkedWebInFileUriProcess", true);
 
+// This pref will cause assertions when a remoteType triggers a process switch
+// to a new remoteType it should not be able to trigger.
+pref("browser.tabs.remote.enforceRemoteTypeRestrictions", false);
+
 // Pref to control whether we use a separate privileged content process
 // for about: pages. This pref name did not age well: we will have multiple
 // types of privileged content processes, each with different privileges.
 pref("browser.tabs.remote.separatePrivilegedContentProcess", false);
 
 // Pref to control whether we use a separate privileged content process
 // for certain mozilla webpages (which are listed in the following pref).
 pref("browser.tabs.remote.separatePrivilegedMozillaWebContentProcess", false);
--- a/netwerk/protocol/http/TunnelUtils.cpp
+++ b/netwerk/protocol/http/TunnelUtils.cpp
@@ -351,41 +351,50 @@ nsresult TLSFilterTransaction::ReadSegme
     LOG(("TLSFilterTransaction %p read segment blocked found rv=%" PRIx32 "\n",
          this, static_cast<uint32_t>(rv)));
     Unused << Connection()->ForceSend();
   }
 
   return NS_SUCCEEDED(rv) ? mReadSegmentReturnValue : rv;
 }
 
-nsresult TLSFilterTransaction::WriteSegments(nsAHttpSegmentWriter* aWriter,
-                                             uint32_t aCount,
-                                             uint32_t* outCountWritten) {
+nsresult TLSFilterTransaction::WriteSegmentsAgain(nsAHttpSegmentWriter* aWriter,
+                                                  uint32_t aCount,
+                                                  uint32_t* outCountWritten,
+                                                  bool* again) {
   MOZ_ASSERT(OnSocketThread(), "not on socket thread");
-  LOG(("TLSFilterTransaction::WriteSegments %p max=%d\n", this, aCount));
+  LOG(("TLSFilterTransaction::WriteSegmentsAgain %p max=%d\n", this, aCount));
 
   if (!mTransaction) {
     return NS_ERROR_UNEXPECTED;
   }
 
   mSegmentWriter = aWriter;
-  nsresult rv = mTransaction->WriteSegments(this, aCount, outCountWritten);
+  nsresult rv =
+      mTransaction->WriteSegmentsAgain(this, aCount, outCountWritten, again);
   if (NS_SUCCEEDED(rv) && NS_FAILED(mFilterReadCode) && !(*outCountWritten)) {
     // nsPipe turns failures into silent OK... undo that!
     rv = mFilterReadCode;
     if (Connection() && (mFilterReadCode == NS_BASE_STREAM_WOULD_BLOCK)) {
       Unused << Connection()->ResumeRecv();
     }
   }
   LOG(("TLSFilterTransaction %p called trans->WriteSegments rv=%" PRIx32
        " %d\n",
        this, static_cast<uint32_t>(rv), *outCountWritten));
   return rv;
 }
 
+nsresult TLSFilterTransaction::WriteSegments(nsAHttpSegmentWriter* aWriter,
+                                             uint32_t aCount,
+                                             uint32_t* outCountWritten) {
+  bool again = false;
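+  // The "again" out-param is deliberately discarded: plain WriteSegments
+  // keeps its old single-shot behavior for callers that cannot loop.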
+  return WriteSegmentsAgain(aWriter, aCount, outCountWritten, &again);
+}
+
 nsresult TLSFilterTransaction::GetTransactionSecurityInfo(
     nsISupports** outSecInfo) {
   if (!mSecInfo) {
     return NS_ERROR_FAILURE;
   }
 
   nsCOMPtr<nsISupports> temp(mSecInfo);
   temp.forget(outSecInfo);
--- a/netwerk/protocol/http/TunnelUtils.h
+++ b/netwerk/protocol/http/TunnelUtils.h
@@ -139,16 +139,20 @@ class TLSFilterTransaction final : publi
                    nsIAsyncInputStream** outSocketIn,
                    nsIAsyncOutputStream** outSocketOut);
 
   // nsAHttpTransaction overloads
   bool IsNullTransaction() override;
   NullHttpTransaction* QueryNullTransaction() override;
   nsHttpTransaction* QueryHttpTransaction() override;
   SpdyConnectTransaction* QuerySpdyConnectTransaction() override;
+  MOZ_MUST_USE nsresult WriteSegmentsAgain(nsAHttpSegmentWriter* writer,
+                                           uint32_t count,
+                                           uint32_t* countWritten,
+                                           bool* again) override;
 
  private:
   MOZ_MUST_USE nsresult StartTimerCallback();
   void Cleanup();
   int32_t FilterOutput(const char* aBuf, int32_t aAmount);
   int32_t FilterInput(char* aBuf, int32_t aAmount);
 
   static PRStatus GetPeerName(PRFileDesc* fd, PRNetAddr* addr);
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/artifact_commands.py
@@ -0,0 +1,444 @@
+import argparse
+import hashlib
+import json
+import logging
+import os
+import shutil
+
+from collections import OrderedDict
+
+import mozpack.path as mozpath
+
+from mozbuild.artifact_builds import JOB_CHOICES
+
+from mach.decorators import (
+    CommandArgument,
+    CommandProvider,
+    Command,
+    SubCommand,
+)
+
+from mozbuild.base import (
+    MachCommandBase,
+    MachCommandConditions as conditions,
+)
+
+from mozbuild.util import ensureParentDir
+
+
+class SymbolsAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        # If this function is called, it means the --symbols option was given,
+        # so we want to store the value `True` if no explicit value was given
+        # to the option.
+        setattr(namespace, self.dest, values or True)
+
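+# Illustrative only (not part of the command wiring): with SymbolsAction,
+# a bare `--symbols` stores True, while an explicit value is stored as-is:
+#
+#   parser = argparse.ArgumentParser()
+#   parser.add_argument('--symbols', nargs='?', action=SymbolsAction)
+#   parser.parse_args(['--symbols']).symbols           # -> True
+#   parser.parse_args(['--symbols', 'full']).symbols   # -> 'full'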
+
+class ArtifactSubCommand(SubCommand):
+    def __call__(self, func):
+        after = SubCommand.__call__(self, func)
+        args = [
+            CommandArgument('--tree', metavar='TREE', type=str,
+                            help='Firefox tree.'),
+            CommandArgument('--job', metavar='JOB', choices=JOB_CHOICES,
+                            help='Build job.'),
+            CommandArgument('--verbose', '-v', action='store_true',
+                            help='Print verbose output.'),
+        ]
+        for arg in args:
+            after = arg(after)
+        return after
+
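+# Decorating a method with ArtifactSubCommand registers the subcommand and
+# appends the shared --tree/--job/--verbose arguments, so the individual
+# `mach artifact` subcommands below do not repeat those decorators.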
+
+@CommandProvider
+class PackageFrontend(MachCommandBase):
+    """Fetch and install binary artifacts from Mozilla automation."""
+
+    @Command('artifact', category='post-build',
+             description='Use pre-built artifacts to build Firefox.')
+    def artifact(self):
+        '''Download, cache, and install pre-built binary artifacts to build Firefox.
+
+        Use |mach build| as normal to freshen your installed binary libraries:
+        artifact builds automatically download, cache, and install binary
+        artifacts from Mozilla automation, replacing whatever may be in your
+        object directory.  Use |mach artifact last| to see what binary artifacts
+        were last used.
+
+        Never build libxul again!
+
+        '''
+        pass
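+        # Illustrative invocations (the subcommands are defined below):
+        #   ./mach artifact install      # fetch and install matching artifacts
+        #   ./mach artifact clear-cache  # reset the local artifact cache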
+
+    def _make_artifacts(self, tree=None, job=None, skip_cache=False,
+                        download_tests=True, download_symbols=False,
+                        download_host_bins=False,
+                        download_maven_zip=False,
+                        no_process=False):
+        state_dir = self._mach_context.state_dir
+        cache_dir = os.path.join(state_dir, 'package-frontend')
+
+        hg = None
+        if conditions.is_hg(self):
+            hg = self.substs['HG']
+
+        git = None
+        if conditions.is_git(self):
+            git = self.substs['GIT']
+
+        # If we're building Thunderbird, we should be checking for comm-central artifacts.
+        topsrcdir = self.substs.get('commtopsrcdir', self.topsrcdir)
+
+        if download_maven_zip:
+            if download_tests:
+                raise ValueError('--maven-zip requires --no-tests')
+            if download_symbols:
+                raise ValueError('--maven-zip cannot be used with --symbols')
+            if download_host_bins:
+                raise ValueError('--maven-zip cannot be used with --host-bins')
+            if not no_process:
+                raise ValueError('--maven-zip requires --no-process')
+
+        from mozbuild.artifacts import Artifacts
+        artifacts = Artifacts(tree, self.substs, self.defines, job,
+                              log=self.log, cache_dir=cache_dir,
+                              skip_cache=skip_cache, hg=hg, git=git,
+                              topsrcdir=topsrcdir,
+                              download_tests=download_tests,
+                              download_symbols=download_symbols,
+                              download_host_bins=download_host_bins,
+                              download_maven_zip=download_maven_zip,
+                              no_process=no_process)
+        return artifacts
+
+    @ArtifactSubCommand('artifact', 'install',
+                        'Install a good pre-built artifact.')
+    @CommandArgument('source', metavar='SRC', nargs='?', type=str,
+                     help='Where to fetch and install artifacts from.  Can be an hg '
+                     'revision, a remote URL, or a local file; if omitted, the '
+                     'current hg repository is inspected.',
+                     default=None)
+    @CommandArgument('--skip-cache', action='store_true',
+                     help='Skip all local caches to force re-fetching remote artifacts.',
+                     default=False)
+    @CommandArgument('--no-tests', action='store_true', help="Don't install tests.")
+    @CommandArgument('--symbols', nargs='?', action=SymbolsAction, help='Download symbols.')
+    @CommandArgument('--host-bins', action='store_true', help='Download host binaries.')
+    @CommandArgument('--distdir', help='Where to install artifacts to.')
+    @CommandArgument('--no-process', action='store_true',
+                     help="Don't process (unpack) artifact packages, just download them.")
+    @CommandArgument('--maven-zip', action='store_true', help="Download Maven zip (Android-only).")
+    def artifact_install(self, source=None, skip_cache=False, tree=None, job=None, verbose=False,
+                         no_tests=False, symbols=False, host_bins=False, distdir=None,
+                         no_process=False, maven_zip=False):
+        self._set_log_level(verbose)
+        artifacts = self._make_artifacts(tree=tree, job=job, skip_cache=skip_cache,
+                                         download_tests=not no_tests,
+                                         download_symbols=symbols,
+                                         download_host_bins=host_bins,
+                                         download_maven_zip=maven_zip,
+                                         no_process=no_process)
+
+        return artifacts.install_from(source, distdir or self.distdir)
+
+    @ArtifactSubCommand('artifact', 'clear-cache',
+                        'Delete local artifacts and reset local artifact cache.')
+    def artifact_clear_cache(self, tree=None, job=None, verbose=False):
+        self._set_log_level(verbose)
+        artifacts = self._make_artifacts(tree=tree, job=job)
+        artifacts.clear_cache()
+        return 0
+
+    @SubCommand('artifact', 'toolchain')
+    @CommandArgument('--verbose', '-v', action='store_true',
+                     help='Print verbose output.')
+    @CommandArgument('--cache-dir', metavar='DIR',
+                     help='Directory where to store the artifacts cache')
+    @CommandArgument('--skip-cache', action='store_true',
+                     help='Skip all local caches to force re-fetching remote artifacts.',
+                     default=False)
+    @CommandArgument('--from-build', metavar='BUILD', nargs='+',
+                     help='Download toolchains resulting from the given build(s); '
+                     'BUILD is a name of a toolchain task, e.g. linux64-clang')
+    @CommandArgument('--tooltool-manifest', metavar='MANIFEST',
+                     help='Explicit tooltool manifest to process')
+    @CommandArgument('--authentication-file', metavar='FILE',
+                     help='Use the RelengAPI token found in the given file to authenticate')
+    @CommandArgument('--tooltool-url', metavar='URL',
+                     help='Use the given url as tooltool server')
+    @CommandArgument('--no-unpack', action='store_true',
+                     help='Do not unpack any downloaded file')
+    @CommandArgument('--retry', type=int, default=4,
+                     help='Number of times to retry failed downloads')
+    @CommandArgument('--artifact-manifest', metavar='FILE',
+                     help='Store a manifest about the downloaded taskcluster artifacts')
+    @CommandArgument('files', nargs='*',
+                     help='A list of files to download, in the form path@task-id, in '
+                     'addition to the files listed in the tooltool manifest.')
+    def artifact_toolchain(self, verbose=False, cache_dir=None,
+                           skip_cache=False, from_build=(),
+                           tooltool_manifest=None, authentication_file=None,
+                           tooltool_url=None, no_unpack=False, retry=None,
+                           artifact_manifest=None, files=()):
+        '''Download, cache and install pre-built toolchains.
+        '''
+        from mozbuild.artifacts import ArtifactCache
+        from mozbuild.action.tooltool import (
+            FileRecord,
+            open_manifest,
+            unpack_file,
+        )
+        from requests.adapters import HTTPAdapter
+        import redo
+        import requests
+
+        from taskgraph.util.taskcluster import (
+            get_artifact_url,
+        )
+
+        self._set_log_level(verbose)
+        # Normally, we'd use self.log_manager.enable_unstructured(),
+        # but that enables all logging, while we only really want tooltool's,
+        # and it also duplicates structured log output.
+        # So we manually do what it does, and limit that to the tooltool
+        # logger.
+        if self.log_manager.terminal_handler:
+            logging.getLogger('mozbuild.action.tooltool').addHandler(
+                self.log_manager.terminal_handler)
+            logging.getLogger('redo').addHandler(
+                self.log_manager.terminal_handler)
+            self.log_manager.terminal_handler.addFilter(
+                self.log_manager.structured_filter)
+        if not cache_dir:
+            cache_dir = os.path.join(self._mach_context.state_dir, 'toolchains')
+
+        tooltool_url = (tooltool_url or
+                        'https://tooltool.mozilla-releng.net').rstrip('/')
+
+        cache = ArtifactCache(cache_dir=cache_dir, log=self.log,
+                              skip_cache=skip_cache)
+
+        if authentication_file:
+            with open(authentication_file, 'rb') as f:
+                token = f.read().strip()
+
+            class TooltoolAuthenticator(HTTPAdapter):
+                def send(self, request, *args, **kwargs):
+                    request.headers['Authorization'] = \
+                        'Bearer {}'.format(token)
+                    return super(TooltoolAuthenticator, self).send(
+                        request, *args, **kwargs)
+
+            cache._download_manager.session.mount(
+                tooltool_url, TooltoolAuthenticator())
+
+        class DownloadRecord(FileRecord):
+            def __init__(self, url, *args, **kwargs):
+                super(DownloadRecord, self).__init__(*args, **kwargs)
+                self.url = url
+                self.basename = self.filename
+
+            def fetch_with(self, cache):
+                self.filename = cache.fetch(self.url)
+                return self.filename
+
+            def validate(self):
+                if self.size is None and self.digest is None:
+                    return True
+                return super(DownloadRecord, self).validate()
+
+        class ArtifactRecord(DownloadRecord):
+            def __init__(self, task_id, artifact_name):
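+                # Retry transient 5xx responses from the chain-of-trust
+                # fetch; the for/else re-raises the last error only when
+                # every attempt failed (the else arm runs if the loop is
+                # never broken out of).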
+                for _ in redo.retrier(attempts=retry+1, sleeptime=60):
+                    cot = cache._download_manager.session.get(
+                        get_artifact_url(task_id, 'public/chain-of-trust.json'))
+                    if cot.status_code >= 500:
+                        continue
+                    cot.raise_for_status()
+                    break
+                else:
+                    cot.raise_for_status()
+
+                digest = algorithm = None
+                data = json.loads(cot.content)
+                for algorithm, digest in (data.get('artifacts', {})
+                                              .get(artifact_name, {}).items()):
+                    pass
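+                # ('artifacts' maps the artifact name to a one-element
+                #  {algorithm: digest} dict; the loop above unpacks it,
+                #  leaving both None when the artifact is not listed.)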
+
+                name = os.path.basename(artifact_name)
+                artifact_url = get_artifact_url(task_id, artifact_name,
+                                                use_proxy=not artifact_name.startswith('public/'))
+                super(ArtifactRecord, self).__init__(
+                    artifact_url, name,
+                    None, digest, algorithm, unpack=True)
+
+        records = OrderedDict()
+        downloaded = []
+
+        if tooltool_manifest:
+            manifest = open_manifest(tooltool_manifest)
+            for record in manifest.file_records:
+                url = '{}/{}/{}'.format(tooltool_url, record.algorithm,
+                                        record.digest)
+                records[record.filename] = DownloadRecord(
+                    url, record.filename, record.size, record.digest,
+                    record.algorithm, unpack=record.unpack,
+                    version=record.version, visibility=record.visibility)
+
+        if from_build:
+            if 'MOZ_AUTOMATION' in os.environ:
+                self.log(logging.ERROR, 'artifact', {},
+                         'Do not use --from-build in automation; all dependencies '
+                         'should be determined in the decision task.')
+                return 1
+            from taskgraph.optimize import IndexSearch
+            from taskgraph.parameters import Parameters
+            from taskgraph.generator import load_tasks_for_kind
+            params = Parameters(
+                level=os.environ.get('MOZ_SCM_LEVEL', '3'),
+                strict=False,
+            )
+
+            root_dir = mozpath.join(self.topsrcdir, 'taskcluster/ci')
+            toolchains = load_tasks_for_kind(params, 'toolchain', root_dir=root_dir)
+
+            aliases = {}
+            for t in toolchains.values():
+                alias = t.attributes.get('toolchain-alias')
+                if alias:
+                    aliases['toolchain-{}'.format(alias)] = \
+                        t.task['metadata']['name']
+
+            for b in from_build:
+                user_value = b
+
+                if not b.startswith('toolchain-'):
+                    b = 'toolchain-{}'.format(b)
+
+                task = toolchains.get(aliases.get(b, b))
+                if not task:
+                    self.log(logging.ERROR, 'artifact', {'build': user_value},
+                             'Could not find a toolchain build named `{build}`')
+                    return 1
+
+                task_id = IndexSearch().should_replace_task(
+                    task, {}, task.optimization.get('index-search', []))
+                artifact_name = task.attributes.get('toolchain-artifact')
+                if task_id in (True, False) or not artifact_name:
+                    self.log(logging.ERROR, 'artifact', {'build': user_value},
+                             'Could not find artifacts for a toolchain build '
+                             'named `{build}`. Local commits and other changes '
+                             'in your checkout may cause this error. Try '
+                             'updating to a fresh checkout of mozilla-central '
+                             'to use artifact builds.')
+                    return 1
+
+                record = ArtifactRecord(task_id, artifact_name)
+                records[record.filename] = record
+
+        # Handle the list of files of the form path@task-id on the command
+        # line. Each of those gives a path to an artifact to download.
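+        # e.g. `./mach artifact toolchain public/build/clang.tar.xz@<TASK-ID>`
+        # (the path and task id above are placeholders, not real values).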
+        for f in files:
+            if '@' not in f:
+                self.log(logging.ERROR, 'artifact', {},
+                         'Expected a list of files of the form path@task-id')
+                return 1
+            name, task_id = f.rsplit('@', 1)
+            record = ArtifactRecord(task_id, name)
+            records[record.filename] = record
+
+        for record in records.itervalues():
+            self.log(logging.INFO, 'artifact', {'name': record.basename},
+                     'Downloading {name}')
+            valid = False
+            # sleeptime is 60 per retry.py, used by tooltool_wrapper.sh
+            for attempt, _ in enumerate(redo.retrier(attempts=retry+1,
+                                                     sleeptime=60)):
+                try:
+                    record.fetch_with(cache)
+                except (requests.exceptions.HTTPError,
+                        requests.exceptions.ChunkedEncodingError,
+                        requests.exceptions.ConnectionError) as e:
+
+                    if isinstance(e, requests.exceptions.HTTPError):
+                        # The relengapi proxy likes to return error 400 bad
+                        # request, which seems unlikely to be due to our
+                        # (simple) GET being borked.
+                        status = e.response.status_code
+                        should_retry = status >= 500 or status == 400
+                    else:
+                        should_retry = True
+
+                    if should_retry or attempt < retry:
+                        level = logging.WARN
+                    else:
+                        level = logging.ERROR
+                    # e.message is not always a string, so convert it first.
+                    self.log(level, 'artifact', {}, str(e.message))
+                    if not should_retry:
+                        break
+                    if attempt < retry:
+                        self.log(logging.INFO, 'artifact', {},
+                                 'Will retry in a moment...')
+                    continue
+                try:
+                    valid = record.validate()
+                except Exception:
+                    pass
+                if not valid:
+                    os.unlink(record.filename)
+                    if attempt < retry:
+                        self.log(logging.INFO, 'artifact', {},
+                                 'Corrupt download. Will retry in a moment...')
+                    continue
+
+                downloaded.append(record)
+                break
+
+            if not valid:
+                self.log(logging.ERROR, 'artifact', {'name': record.basename},
+                         'Failed to download {name}')
+                return 1
+
+        artifacts = {} if artifact_manifest else None
+
+        for record in downloaded:
+            local = os.path.join(os.getcwd(), record.basename)
+            if os.path.exists(local):
+                os.unlink(local)
+            # unpack_file needs the file with its final name to work
+            # (https://github.com/mozilla/build-tooltool/issues/38), so we
+            # need to copy it, even though we remove it later. Use hard links
+            # when possible.
+            try:
+                os.link(record.filename, local)
+            except Exception:
+                shutil.copy(record.filename, local)
+            # Keep a sha256 of each downloaded file, for the chain-of-trust
+            # validation.
+            if artifact_manifest is not None:
+                with open(local, 'rb') as fh:
+                    h = hashlib.sha256()
+                    while True:
+                        data = fh.read(1024 * 1024)
+                        if not data:
+                            break
+                        h.update(data)
+                artifacts[record.url] = {
+                    'sha256': h.hexdigest(),
+                }
+            if record.unpack and not no_unpack:
+                unpack_file(local)
+                os.unlink(local)
+
+        if not downloaded:
+            self.log(logging.ERROR, 'artifact', {}, 'Nothing to download')
+            if files:
+                return 1
+
+        if artifacts:
+            ensureParentDir(artifact_manifest)
+            with open(artifact_manifest, 'w') as fh:
+                json.dump(artifacts, fh, indent=4, sort_keys=True)
+
+        return 0
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/build_commands.py
@@ -0,0 +1,176 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import os
+
+from mach.decorators import (
+    CommandArgument,
+    CommandProvider,
+    Command,
+)
+
+from mozbuild.base import MachCommandBase
+
+from mozbuild.backend import (
+    backends,
+)
+
+BUILD_WHAT_HELP = '''
+What to build. Can be a top-level make target or a relative directory. If
+multiple options are provided, they will be built serially. Takes dependency
+information from `topsrcdir/build/dumbmake-dependencies` to build additional
+targets as needed. BUILDING ONLY PARTS OF THE TREE CAN RESULT IN BAD TREE
+STATE. USE AT YOUR OWN RISK.
+'''.strip()
+
+
+@CommandProvider
+class Build(MachCommandBase):
+    """Interface to build the tree."""
+
+    @Command('build', category='build', description='Build the tree.')
+    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
+                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
+    @CommandArgument('-C', '--directory', default=None,
+                     help='Change to a subdirectory of the build directory first.')
+    @CommandArgument('what', default=None, nargs='*', help=BUILD_WHAT_HELP)
+    @CommandArgument('-X', '--disable-extra-make-dependencies',
+                     default=False, action='store_true',
+                     help='Do not add extra make dependencies.')
+    @CommandArgument('-v', '--verbose', action='store_true',
+                     help='Verbose output for what commands the build is running.')
+    @CommandArgument('--keep-going', action='store_true',
+                     help='Keep building after an error has occurred')
+    def build(self, what=None, disable_extra_make_dependencies=None, jobs=0,
+              directory=None, verbose=False, keep_going=False):
+        """Build the source tree.
+
+        With no arguments, this will perform a full build.
+
+        Positional arguments define targets to build. These can be make targets
+        or patterns like "<dir>/<target>" to indicate a make target within a
+        directory.
+
+        There are a few special targets that can be used to perform a partial
+        build faster than what `mach build` would perform:
+
+        * binaries - compiles and links all C/C++ sources and produces shared
+          libraries and executables (binaries).
+
+        * faster - builds JavaScript, XUL, CSS, etc files.
+
+        "binaries" and "faster" almost fully complement each other. However,
+        there are build actions not captured by either. If things don't appear to
+        be rebuilding, perform a vanilla `mach build` to rebuild the world.
+        """
+        from mozbuild.controller.building import (
+            BuildDriver,
+        )
+
+        self.log_manager.enable_all_structured_loggers()
+
+        driver = self._spawn(BuildDriver)
+        return driver.build(
+            what=what,
+            disable_extra_make_dependencies=disable_extra_make_dependencies,
+            jobs=jobs,
+            directory=directory,
+            verbose=verbose,
+            keep_going=keep_going,
+            mach_context=self._mach_context)
+
+    @Command('configure', category='build',
+             description='Configure the tree (run configure and config.status).')
+    @CommandArgument('options', default=None, nargs=argparse.REMAINDER,
+                     help='Configure options')
+    def configure(self, options=None, buildstatus_messages=False, line_handler=None):
+        from mozbuild.controller.building import (
+            BuildDriver,
+        )
+
+        self.log_manager.enable_all_structured_loggers()
+        driver = self._spawn(BuildDriver)
+
+        return driver.configure(
+            options=options,
+            buildstatus_messages=buildstatus_messages,
+            line_handler=line_handler)
+
+    @Command('resource-usage', category='post-build',
+             description='Show information about system resource usage for a build.')
+    @CommandArgument('--address', default='localhost',
+                     help='Address the HTTP server should listen on.')
+    @CommandArgument('--port', type=int, default=0,
+                     help='Port number the HTTP server should listen on.')
+    @CommandArgument('--browser', default='firefox',
+                     help='Web browser to automatically open. See webbrowser Python module.')
+    @CommandArgument('--url',
+                     help='URL of JSON document to display')
+    def resource_usage(self, address=None, port=None, browser=None, url=None):
+        import webbrowser
+        from mozbuild.html_build_viewer import BuildViewerServer
+
+        server = BuildViewerServer(address, port)
+
+        if url:
+            server.add_resource_json_url('url', url)
+        else:
+            last = self._get_state_filename('build_resources.json')
+            if not os.path.exists(last):
+                print('Build resources not available. If you have performed a '
+                      'build and receive this message, the psutil Python package '
+                      'likely failed to initialize properly.')
+                return 1
+
+            server.add_resource_json_file('last', last)
+        try:
+            webbrowser.get(browser).open_new_tab(server.url)
+        except Exception:
+            print('Could not open the specified browser; trying the default instead.')
+            try:
+                browser = webbrowser.get().open_new_tab(server.url)
+            except Exception:
+                print('Please open %s in a browser.' % server.url)
+
+        print('Hit CTRL+c to stop server.')
+        server.run()
+
+    @Command('build-backend', category='build',
+             description='Generate a backend used to build the tree.')
+    @CommandArgument('-d', '--diff', action='store_true',
+                     help='Show a diff of changes.')
+    # It would be nice to filter the choices below based on
+    # conditions, but that is for another day.
+    @CommandArgument('-b', '--backend', nargs='+', choices=sorted(backends),
+                     help='Which backend to build.')
+    @CommandArgument('-v', '--verbose', action='store_true',
+                     help='Verbose output.')
+    @CommandArgument('-n', '--dry-run', action='store_true',
+                     help='Do everything except writing files out.')
+    def build_backend(self, backend, diff=False, verbose=False, dry_run=False):
+        python = self.virtualenv_manager.python_path
+        config_status = os.path.join(self.topobjdir, 'config.status')
+
+        if not os.path.exists(config_status):
+            print('config.status not found.  Please run |mach configure| '
+                  'or |mach build| prior to building the %s build backend.'
+                  % backend)
+            return 1
+
+        args = [python, config_status]
+        if backend:
+            args.append('--backend')
+            args.extend(backend)
+        if diff:
+            args.append('--diff')
+        if verbose:
+            args.append('--verbose')
+        if dry_run:
+            args.append('--dry-run')
+
+        return self._run_command_in_objdir(args=args, pass_thru=True,
+                                           ensure_exit_code=False)
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/code-analysis/mach_commands.py
@@ -0,0 +1,2042 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from __future__ import absolute_import, print_function, unicode_literals
+
+import io
+import logging
+import json
+import ntpath
+import os
+import re
+import sys
+import subprocess
+import shutil
+import tarfile
+import tempfile
+import xml.etree.ElementTree as ET
+import yaml
+
+from mach.decorators import (
+    CommandArgument,
+    CommandProvider,
+    Command,
+    SubCommand,
+)
+
+from mach.main import Mach
+
+from mozbuild.base import MachCommandBase
+
+from mozbuild.build_commands import Build
+from mozbuild.artifact_commands import PackageFrontend
+
+import mozpack.path as mozpath
+
+from mozversioncontrol import get_repository_object
+
+
+# Function used to run clang-format on a batch of files. It is a helper
+# function that makes it possible to integrate clang-format into the futures
+# ecosystem.
+def run_one_clang_format_batch(args):
+    try:
+        subprocess.check_output(args)
+    except subprocess.CalledProcessError as e:
+        return e
+
+
+class StaticAnalysisSubCommand(SubCommand):
+    def __call__(self, func):
+        after = SubCommand.__call__(self, func)
+        args = [
+            CommandArgument('--verbose', '-v', action='store_true',
+                            help='Print verbose output.'),
+        ]
+        for arg in args:
+            after = arg(after)
+        return after
+
+
+class StaticAnalysisMonitor(object):
+    def __init__(self, srcdir, objdir, clang_tidy_config, total):
+        self._total = total
+        self._processed = 0
+        self._current = None
+        self._srcdir = srcdir
+
+        self._clang_tidy_config = clang_tidy_config['clang_checkers']
+        # Transform the configuration to support regexes (str.replace returns
+        # a new string, so the result must be assigned back)
+        for item in self._clang_tidy_config:
+            if item['name'] == '-*':
+                continue
+            item['name'] = item['name'].replace('*', '.*')
+
+        from mozbuild.compilation.warnings import (
+            WarningsCollector,
+            WarningsDatabase,
+        )
+
+        self._warnings_database = WarningsDatabase()
+
+        def on_warning(warning):
+            self._warnings_database.insert(warning)
+
+        self._warnings_collector = WarningsCollector(on_warning, objdir=objdir)
+
+    @property
+    def num_files(self):
+        return self._total
+
+    @property
+    def num_files_processed(self):
+        return self._processed
+
+    @property
+    def current_file(self):
+        return self._current
+
+    @property
+    def warnings_db(self):
+        return self._warnings_database
+
+    def on_line(self, line):
+        warning = None
+
+        try:
+            warning = self._warnings_collector.process_line(line)
+        except Exception:
+            pass
+
+        if line.find('clang-tidy') != -1:
+            filename = line.split(' ')[-1]
+            if os.path.isfile(filename):
+                self._current = os.path.relpath(filename, self._srcdir)
+            else:
+                self._current = None
+            self._processed = self._processed + 1
+            return (warning, False)
+        if warning is not None:
+            def get_reliability(checker_name):
+                # Find the config entry whose 'name' field matches checker_name
+                reliability = None
+                for item in self._clang_tidy_config:
+                    if item['name'] == checker_name:
+                        reliability = item.get('reliability', 'low')
+                        break
+                    else:
+                        # Use a regex so wildcard entries like 'mozilla-.*' also match
+                        matcher = re.match(item['name'], checker_name)
+                        if matcher is not None and matcher.group(0) == checker_name:
+                            reliability = item.get('reliability', 'low')
+                            break
+                return reliability
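+            # e.g. a config entry named 'mozilla-.*' (after the wildcard
+            # rewrite in StaticAnalysisMonitor.__init__) would match any
+            # checker whose name starts with 'mozilla-' and supply its
+            # reliability.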
+            reliability = get_reliability(warning['flag'])
+            if reliability is not None:
+                warning['reliability'] = reliability
+        return (warning, True)
+
+
+@CommandProvider
+class StaticAnalysis(MachCommandBase):
+    """Utilities for running C++ static analysis checks and format."""
+
+    # List of file extension to consider (should start with dot)
+    _format_include_extensions = ('.cpp', '.c', '.cc', '.h', '.m', '.mm')
+    # File containing all paths to exclude from formatting
+    _format_ignore_file = '.clang-format-ignore'
+
+    _clang_tidy_config = None
+    _cov_config = None
+
+    @Command('static-analysis', category='testing',
+             description='Run C++ static analysis checks')
+    def static_analysis(self):
+        # If no arguments are provided, just print a help message.
+        mach = Mach(os.getcwd())
+        mach.run(['static-analysis', '--help'])
+
+    @StaticAnalysisSubCommand('static-analysis', 'check',
+                              'Run the checks using the helper tool')
+    @CommandArgument('source', nargs='*', default=['.*'],
+                     help='Source files to be analyzed (regex on path). '
+                          'Can be omitted, in which case the entire code base '
+                          'is analyzed.  The source argument is ignored if '
+                          'there is anything fed through stdin, in which case '
+                          'the analysis is only performed on the files changed '
+                          'in the patch streamed through stdin.  This is called '
+                          'the diff mode.')
+    @CommandArgument('--checks', '-c', default='-*', metavar='checks',
+                     help='Static analysis checks to enable.  By default, this enables only '
+                     'checks that are published here: https://mzl.la/2DRHeTh, but any '
+                     'clang-tidy checks syntax is accepted.')
+    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
+                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
+    @CommandArgument('--strip', '-p', default='1', metavar='NUM',
+                     help='Strip NUM leading components from file names in diff mode.')
+    @CommandArgument('--fix', '-f', default=False, action='store_true',
+                     help='Try to autofix errors detected by clang-tidy checkers.')
+    @CommandArgument('--header-filter', '-h-f', default='', metavar='header_filter',
+                     help='Regular expression matching the names of the headers to '
+                          'output diagnostics from. Diagnostics from the main file '
+                          'of each translation unit are always displayed')
+    @CommandArgument('--output', '-o', default=None,
+                     help='Write clang-tidy output in a file')
+    @CommandArgument('--format', default='text', choices=('text', 'json'),
+                     help='Output format to write in a file')
+    @CommandArgument('--outgoing', default=False, action='store_true',
+                     help='Run static analysis checks on outgoing files from mercurial repository')
+    def check(self, source=None, jobs=2, strip=1, verbose=False, checks='-*',
+              fix=False, header_filter='', output=None, format='text', outgoing=False):
+        from mozbuild.controller.building import (
+            StaticAnalysisFooter,
+            StaticAnalysisOutputManager,
+        )
+
+        self._set_log_level(verbose)
+        self.log_manager.enable_all_structured_loggers()
+
+        rc = self._get_clang_tools(verbose=verbose)
+        if rc != 0:
+            return rc
+
+        if self._is_version_eligible() is False:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "You're using an old version of clang-format binary."
+                     " Please update to a more recent one by running: './mach bootstrap'")
+            return 1
+
+        rc = self._build_compile_db(verbose=verbose)
+        rc = rc or self._build_export(jobs=jobs, verbose=verbose)
+        if rc != 0:
+            return rc
+
+        # Use outgoing files instead of source files
+        if outgoing:
+            repo = get_repository_object(self.topsrcdir)
+            files = repo.get_outgoing_files()
+            source = map(os.path.abspath, files)
+
+        # Split into several chunks to avoid hitting Python's limit of 100 groups in re
+        compile_db = json.loads(open(self._compile_db, 'r').read())
+        total = 0
+        chunk_size = 50
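+        # (each source pattern contributes one group to the combined regex,
+        #  so chunks of 50 stay comfortably under the 100-group cap)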
+        for offset in range(0, len(source), chunk_size):
+            source_chunks = source[offset:offset + chunk_size]
+            name_re = re.compile('(' + ')|('.join(source_chunks) + ')')
+            for f in compile_db:
+                if name_re.search(f['file']):
+                    total = total + 1
+
+        if not total:
+            self.log(logging.INFO, 'static-analysis', {},
+                     "There are no files eligible for analysis. Please note that 'header' files "
+                     "cannot be used for analysis since they do not consist compilation units.")
+            return 0
+
+        cwd = self.topobjdir
+        self._compilation_commands_path = self.topobjdir
+        if self._clang_tidy_config is None:
+            self._clang_tidy_config = self._get_clang_tidy_config()
+        args = self._get_clang_tidy_command(
+            checks=checks, header_filter=header_filter, sources=source, jobs=jobs, fix=fix)
+
+        monitor = StaticAnalysisMonitor(
+            self.topsrcdir, self.topobjdir, self._clang_tidy_config, total)
+
+        footer = StaticAnalysisFooter(self.log_manager.terminal, monitor)
+        with StaticAnalysisOutputManager(self.log_manager, monitor, footer) as output_manager:
+            rc = self.run_process(args=args, ensure_exit_code=False,
+                                  line_handler=output_manager.on_line, cwd=cwd)
+
+            self.log(logging.WARNING, 'warning_summary',
+                     {'count': len(monitor.warnings_db)},
+                     '{count} warnings present.')
+
+            # Write output file
+            if output is not None:
+                output_manager.write(output, format)
+
+        if rc != 0:
+            return rc
+        # if we are building firefox for android it might be nice to
+        # also analyze the java code base
+        if self.substs['MOZ_BUILD_APP'] == 'mobile/android':
+            rc = self.check_java(source, jobs, strip, verbose, skip_export=True)
+        return rc
+
+    @StaticAnalysisSubCommand('static-analysis', 'check-coverity',
+                              'Run coverity static-analysis tool on the given files. '
+                              'Can only be run by automation! '
+                              'Its result is stored as a json file on the artifacts server.')
+    @CommandArgument('source', nargs='*', default=[],
+                     help='Source files to be analyzed by Coverity Static Analysis Tool. '
+                          'This is run only in automation.')
+    @CommandArgument('--output', '-o', default=None,
+                     help='Write coverity output translated to json output in a file')
+    @CommandArgument('--coverity_output_path', '-co', default=None,
+                     help='Path where to write coverity results as cov-results.json. '
+                     'If no path is specified the default path from the coverity working '
+                     'directory, ~/.mozbuild/coverity, is used.')
+    @CommandArgument('--outgoing', default=False, action='store_true',
+                     help='Run coverity on outgoing files from mercurial or git repository')
+    def check_coverity(self, source=[], output=None, coverity_output_path=None,
+                       outgoing=False, verbose=False):
+        self._set_log_level(verbose)
+        self.log_manager.enable_all_structured_loggers()
+
+        if 'MOZ_AUTOMATION' not in os.environ:
+            self.log(logging.INFO, 'static-analysis', {},
+                     'Coverity based static-analysis cannot be run outside automation.')
+            return
+
+        # Use outgoing files instead of source files
+        if outgoing:
+            repo = get_repository_object(self.topsrcdir)
+            files = repo.get_outgoing_files()
+            source = map(os.path.abspath, files)
+
+        if len(source) == 0:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'There are no files that coverity can use to scan.')
+            return 0
+
+        rc = self._build_compile_db(verbose=verbose)
+        rc = rc or self._build_export(jobs=2, verbose=verbose)
+
+        if rc != 0:
+            return rc
+
+        commands_list = self.get_files_with_commands(source)
+        if len(commands_list) == 0:
+            self.log(logging.INFO, 'static-analysis', {},
+                     'There are no files that need to be analyzed.')
+            return 0
+
+        # Load the configuration file for coverity static-analysis
+        # For the moment we store only the reliability index for each checker
+        # as the rest is managed on the https://github.com/mozilla/release-services side.
+        self._cov_config = self._get_cov_config()
+
+        rc = self.setup_coverity()
+        if rc != 0:
+            return rc
+
+        # First run cov-run-desktop --setup in order to setup the analysis env
+        cmd = [self.cov_run_desktop, '--setup']
+        self.log(logging.INFO, 'static-analysis', {},
+                 'Running {} --setup'.format(self.cov_run_desktop))
+
+        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
+
+        if rc != 0:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Running {} --setup failed!'.format(self.cov_run_desktop))
+            return rc
+
+        # Run cov-configure for clang
+        cmd = [self.cov_configure, '--clang']
+        self.log(logging.INFO, 'static-analysis', {},
+                 'Running {} --clang'.format(self.cov_configure))
+
+        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
+
+        if rc != 0:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Running {} --clang failed!'.format(self.cov_configure))
+            return rc
+
+        # For each element in commands_list run `cov-translate`
+        for element in commands_list:
+            cmd = [self.cov_translate, '--dir', self.cov_idir_path] + element['command'].split(' ')
+            self.log(logging.INFO, 'static-analysis', {},
+                     'Running Coverity Translate for {}'.format(cmd))
+            rc = self.run_process(args=cmd, cwd=element['directory'], pass_thru=True)
+            if rc != 0:
+                self.log(logging.ERROR, 'static-analysis', {},
+                         'Running Coverity Translate failed for {}'.format(cmd))
+                return rc
+
+        if coverity_output_path is None:
+            cov_result = mozpath.join(self.cov_state_path, 'cov-results.json')
+        else:
+            cov_result = mozpath.join(coverity_output_path, 'cov-results.json')
+
+        # Once the capture is performed we need to do the actual Coverity Desktop analysis
+        cmd = [self.cov_run_desktop, '--json-output-v6', cov_result, '--analyze-captured-source']
+        self.log(logging.INFO, 'static-analysis', {},
+                 'Running Coverity Analysis for {}'.format(cmd))
+        rc = self.run_process(cmd, cwd=self.cov_state_path, pass_thru=True)
+        if rc != 0:
+            self.log(logging.ERROR, 'static-analysis', {}, 'Coverity Analysis failed!')
+
+        if output is not None:
+            self.dump_cov_artifact(cov_result, source, output)
+
+    def get_reliability_index_for_cov_checker(self, checker_name):
+        if self._cov_config is None:
+            self.log(logging.INFO, 'static-analysis', {}, 'Coverity config file not found, '
+                     'using default value \'reliability\' = medium for checker {}'.format(
+                        checker_name))
+            return 'medium'
+
+        checkers = self._cov_config['coverity_checkers']
+        if checker_name not in checkers:
+            self.log(logging.INFO, 'static-analysis', {},
+                     'Coverity checker {} not found to determine reliability index. '
+                     'For the moment we shall use the default \'reliability\' = medium.'.format(
+                        checker_name))
+            return 'medium'
+
+        if 'reliability' not in checkers[checker_name]:
+            # This checker doesn't have a reliability index
+            self.log(logging.INFO, 'static-analysis', {},
+                     'Coverity checker {} doesn\'t have a reliability index set; '
+                     'the \'reliability\' field is missing, please consider adding it. '
+                     'For the moment we shall use the default \'reliability\' = medium.'.format(
+                        checker_name))
+            return 'medium'
+
+        return checkers[checker_name]['reliability']
+
+    def dump_cov_artifact(self, cov_results, source, output):
+        # Parse Coverity json into structured issues
+        with open(cov_results) as f:
+            result = json.load(f)
+
+            # Parse the issues to a standard json format
+            issues_dict = {'files': {}}
+
+            files_list = issues_dict['files']
+
+            def build_element(issue):
+                # We look only for main event
+                event_path = next(
+                    (event for event in issue['events'] if event['main'] is True), None)
+
+                dict_issue = {
+                    'line': issue['mainEventLineNumber'],
+                    'flag': issue['checkerName'],
+                    'message': event_path['eventDescription'],
+                    'reliability': self.get_reliability_index_for_cov_checker(
+                        issue['checkerName']
+                        ),
+                    'extra': {
+                        'category': issue['checkerProperties']['category'],
+                        'stateOnServer': issue['stateOnServer'],
+                        'stack': []
+                    }
+                }
+
+                # Embed all events into extra message
+                for event in issue['events']:
+                    dict_issue['extra']['stack'].append(
+                        {'file_path': event['strippedFilePathname'],
+                         'line_number': event['lineNumber'],
+                         'path_type': event['eventTag'],
+                         'description': event['eventDescription']})
+
+                return dict_issue
+
+            for issue in result['issues']:
+                path = self.cov_is_file_in_source(issue['strippedMainEventFilePathname'], source)
+                if path is None:
+                    # Since we skip a result we should log it
+                    self.log(logging.INFO, 'static-analysis', {},
+                             'Skipping CID: {0} from file: {1} since it\'s not related '
+                             'with the current patch.'.format(
+                                issue['stateOnServer']['cid'],
+                                issue['strippedMainEventFilePathname'])
+                             )
+                    continue
+                if path in files_list:
+                    files_list[path]['warnings'].append(build_element(issue))
+                else:
+                    files_list[path] = {'warnings': [build_element(issue)]}
+
+            with open(output, 'w') as f:
+                json.dump(issues_dict, f)
+
+    def get_coverity_secrets(self):
+        from taskgraph.util.taskcluster import get_root_url
+
+        secret_name = 'project/relman/coverity'
+        secrets_url = '{}/secrets/v1/secret/{}'.format(get_root_url(True), secret_name)
+
+        self.log(logging.INFO, 'static-analysis', {},
+                 'Fetching the Coverity configuration from the secrets service: "{}"'.format(secrets_url))
+
+        import requests
+        res = requests.get(secrets_url)
+        res.raise_for_status()
+        secret = res.json()
+        cov_config = secret['secret'] if 'secret' in secret else None
+
+        if cov_config is None:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Ill formatted secret for Coverity. Aborting analysis.')
+            return 1
+
+        self.cov_analysis_url = cov_config.get('package_url')
+        self.cov_package_name = cov_config.get('package_name')
+        self.cov_url = cov_config.get('server_url')
+        # In case we don't have a port in the secret we use the default one,
+        # for a default coverity deployment.
+        self.cov_port = cov_config.get('server_port', 8443)
+        self.cov_auth = cov_config.get('auth_key')
+        self.cov_package_ver = cov_config.get('package_ver')
+        self.cov_full_stack = cov_config.get('full_stack', False)
+
+        return 0
+
+    def download_coverity(self):
+        if self.cov_url is None or self.cov_port is None or \
+                self.cov_analysis_url is None or \
+                self.cov_auth is None:
+            self.log(logging.ERROR, 'static-analysis', {}, 'Missing Coverity secret on try job!')
+            return 1
+
+        COVERITY_CONFIG = '''
+        {
+            "type": "Coverity configuration",
+            "format_version": 1,
+            "settings": {
+            "server": {
+                "host": "%s",
+                "ssl" : true,
+                "port": %s,
+                "on_new_cert" : "trust",
+                "auth_key_file": "%s"
+            },
+            "stream": "Firefox",
+            "cov_run_desktop": {
+                "build_cmd": [],
+                "clean_cmd": []
+            }
+            }
+        }
+        '''
+        # Generate the coverity.conf and auth files
+        cov_auth_path = mozpath.join(self.cov_state_path, 'auth')
+        cov_setup_path = mozpath.join(self.cov_state_path, 'coverity.conf')
+        cov_conf = COVERITY_CONFIG % (self.cov_url, self.cov_port, cov_auth_path)
+
+        def download(artifact_url, target):
+            import requests
+            resp = requests.get(artifact_url, verify=False, stream=True)
+            resp.raise_for_status()
+
+            # Extract archive into destination
+            with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
+                tar.extractall(target)
+
+        download(self.cov_analysis_url, self.cov_state_path)
+
+        with open(cov_auth_path, 'w') as f:
+            f.write(self.cov_auth)
+
+        # Restrict its permissions to 600
+        os.chmod(cov_auth_path, 0o600)
+
+        with open(cov_setup_path, 'a') as f:
+            f.write(cov_conf)
+
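+    # With illustrative secret values, the rendered coverity.conf would carry
+    # e.g. "host": "coverity.example.com" and "port": 8443, with
+    # auth_key_file pointing at the `auth` file written above (placeholders,
+    # not real deployment details).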
+    def setup_coverity(self, force_download=True):
+        rc, config, _ = self._get_config_environment()
+        rc = rc or self.get_coverity_secrets()
+
+        if rc != 0:
+            return rc
+
+        # Create a directory in mozbuild where we setup coverity
+        self.cov_state_path = mozpath.join(self._mach_context.state_dir, "coverity")
+
+        if force_download is True and os.path.exists(self.cov_state_path):
+            shutil.rmtree(self.cov_state_path)
+
+        os.mkdir(self.cov_state_path)
+
+        # Download everything that we need for Coverity from our private instance
+        self.download_coverity()
+
+        self.cov_path = mozpath.join(self.cov_state_path, self.cov_package_name)
+        self.cov_run_desktop = mozpath.join(self.cov_path, 'bin', 'cov-run-desktop')
+        self.cov_translate = mozpath.join(self.cov_path, 'bin', 'cov-translate')
+        self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
+        self.cov_work_path = mozpath.join(self.cov_state_path, 'data-coverity')
+        self.cov_idir_path = mozpath.join(self.cov_work_path, self.cov_package_ver, 'idir')
+
+        if not os.path.exists(self.cov_path):
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Missing Coverity in {}'.format(self.cov_path))
+            return 1
+
+        return 0
+
+    def cov_is_file_in_source(self, abs_path, source):
+        # We get an absolute path as input; if it is a symlink we resolve it,
+        # then match it against the elements of source.
+        # If a match is found we return abs_path, otherwise None.
+        assert isinstance(source, list)
+        if os.path.islink(abs_path):
+            abs_path = os.path.realpath(abs_path)
+        if abs_path in source:
+            return abs_path
+        return None
+
+    def get_files_with_commands(self, source):
+        '''
+        Returns a list of dictionaries pairing each file path with its build command
+        '''
+
+        compile_db = json.load(open(self._compile_db, 'r'))
+
+        commands_list = []
+
+        for f in source:
+            # It must be a C/C++ file
+            _, ext = os.path.splitext(f)
+
+            if ext.lower() not in self._format_include_extensions:
+                self.log(logging.INFO, 'static-analysis', {}, 'Skipping {}'.format(f))
+                continue
+            file_with_abspath = os.path.join(self.topsrcdir, f)
+            for entry in compile_db:
+                # Found the file that we are looking for
+                if file_with_abspath == entry['file']:
+                    commands_list.append(entry)
+
+        return commands_list
+
+    @StaticAnalysisSubCommand('static-analysis', 'check-java',
+                              'Run infer on the java codebase.')
+    @CommandArgument('source', nargs='*', default=['mobile'],
+                     help='Source files to be analyzed. '
+                          'Can be omitted, in which case the entire code base '
+                          'is analyzed.  The source argument is ignored if '
+                          'there is anything fed through stdin, in which case '
+                          'the analysis is only performed on the files changed '
+                          'in the patch streamed through stdin.  This is called '
+                          'the diff mode.')
+    @CommandArgument('--checks', '-c', default=[], metavar='checks', nargs='*',
+                     help='Static analysis checks to enable.')
+    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
+                     help='Number of concurrent jobs to run.'
+                     ' Default is the number of CPUs.')
+    @CommandArgument('--task', '-t', type=str,
+                     default='compileWithGeckoBinariesDebugSources',
+                     help='Which gradle tasks to use to compile the java codebase.')
+    @CommandArgument('--outgoing', default=False, action='store_true',
+                     help='Run infer checks on outgoing files from repository')
+    @CommandArgument('--output', default=None,
+                     help='Write infer json output in a file')
+    def check_java(self, source=['mobile'], jobs=2, strip=1, verbose=False, checks=[],
+                   task='compileWithGeckoBinariesDebugSources',
+                   skip_export=False, outgoing=False, output=None):
+        self._set_log_level(verbose)
+        self.log_manager.enable_all_structured_loggers()
+        if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
+            self.log(logging.WARNING, 'static-analysis', {},
+                     'Cannot check java source code unless you are building for android!')
+            return 1
+        rc = self._check_for_java()
+        if rc != 0:
+            return 1
+        if output is not None:
+            output = os.path.abspath(output)
+            if not os.path.isdir(os.path.dirname(output)):
+                self.log(logging.WARNING, 'static-analysis', {},
+                         'Missing report destination folder for {}'.format(output))
+
+        # if source contains the whole mobile folder, then we just have to
+        # analyze everything
+        check_all = any(i.rstrip(os.sep).split(os.sep)[-1] == 'mobile' for i in source)
+        # gather all java sources from the source variable
+        java_sources = []
+        if outgoing:
+            repo = get_repository_object(self.topsrcdir)
+            java_sources = self._get_java_files(repo.get_outgoing_files())
+            if not java_sources:
+                self.log(logging.WARNING, 'static-analysis', {},
+                         'No outgoing Java files to check')
+                return 0
+        elif not check_all:
+            java_sources = self._get_java_files(source)
+            if not java_sources:
+                return 0
+        if not skip_export:
+            rc = self._build_export(jobs=jobs, verbose=verbose)
+            if rc != 0:
+                return rc
+        rc = self._get_infer(verbose=verbose)
+        if rc != 0:
+            self.log(logging.WARNING, 'static-analysis', {},
+                     'This command is only available for linux64!')
+            return rc
+        # which checkers to use, and which folders to exclude
+        all_checkers, third_party_path = self._get_infer_config()
+        checkers, excludes = self._get_infer_args(
+            checks=checks or all_checkers,
+            third_party_path=third_party_path
+        )
+        rc = rc or self._gradle(['clean'])  # clean so that we can recompile
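+        # infer works in two phases: 'capture' wraps the build (gradle here) to
+        # record the compilation commands, and 'analyze' then runs the enabled
+        # checkers over the captured data.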
+        # infer capture command
+        capture_cmd = [self._infer_path, 'capture'] + excludes + ['--']
+        rc = rc or self._gradle([task], infer_args=capture_cmd, verbose=verbose)
+        tmp_file, args = self._get_infer_source_args(java_sources)
+        # infer analyze command
+        analysis_cmd = [self._infer_path, 'analyze', '--keep-going'] + \
+            checkers + args
+        rc = rc or self.run_process(args=analysis_cmd, cwd=self.topsrcdir, pass_thru=True)
+        if tmp_file:
+            tmp_file.close()
+
+        # Copy the infer report
+        report_path = os.path.join(self.topsrcdir, 'infer-out', 'report.json')
+        if output is not None and os.path.exists(report_path):
+            shutil.copy(report_path, output)
+            self.log(logging.INFO, 'static-analysis', {},
+                     'Report available in {}'.format(output))
+
+        return rc
+
+    def _get_java_files(self, sources):
+        java_sources = []
+        for i in sources:
+            f = mozpath.join(self.topsrcdir, i)
+            if os.path.isdir(f):
+                for root, dirs, files in os.walk(f):
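+                    # Sort in place so os.walk visits directories in a
+                    # deterministic order across runs.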
+                    dirs.sort()
+                    for file in sorted(files):
+                        if file.endswith('.java'):
+                            java_sources.append(mozpath.join(root, file))
+            elif f.endswith('.java'):
+                java_sources.append(f)
+        return java_sources
+
+    def _get_infer_source_args(self, sources):
+        '''Return the arguments to only analyze <sources>'''
+        if not sources:
+            return (None, [])
+        # create a temporary file in which we place all sources
+        # this is used by the analysis command to only analyze certain files
+        f = tempfile.NamedTemporaryFile(mode='wt')
+        for source in sources:
+            f.write(source + '\n')
+        f.flush()
+        return (f, ['--changed-files-index', f.name])
+
+    def _get_infer_config(self):
+        '''Load the infer config file.'''
+        checkers = []
+        tp_path = ''
+        with open(mozpath.join(self.topsrcdir, 'tools',
+                               'infer', 'config.yaml')) as f:
+            try:
+                config = yaml.safe_load(f)
+                for item in config['infer_checkers']:
+                    if item['publish']:
+                        checkers.append(item['name'])
+                tp_path = mozpath.join(self.topsrcdir, config['third_party'])
+            except Exception:
+                print('Looks like config.yaml is not valid, so we are unable '
+                      'to determine the default checkers and the folder to '
+                      'exclude; using the defaults provided by infer')
+        return checkers, tp_path
+
+    def _get_infer_args(self, checks, third_party_path):
+        '''Return the arguments that enable the checkers <checks> and
+        exclude every folder listed in <third_party_path>.'''
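+        # '-a checkers' selects infer's checkers analyzer; each requested check
+        # is then enabled explicitly with a '--<check>' flag.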
+        checkers = ['-a', 'checkers']
+        excludes = []
+        for checker in checks:
+            checkers.append('--' + checker)
+        with open(third_party_path) as f:
+            for line in f:
+                excludes.append('--skip-analysis-in-path')
+                excludes.append(line.strip('\n'))
+        return checkers, excludes
+
+    def _get_clang_tidy_config(self):
+        try:
+            config_path = mozpath.join(self.topsrcdir, "tools", "clang-tidy", "config.yaml")
+            with open(config_path) as file_handler:
+                config = yaml.safe_load(file_handler)
+        except Exception:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Looks like config.yaml is not valid; we are going to use default'
+                     ' values for the rest of the clang-tidy analysis.')
+            return None
+        return config
+
+    def _get_cov_config(self):
+        try:
+            config_path = mozpath.join(self.topsrcdir, "tools", "coverity", "config.yaml")
+            with open(config_path) as file_handler:
+                config = yaml.safe_load(file_handler)
+        except Exception:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'Looks like config.yaml is not valid; we are going to use default'
+                     ' values for the rest of the coverity analysis.')
+            return None
+        return config
+
+    def _is_version_eligible(self):
+        # make sure that we've cached self._clang_tidy_config
+        if self._clang_tidy_config is None:
+            self._clang_tidy_config = self._get_clang_tidy_config()
+
+        version = None
+        if 'package_version' in self._clang_tidy_config:
+            version = self._clang_tidy_config['package_version']
+        else:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "Unable to find 'package_version' in the config.yml")
+            return False
+
+        # Because clang-tidy and clang-format are shipped together, they
+        # always share the same version. Thus, to determine that the version
+        # is compatible, we only need to check one of them; we go with
+        # clang-format.
+        cmd = [self._clang_format_path, '--version']
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
+            version_string = 'clang-format version ' + version
+            if output.startswith(version_string):
+                return True
+        except subprocess.CalledProcessError as e:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "Error determining the version clang-tidy/format binary, please see the "
+                     "attached exception: \n{}".format(e.output))
+
+        return False
+
+    def _get_clang_tidy_command(self, checks, header_filter, sources, jobs, fix):
+
+        if checks == '-*':
+            checks = self._get_checks()
+
+        common_args = ['-clang-tidy-binary', self._clang_tidy_path,
+                       '-clang-apply-replacements-binary', self._clang_apply_replacements,
+                       '-checks=%s' % checks,
+                       '-extra-arg=-DMOZ_CLANG_PLUGIN']
+
+        # The -header-filter flag limits the diagnostic messages to the
+        # specified header files. When no value is given, it defaults to the
+        # sources themselves, so the diagnostics are limited to the given
+        # source files or folders.
+        common_args += ['-header-filter=%s' % (header_filter
+                                               if len(header_filter) else '|'.join(sources))]
+
+        # From our configuration file, config.yaml, we build the configuration list, for
+        # the checkers that are used. These configuration options are used to better fit
+        # the checkers to our code.
+        cfg = self._get_checks_config()
+        if cfg:
+            common_args += ['-config=%s' % yaml.dump(cfg)]
+
+        if fix:
+            common_args += ['-fix']
+
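+        # The assembled command resembles (illustrative):
+        #   python run-clang-tidy.py -j <jobs> -p <compile-commands-dir>
+        #     -clang-tidy-binary ... -checks=... [-fix] <sources>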
+        return [
+            self.virtualenv_manager.python_path, self._run_clang_tidy_path, '-j',
+            str(jobs), '-p', self._compilation_commands_path
+        ] + common_args + sources
+
+    def _check_for_java(self):
+        '''Check if javac can be found.'''
+        import distutils.spawn
+        java = self.substs.get('JAVA')
+        java = java or os.getenv('JAVA_HOME')
+        java = java or distutils.spawn.find_executable('javac')
+        error = 'javac was not found! Please install javac and either add it to your PATH, '
+        error += 'set JAVA_HOME, or add the following to your mozconfig:\n'
+        error += '  --with-java-bin-path=/path/to/java/bin/'
+        if not java:
+            self.log(logging.ERROR, 'ERROR: static-analysis', {}, error)
+            return 1
+        return 0
+
+    def _gradle(self, args, infer_args=None, verbose=False, autotest=False,
+                suppress_output=True):
+        infer_args = infer_args or []
+        if autotest:
+            cwd = mozpath.join(self.topsrcdir, 'tools', 'infer', 'test')
+            gradle = mozpath.join(cwd, 'gradlew')
+        else:
+            gradle = self.substs['GRADLE']
+            cwd = self.topsrcdir
+        extra_env = {
+            'GRADLE_OPTS': '-Dfile.encoding=utf-8',  # see mobile/android/mach_commands.py
+            'JAVA_TOOL_OPTIONS': '-Dfile.encoding=utf-8',
+        }
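+        # Note the argument order below: with infer_args set, the invocation is
+        # 'infer capture ... -- gradle <tasks>', i.e. infer wraps the gradle
+        # build in order to observe the javac invocations.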
+        if suppress_output:
+            devnull = open(os.devnull, 'w')
+            return subprocess.call(
+                infer_args + [gradle] + args,
+                env=dict(os.environ, **extra_env),
+                cwd=cwd, stdout=devnull, stderr=subprocess.STDOUT, close_fds=True)
+
+        return self.run_process(
+            infer_args + [gradle] + args,
+            append_env=extra_env,
+            pass_thru=True,  # Allow user to run gradle interactively.
+            ensure_exit_code=False,  # Don't throw on non-zero exit code.
+            cwd=cwd)
+
+    @StaticAnalysisSubCommand('static-analysis', 'autotest',
+                              'Run the auto-test suite in order to determine that'
+                              ' the analysis did not regress.')
+    @CommandArgument('--dump-results', '-d', default=False, action='store_true',
+                     help='Generate the baseline for the regression test. Based on'
+                     ' this baseline we will test future results.')
+    @CommandArgument('--intree-tool', '-i', default=False, action='store_true',
+                     help='Use a pre-acquired in-tree clang-tidy package.')
+    @CommandArgument('checker_names', nargs='*', default=[],
+                     help='Checkers that are going to be auto-tested.')
+    def autotest(self, verbose=False, dump_results=False, intree_tool=False, checker_names=[]):
+        # If 'dump_results' is True then we just want to generate the issues files for each
+        # checker in particular, and thus 'force_download' becomes 'False' since we want to
+        # do this on a local trusted clang-tidy package.
+        self._set_log_level(verbose)
+        self._dump_results = dump_results
+
+        force_download = not self._dump_results
+
+        # Function return codes
+        self.TOOLS_SUCCESS = 0
+        self.TOOLS_FAILED_DOWNLOAD = 1
+        self.TOOLS_UNSUPPORTED_PLATFORM = 2
+        self.TOOLS_CHECKER_NO_TEST_FILE = 3
+        self.TOOLS_CHECKER_RETURNED_NO_ISSUES = 4
+        self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND = 5
+        self.TOOLS_CHECKER_DIFF_FAILED = 6
+        self.TOOLS_CHECKER_NOT_FOUND = 7
+        self.TOOLS_CHECKER_FAILED_FILE = 8
+        self.TOOLS_CHECKER_LIST_EMPTY = 9
+        self.TOOLS_GRADLE_FAILED = 10
+
+        # Configure the tree or download clang-tidy package, depending on the option that we choose
+        if intree_tool:
+            _, config, _ = self._get_config_environment()
+            clang_tools_path = self.topsrcdir
+            self._clang_tidy_path = mozpath.join(
+                clang_tools_path, "clang-tidy", "bin",
+                "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
+            self._clang_format_path = mozpath.join(
+                clang_tools_path, "clang-tidy", "bin",
+                "clang-format" + config.substs.get('BIN_SUFFIX', ''))
+            self._clang_apply_replacements = mozpath.join(
+                clang_tools_path, "clang-tidy", "bin",
+                "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
+            self._run_clang_tidy_path = mozpath.join(clang_tools_path, "clang-tidy", "share",
+                                                     "clang", "run-clang-tidy.py")
+            self._clang_format_diff = mozpath.join(clang_tools_path, "clang-tidy", "share",
+                                                   "clang", "clang-format-diff.py")
+
+            # Ensure that clang-tidy is present
+            rc = not os.path.exists(self._clang_tidy_path)
+        else:
+            rc = self._get_clang_tools(force=force_download, verbose=verbose)
+
+        if rc != 0:
+            self.log(logging.ERROR, 'ERROR: static-analysis', {},
+                     'Unable to locate the clang-tidy package.')
+            return self.TOOLS_FAILED_DOWNLOAD
+
+        self._clang_tidy_base_path = mozpath.join(self.topsrcdir, "tools", "clang-tidy")
+
+        # Load the clang-tidy config, then run each checker
+        self._clang_tidy_config = self._get_clang_tidy_config()
+        platform, _ = self.platform
+
+        if platform not in self._clang_tidy_config['platforms']:
+            self.log(
+                logging.ERROR, 'static-analysis', {},
+                "RUNNING: clang-tidy autotest for platform {} not supported.".format(
+                    platform)
+                )
+            return self.TOOLS_UNSUPPORTED_PLATFORM
+
+        import concurrent.futures
+        import multiprocessing
+
+        max_workers = multiprocessing.cpu_count()
+
+        self.log(logging.INFO, 'static-analysis', {},
+                 "RUNNING: clang-tidy autotest for platform {0} with {1} workers.".format(
+                     platform, max_workers))
+
+        # List all available checkers
+        cmd = [self._clang_tidy_path, '-list-checks', '-checks=*']
+        clang_output = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('utf-8')
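+        # The first line of the output is a header ('Enabled checks:'), so it
+        # is dropped by the [1:] slice below.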
+        available_checks = clang_output.split('\n')[1:]
+        self._clang_tidy_checks = [c.strip() for c in available_checks if c]
+
+        # Build the dummy compile_commands.json
+        self._compilation_commands_path = self._create_temp_compilation_db(self._clang_tidy_config)
+        checkers_test_batch = []
+        checkers_results = []
+        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+            futures = []
+            for item in self._clang_tidy_config['clang_checkers']:
+                # Skip if any of the following statements is true:
+                # 1. Checker attribute 'publish' is False.
+                not_published = not bool(item.get('publish', True))
+                # 2. Checker has restricted-platforms and current platform is not of them.
+                ignored_platform = ('restricted-platforms' in item and
+                                    platform not in item['restricted-platforms'])
+                # 3. Checker name is mozilla-* or -*.
+                ignored_checker = item['name'] in ['mozilla-*', '-*']
+                # 4. List checker_names is passed and the current checker is not part of the
+                #    list or 'publish' is False
+                checker_not_in_list = checker_names and (
+                    item['name'] not in checker_names or not_published)
+                if not_published or \
+                   ignored_platform or \
+                   ignored_checker or \
+                   checker_not_in_list:
+                    continue
+                checkers_test_batch.append(item['name'])
+                futures.append(executor.submit(self._verify_checker, item, checkers_results))
+
+            error_code = self.TOOLS_SUCCESS
+            for future in concurrent.futures.as_completed(futures):
+                # Wait for every task to finish
+                ret_val = future.result()
+                if ret_val != self.TOOLS_SUCCESS:
+                    # We are interested in only one error, but we don't break
+                    # out of the loop since we want to make sure that all
+                    # tasks finish.
+                    error_code = ret_val
+
+            if error_code != self.TOOLS_SUCCESS:
+                self.log(logging.INFO, 'static-analysis', {},
+                         "FAIL: the following clang-tidy check(s) failed:")
+                for failure in checkers_results:
+                    checker_error = failure['checker-error']
+                    checker_name = failure['checker-name']
+                    info1 = failure['info1']
+                    info2 = failure['info2']
+                    info3 = failure['info3']
+
+                    message_to_log = ''
+                    if checker_error == self.TOOLS_CHECKER_NOT_FOUND:
+                        message_to_log = \
+                            "\tChecker {} not present in this clang-tidy version.".format(
+                                checker_name)
+                    elif checker_error == self.TOOLS_CHECKER_NO_TEST_FILE:
+                        message_to_log = \
+                            "\tChecker {0} does not have a test file - {0}.cpp".format(
+                                checker_name)
+                    elif checker_error == self.TOOLS_CHECKER_RETURNED_NO_ISSUES:
+                        message_to_log = (
+                            "\tChecker {0} did not find any issues in its test file, "
+                            "clang-tidy output for the run is:\n{1}"
+                            ).format(checker_name, info1)
+                    elif checker_error == self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND:
+                        message_to_log = \
+                            "\tChecker {0} does not have a result file - {0}.json".format(
+                                checker_name)
+                    elif checker_error == self.TOOLS_CHECKER_DIFF_FAILED:
+                        message_to_log = (
+                            "\tChecker {0}\nExpected: {1}\n"
+                            "Got: {2}\n"
+                            "clang-tidy output for the run is:\n"
+                            "{3}"
+                            ).format(checker_name, info1, info2, info3)
+
+                    print('\n' + message_to_log)
+
+                # Also delete the tmp folder
+                shutil.rmtree(self._compilation_commands_path)
+                return error_code
+
+            # Run the analysis on all checkers at the same time only if we don't dump results.
+            if not self._dump_results:
+                ret_val = self._run_analysis_batch(checkers_test_batch)
+                if ret_val != self.TOOLS_SUCCESS:
+                    shutil.rmtree(self._compilation_commands_path)
+                    return ret_val
+
+        self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: clang-tidy all tests passed.")
+        # Also delete the tmp folder
+        shutil.rmtree(self._compilation_commands_path)
+        return self._autotest_infer(intree_tool, force_download, verbose)
+
+    def _run_analysis(self, checks, header_filter, sources, jobs=1, fix=False, print_out=False):
+        cmd = self._get_clang_tidy_command(
+            checks=checks, header_filter=header_filter,
+            sources=sources,
+            jobs=jobs, fix=fix)
+
+        try:
+            clang_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
+        except subprocess.CalledProcessError as e:
+            print(e.output)
+            return None
+        return self._parse_issues(clang_output), clang_output
+
+    def _run_analysis_batch(self, items):
+        self.log(logging.INFO, 'static-analysis', {},
+                 "RUNNING: clang-tidy checker batch analysis.")
+        if not items:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "ERROR: clang-tidy checker list is empty!")
+            return self.TOOLS_CHECKER_LIST_EMPTY
+
+        issues, clang_output = self._run_analysis(
+            checks='-*,' + ",".join(items),
+            header_filter='',
+            sources=[mozpath.join(self._clang_tidy_base_path, "test", checker) + '.cpp'
+                     for checker in items],
+            print_out=True)
+
+        if issues is None:
+            return self.TOOLS_CHECKER_FAILED_FILE
+
+        failed_checks = []
+        failed_checks_baseline = []
+        for checker in items:
+            test_file_path_json = mozpath.join(
+                self._clang_tidy_base_path, "test", checker) + '.json'
+            # Read the pre-determined issues
+            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+
+            # We also stored the 'reliability' index, so strip it from the baseline_issues
+            baseline_issues[:] = [item for item in baseline_issues if 'reliability' not in item]
+
+            found = all([element_base in issues for element_base in baseline_issues])
+
+            if not found:
+                failed_checks.append(checker)
+                failed_checks_baseline.append(baseline_issues)
+
+        if len(failed_checks) > 0:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     'The following check(s) failed for bulk analysis: ' + ' '.join(failed_checks))
+
+            for failed_check, baseline_issue in zip(failed_checks, failed_checks_baseline):
+                print('\tChecker {0} expects the following results:\n\t\t{1}'.format(
+                    failed_check, baseline_issue))
+
+            print('This is the output generated by clang-tidy for the bulk build:\n{}'.format(
+                clang_output))
+            return self.TOOLS_CHECKER_DIFF_FAILED
+
+        return self.TOOLS_SUCCESS
+
+    def _create_temp_compilation_db(self, config):
+        directory = tempfile.mkdtemp(prefix='cc')
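+        # Each generated entry follows the clang JSON compilation database
+        # format, e.g. (illustrative checker name):
+        #   {"directory": "<srcdir>/tools/clang-tidy/test",
+        #    "command": "cpp some-checker.cpp",
+        #    "file": "<srcdir>/tools/clang-tidy/test/some-checker.cpp"}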
+        with open(mozpath.join(directory, "compile_commands.json"), "wb") as file_handler:
+            compile_commands = []
+            director = mozpath.join(self.topsrcdir, 'tools', 'clang-tidy', 'test')
+            for item in config['clang_checkers']:
+                if item['name'] in ['-*', 'mozilla-*']:
+                    continue
+                file = item['name'] + '.cpp'
+                element = {}
+                element["directory"] = director
+                element["command"] = 'cpp ' + file
+                element["file"] = mozpath.join(director, file)
+                compile_commands.append(element)
+
+            json.dump(compile_commands, file_handler)
+            file_handler.flush()
+
+            return directory
+
+    def _autotest_infer(self, intree_tool, force_download, verbose):
+        # infer is not available on other platforms, but autotest should work even without
+        # it being installed
+        if self.platform[0] == 'linux64':
+            rc = self._check_for_java()
+            if rc != 0:
+                return 1
+            rc = self._get_infer(force=force_download, verbose=verbose, intree_tool=intree_tool)
+            if rc != 0:
+                self.log(logging.ERROR, 'ERROR: static-analysis', {},
+                         'infer unable to locate package.')
+                return self.TOOLS_FAILED_DOWNLOAD
+            self.__infer_tool = mozpath.join(self.topsrcdir, 'tools', 'infer')
+            self.__infer_test_folder = mozpath.join(self.__infer_tool, 'test')
+
+            import concurrent.futures
+            import multiprocessing
+            max_workers = multiprocessing.cpu_count()
+            self.log(logging.INFO, 'static-analysis', {},
+                     "RUNNING: infer autotest for platform {0} with {1} workers.".format(
+                         self.platform[0], max_workers))
+            # clean previous autotest if it exists
+            rc = self._gradle(['autotest:clean'], autotest=True)
+            if rc != 0:
+                return rc
+            import yaml
+            with open(mozpath.join(self.__infer_tool, 'config.yaml')) as f:
+                config = yaml.safe_load(f)
+            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+                futures = []
+                for item in config['infer_checkers']:
+                    if item['publish']:
+                        futures.append(executor.submit(self._verify_infer_checker, item))
+                # this is always included in check-java, but not in config.yaml
+                futures.append(executor.submit(self._verify_infer_checker,
+                                               {'name': 'checkers'}))
+                for future in concurrent.futures.as_completed(futures):
+                    ret_val = future.result()
+                    if ret_val != self.TOOLS_SUCCESS:
+                        return ret_val
+            self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: infer all tests passed.")
+        else:
+            self.log(logging.WARNING, 'static-analysis', {},
+                     "Skipping infer autotest, because it is only available on linux64!")
+        return self.TOOLS_SUCCESS
+
+    def _verify_infer_checker(self, item):
+        '''Given a checker, this method verifies the following:
+          1. if there is a `checker`.json and `checker`.java file in
+             `tools/infer/test/autotest/src`
+          2. if running infer on `checker`.java yields the same result as `checker`.json
+        An `item` is simply a dictionary, which needs to have a `name` field set, which is the
+        name of the checker.
+        '''
+        def to_camelcase(value):
+            return ''.join([s.capitalize() for s in value.split('-')])
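+        # e.g. to_camelcase('some-check') == 'SomeCheck' (illustrative name)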
+        check = item['name']
+        test_file_path = mozpath.join(self.__infer_tool, 'test', 'autotest', 'src',
+                                      'main', 'java', to_camelcase(check))
+        test_file_path_java = test_file_path + '.java'
+        test_file_path_json = test_file_path + '.json'
+        self.log(logging.INFO, 'static-analysis', {}, "RUNNING: infer check {}.".format(check))
+        # Verify if the test file exists for this checker
+        if not os.path.exists(test_file_path_java):
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "ERROR: infer check {} doesn't have a test file.".format(check))
+            return self.TOOLS_CHECKER_NO_TEST_FILE
+        # run infer on a particular test file
+        out_folder = mozpath.join(self.__infer_test_folder, 'test-infer-{}'.format(check))
+        if check == 'checkers':
+            check_arg = ['-a', 'checkers']
+        else:
+            check_arg = ['--{}-only'.format(check)]
+        infer_args = [self._infer_path, 'run'] + check_arg + ['-o', out_folder, '--']
+        gradle_args = ['autotest:compileInferTest{}'.format(to_camelcase(check))]
+        rc = self._gradle(gradle_args, infer_args=infer_args, autotest=True)
+        if rc != 0:
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "ERROR: infer failed to execute gradle {}.".format(gradle_args))
+            return self.TOOLS_GRADLE_FAILED
+        with open(mozpath.join(out_folder, 'report.json')) as report_file:
+            issues = json.load(report_file)
+        # remove folder that infer creates because the issues are loaded into memory
+        shutil.rmtree(out_folder)
+        # Verify to see if we got any issues, if not raise exception
+        if not issues:
+            self.log(
+                logging.ERROR, 'static-analysis', {},
+                "ERROR: infer check {0} did not find any issues in its associated test suite."
+                .format(check)
+            )
+            return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+        if self._dump_results:
+            self._build_autotest_result(test_file_path_json, issues)
+        else:
+            if not os.path.exists(test_file_path_json):
+                # Result file for the test was not found; maybe regenerate it?
+                self.log(
+                    logging.ERROR, 'static-analysis', {},
+                    "ERROR: infer result file not found for check {0}".format(check)
+                )
+                return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+            # Read the pre-determined issues
+            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+
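+            # 'ordered' recursively sorts nested dicts and lists so the
+            # comparison below is insensitive to ordering differences.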
+            def ordered(obj):
+                if isinstance(obj, dict):
+                    return sorted((k, ordered(v)) for k, v in obj.items())
+                if isinstance(obj, list):
+                    return sorted(ordered(x) for x in obj)
+                return obj
+            # Compare the two lists
+            if ordered(issues) != ordered(baseline_issues):
+                error_str = "ERROR: in check {} Expected: ".format(check)
+                error_str += '\n' + json.dumps(baseline_issues, indent=2)
+                error_str += '\n Got:\n' + json.dumps(issues, indent=2)
+                self.log(logging.ERROR, 'static-analysis', {},
+                         'ERROR: infer autotest for check {} failed, check stdout for more details'
+                         .format(check))
+                print(error_str)
+                return self.TOOLS_CHECKER_DIFF_FAILED
+        return self.TOOLS_SUCCESS
+
+    @StaticAnalysisSubCommand('static-analysis', 'install',
+                              'Install the static analysis helper tool')
+    @CommandArgument('source', nargs='?', type=str,
+                     help='Where to fetch a local archive containing the static-analysis and '
+                     'format helper tool. '
+                          'It will be installed in ~/.mozbuild/clang-tools and ~/.mozbuild/infer. '
+                          'Can be omitted, in which case the latest clang-tools and infer '
+                          'helper for the platform will be automatically detected and installed.')
+    @CommandArgument('--skip-cache', action='store_true',
+                     help='Skip all local caches to force re-fetching the helper tool.',
+                     default=False)
+    @CommandArgument('--force', action='store_true',
+                     help='Force re-install even though the tool exists in mozbuild.',
+                     default=False)
+    @CommandArgument('--minimal-install', action='store_true',
+                     help='Download only clang based tool.',
+                     default=False)
+    def install(self, source=None, skip_cache=False, force=False, minimal_install=False,
+                verbose=False):
+        self._set_log_level(verbose)
+        rc = self._get_clang_tools(force=force, skip_cache=skip_cache,
+                                   source=source, verbose=verbose)
+        if rc == 0 and not minimal_install:
+            # XXX ignore the return code because if it fails or not, infer is
+            # not mandatory, but clang-tidy is
+            self._get_infer(force=force, skip_cache=skip_cache, verbose=verbose)
+        return rc
+
+    @StaticAnalysisSubCommand('static-analysis', 'clear-cache',
+                              'Delete local helpers and reset static analysis helper tool cache')
+    def clear_cache(self, verbose=False):
+        self._set_log_level(verbose)
+        rc = self._get_clang_tools(force=True, download_if_needed=True, skip_cache=True,
+                                   verbose=verbose)
+        if rc == 0:
+            self._get_infer(force=True, download_if_needed=True, skip_cache=True,
+                            verbose=verbose)
+        if rc != 0:
+            return rc
+        return self._artifact_manager.artifact_clear_cache()
+
+    @StaticAnalysisSubCommand('static-analysis', 'print-checks',
+                              'Print a list of the static analysis checks performed by default')
+    def print_checks(self, verbose=False):
+        self._set_log_level(verbose)
+        rc = self._get_clang_tools(verbose=verbose)
+        if rc == 0:
+            rc = self._get_infer(verbose=verbose)
+        if rc != 0:
+            return rc
+        args = [self._clang_tidy_path, '-list-checks', '-checks=%s' % self._get_checks()]
+        rc = self._run_command_in_objdir(args=args, pass_thru=True)
+        if rc != 0:
+            return rc
+        checkers, _ = self._get_infer_config()
+        print('Infer checks:')
+        for checker in checkers:
+            print(' '*4 + checker)
+        return 0
+
+    @Command('clang-format',  category='misc', description='Run clang-format on current changes')
+    @CommandArgument('--show', '-s', action='store_const', const='stdout', dest='output_path',
+                     help='Show diff output on stdout instead of applying changes')
+    @CommandArgument('--assume-filename', '-a', nargs=1, default=None,
+                     help='This option is usually used in the context of hg-formatsource. '
+                          'When reading from stdin, clang-format assumes this '
+                          'filename to look for a style config file (with '
+                          '-style=file) and to determine the language. When '
+                          'specifying this option only one file should be used '
+                          'as an input and the output will be forwarded to stdout. '
+                          'This option also skips the download of the clang-tools '
+                          'package and assumes it is already located in its default '
+                          'location')
+    @CommandArgument('--path', '-p', nargs='+', default=None,
+                     help='Specify the path(s) to reformat')
+    @CommandArgument('--commit', '-c', default=None,
+                     help='Specify a commit to reformat from. '
+                          'For git you can also pass a range of commits (foo..bar) '
+                          'to format all of them at the same time.')
+    @CommandArgument('--output', '-o', default=None, dest='output_path',
+                     help='Specify a file handle to write clang-format raw output instead of '
+                          'applying changes. This can be stdout or a file path.')
+    @CommandArgument('--format', '-f', choices=('diff', 'json'), default='diff',
+                     dest='output_format',
+                     help='Specify the output format used: diff is the raw patch provided by '
+                     'clang-format, json is a list of atomic changes to process.')
+    @CommandArgument('--outgoing', default=False, action='store_true',
+                     help='Run clang-format on outgoing files from mercurial repository')
+    def clang_format(self, assume_filename, path, commit, output_path=None, output_format='diff',
+                     verbose=False, outgoing=False):
+        # Run clang-format or clang-format-diff on the local changes
+        # or files/directories
+        if path is None and outgoing:
+            repo = get_repository_object(self.topsrcdir)
+            path = repo.get_outgoing_files()
+
+        if path:
+            # Create the full path list
+            path = [os.path.join(self.topsrcdir, f_name) for f_name in path]
+
+        os.chdir(self.topsrcdir)
+
+        # Load output file handle, either stdout or a file handle in write mode
+        output = None
+        if output_path is not None:
+            output = sys.stdout if output_path == 'stdout' else open(output_path, 'w')
+
+        # With assume_filename we want to keep stdout clean, since the result of
+        # the format will be redirected to stdout. Only in case of error do we
+        # write something else to stdout.
+        # We don't actually want to fetch the clang-tools here, since in some
+        # scenarios this runs in parallel, so we rely on the fact that the tools
+        # have already been downloaded via './mach bootstrap' or directly via
+        # './mach static-analysis install'.
+        if assume_filename:
+            rc = self._set_clang_tools_paths()
+            if rc != 0:
+                print("clang-format: Unable to set path to clang-format tools.")
+                return rc
+
+            if not self._do_clang_tools_exist():
+                print("clang-format: Unable to set locate clang-format tools.")
+                return 1
+        else:
+            rc = self._get_clang_tools(verbose=verbose)
+            if rc != 0:
+                return rc
+
+        if not self._is_version_eligible():
+            self.log(logging.ERROR, 'static-analysis', {},
+                     "You're using an old version of clang-format binary."
+                     " Please update to a more recent one by running: './mach bootstrap'")
+            return 1
+
+        if path is None:
+            return self._run_clang_format_diff(self._clang_format_diff,
+                                               self._clang_format_path, commit, output)
+
+        if assume_filename:
+            return self._run_clang_format_in_console(self._clang_format_path,
+                                                     path, assume_filename)
+
+        return self._run_clang_format_path(self._clang_format_path, path, output, output_format)
+
+    def _verify_checker(self, item, checkers_results):
+        check = item['name']
+        test_file_path = mozpath.join(self._clang_tidy_base_path, "test", check)
+        test_file_path_cpp = test_file_path + '.cpp'
+        test_file_path_json = test_file_path + '.json'
+
+        self.log(logging.INFO, 'static-analysis', {},
+                 "RUNNING: clang-tidy checker {}.".format(check))
+
+        # Structured information in case a checker fails
+        checker_error = {
+            'checker-name': check,
+            'checker-error': '',
+            'info1': '',
+            'info2': '',
+            'info3': ''
+        }
+
+        # Verify if this checker actually exists
+        if check not in self._clang_tidy_checks:
+            checker_error['checker-error'] = self.TOOLS_CHECKER_NOT_FOUND
+            checkers_results.append(checker_error)
+            return self.TOOLS_CHECKER_NOT_FOUND
+
+        # Verify if the test file exists for this checker
+        if not os.path.exists(test_file_path_cpp):
+            checker_error['checker-error'] = self.TOOLS_CHECKER_NO_TEST_FILE
+            checkers_results.append(checker_error)
+            return self.TOOLS_CHECKER_NO_TEST_FILE
+
+        issues, clang_output = self._run_analysis(
+            checks='-*,' + check, header_filter='', sources=[test_file_path_cpp])
+        if issues is None:
+            return self.TOOLS_CHECKER_FAILED_FILE
+
+        # Verify to see if we got any issues, if not raise exception
+        if not issues:
+            checker_error['checker-error'] = self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+            checker_error['info1'] = clang_output
+            checkers_results.append(checker_error)
+            return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
+
+        # Also store the 'reliability' index for this checker
+        issues.append({'reliability': item['reliability']})
+
+        if self._dump_results:
+            self._build_autotest_result(test_file_path_json, json.dumps(issues))
+        else:
+            if not os.path.exists(test_file_path_json):
+                # Result file for the test was not found; maybe regenerate it?
+                checker_error['checker-error'] = self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+                checkers_results.append(checker_error)
+                return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
+
+            # Read the pre-determined issues
+            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
+
+            # Compare the two lists
+            if issues != baseline_issues:
+                checker_error['checker-error'] = self.TOOLS_CHECKER_DIFF_FAILED
+                checker_error['info1'] = baseline_issues
+                checker_error['info2'] = issues
+                checker_error['info3'] = clang_output
+                checkers_results.append(checker_error)
+                return self.TOOLS_CHECKER_DIFF_FAILED
+
+        return self.TOOLS_SUCCESS
+
+    def _build_autotest_result(self, file, issues):
+        with open(file, 'w') as f:
+            f.write(issues)
+
+    def _get_autotest_stored_issues(self, file):
+        with open(file) as f:
+            return json.load(f)
+
+    def _parse_issues(self, clang_output):
+        '''
+        Parse clang-tidy output into structured issues
+        '''
+
+        # Limit clang output parsing to 'Enabled checks:'
+        end = re.search(r'^Enabled checks:\n', clang_output, re.MULTILINE)
+        if end is not None:
+            clang_output = clang_output[:end.start()-1]
+
+        platform, _ = self.platform
+        # Starting with clang 8, the diagnostic messages contain multiple `LF CR`
+        # sequences in order to be compatible with the msvc compiler format, so
+        # we are not interested in matching the end of the line.
+        regex_string = r'(.+):(\d+):(\d+): (warning|error): ([^\[\]\n]+)(?: \[([\.\w-]+)\])'
+
+        # For non 'win' based platforms we also need the 'end of the line' regex
+        if platform not in ('win64', 'win32'):
+            regex_string += '?$'
+
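+        # The regex matches diagnostic lines such as (illustrative):
+        #   /path/to/file.cpp:42:7: warning: some message [mozilla-some-check]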
+        regex_header = re.compile(regex_string, re.MULTILINE)
+
+        # Sort headers by positions
+        headers = sorted(
+            regex_header.finditer(clang_output),
+            key=lambda h: h.start()
+        )
+        issues = []
+        for header in headers:
+            header_group = header.groups()
+            element = [header_group[3], header_group[4], header_group[5]]
+            issues.append(element)
+        return issues
+
+    def _get_checks(self):
+        checks = '-*'
+        try:
+            config = self._clang_tidy_config
+            for item in config['clang_checkers']:
+                if item.get('publish', True):
+                    checks += ',' + item['name']
+        except Exception:
+            print('Looks like config.yaml is not valid, so we are unable to '
+                  'determine the default checkers; using \'-checks=-*,mozilla-*\'')
+            checks += ',mozilla-*'
+        return checks
+
+    def _get_checks_config(self):
+        config_list = []
+        checker_config = {}
+        try:
+            config = self._clang_tidy_config
+            for checker in config['clang_checkers']:
+                if checker.get('publish', True) and 'config' in checker:
+                    for checker_option in checker['config']:
+                        # Verify if the format of the Option is correct,
+                        # possibilities are:
+                        # 1. CheckerName.Option
+                        # 2. Option -> that will become CheckerName.Option
+                        if not checker_option['key'].startswith(checker['name']):
+                            checker_option['key'] = "{}.{}".format(
+                                checker['name'], checker_option['key'])
+                    config_list += checker['config']
+            checker_config['CheckOptions'] = config_list
+        except Exception:
+            print('Looks like config.yaml is not valid, so we are unable to '
+                  'determine the configuration for checkers; using the default')
+            checker_config = None
+        return checker_config
+
+    def _get_config_environment(self):
+        ran_configure = False
+        config = None
+        builder = Build(self._mach_context)
+
+        try:
+            config = self.config_environment
+        except Exception:
+            print('Looks like configure has not run yet, running it now...')
+            rc = builder.configure()
+            if rc != 0:
+                return (rc, config, ran_configure)
+            ran_configure = True
+            try:
+                config = self.config_environment
+            except Exception:
+                pass
+
+        return (0, config, ran_configure)
+
+    def _build_compile_db(self, verbose=False):
+        self._compile_db = mozpath.join(self.topobjdir, 'compile_commands.json')
+        if os.path.exists(self._compile_db):
+            return 0
+
+        rc, config, ran_configure = self._get_config_environment()
+        if rc != 0:
+            return rc
+
+        if ran_configure:
+            # Configure may have created the compilation database if the
+            # mozconfig enables building the CompileDB backend by default,
+            # So we recurse to see if the file exists once again.
+            return self._build_compile_db(verbose=verbose)
+
+        if config:
+            print('Looks like a clang compilation database has not been '
+                  'created yet, creating it now...')
+            builder = Build(self._mach_context)
+            rc = builder.build_backend(['CompileDB'], verbose=verbose)
+            if rc != 0:
+                return rc
+            assert os.path.exists(self._compile_db)
+            return 0
+
+    def _build_export(self, jobs, verbose=False):
+        def on_line(line):
+            self.log(logging.INFO, 'build_output', {'line': line}, '{line}')
+
+        builder = Build(self._mach_context)
+        # First install what we can through install manifests.
+        rc = builder._run_make(directory=self.topobjdir, target='pre-export',
+                               line_handler=None, silent=not verbose)
+        if rc != 0:
+            return rc
+
+        # Then build the rest of the build dependencies by running the full
+        # export target, because we can't do anything better.
+        return builder._run_make(directory=self.topobjdir, target='export',
+                                 line_handler=None, silent=not verbose,
+                                 num_jobs=jobs)
+
+    def _set_clang_tools_paths(self):
+        rc, config, _ = self._get_config_environment()
+
+        if rc != 0:
+            return rc
+
+        self._clang_tools_path = mozpath.join(self._mach_context.state_dir, "clang-tools")
+        self._clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy", "bin",
+                                             "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
+        self._clang_format_path = mozpath.join(
+            self._clang_tools_path, "clang-tidy", "bin",
+            "clang-format" + config.substs.get('BIN_SUFFIX', ''))
+        self._clang_apply_replacements = mozpath.join(
+            self._clang_tools_path, "clang-tidy", "bin",
+            "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
+        self._run_clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy",
+                                                 "share", "clang", "run-clang-tidy.py")
+        self._clang_format_diff = mozpath.join(self._clang_tools_path, "clang-tidy",
+                                               "share", "clang", "clang-format-diff.py")
+        return 0
+
+    def _do_clang_tools_exist(self):
+        return os.path.exists(self._clang_tidy_path) and \
+               os.path.exists(self._clang_format_path) and \
+               os.path.exists(self._clang_apply_replacements) and \
+               os.path.exists(self._run_clang_tidy_path)
+
+    def _get_clang_tools(self, force=False, skip_cache=False,
+                         source=None, download_if_needed=True,
+                         verbose=False):
+
+        rc = self._set_clang_tools_paths()
+
+        if rc != 0:
+            return rc
+
+        if self._do_clang_tools_exist() and not force:
+            return 0
+
+        if os.path.isdir(self._clang_tools_path) and download_if_needed:
+            # The directory exists, perhaps it's corrupted?  Delete it
+            # and start from scratch.
+            shutil.rmtree(self._clang_tools_path)
+            return self._get_clang_tools(force=force, skip_cache=skip_cache,
+                                         source=source, verbose=verbose,
+                                         download_if_needed=download_if_needed)
+
+        # Create base directory where we store clang binary
+        os.mkdir(self._clang_tools_path)
+
+        if source:
+            return self._get_clang_tools_from_source(source)
+
+        self._artifact_manager = PackageFrontend(self._mach_context)
+
+        if not download_if_needed:
+            return 0
+
+        job, _ = self.platform
+
+        if job is None:
+            raise Exception('The current platform isn\'t supported. '
+                            'Currently only the following platforms are '
+                            'supported: win32/win64, linux64 and macosx64.')
+
+        job += '-clang-tidy'
+
+        # We want to unpack data in the clang-tidy mozbuild folder
+        currentWorkingDir = os.getcwd()
+        os.chdir(self._clang_tools_path)
+        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
+                                                       skip_cache=skip_cache,
+                                                       from_build=[job],
+                                                       no_unpack=False,
+                                                       retry=0)
+        # Change back the cwd
+        os.chdir(currentWorkingDir)
+
+        return rc
+
+    def _get_clang_tools_from_source(self, filename):
+        from mozbuild.action.tooltool import unpack_file
+        clang_tidy_path = mozpath.join(self._mach_context.state_dir,
+                                       "clang-tools")
+
+        currentWorkingDir = os.getcwd()
+        os.chdir(clang_tidy_path)
+
+        unpack_file(filename)
+
+        # Change back the cwd
+        os.chdir(currentWorkingDir)
+
+        clang_path = mozpath.join(clang_tidy_path, 'clang')
+
+        if not os.path.isdir(clang_path):
+            raise Exception('Extracted the archive but didn\'t find '
+                            'the expected output')
+
+        assert os.path.exists(self._clang_tidy_path)
+        assert os.path.exists(self._clang_format_path)
+        assert os.path.exists(self._clang_apply_replacements)
+        assert os.path.exists(self._run_clang_tidy_path)
+        return 0
+
+    def _get_clang_format_diff_command(self, commit):
+        if self.repository.name == 'hg':
+            args = ["hg", "diff", "-U0"]
+            if commit:
+                args += ["-c", commit]
+            else:
+                args += ["-r", ".^"]
+            for dot_extension in self._format_include_extensions:
+                args += ['--include', 'glob:**{0}'.format(dot_extension)]
+            args += ['--exclude', 'listfile:{0}'.format(self._format_ignore_file)]
+        else:
+            commit_range = "HEAD"  # All uncommitted changes.
+            if commit:
+                commit_range = commit if ".." in commit else "{}~..{}".format(commit, commit)
+            args = ["git", "diff", "--no-color", "-U0", commit_range, "--"]
+            for dot_extension in self._format_include_extensions:
+                args += ['*{0}'.format(dot_extension)]
+            # git-diff doesn't support an 'exclude-from-files' param, but
+            # it allows adding individual exclude patterns since v1.9, see
+            # https://git-scm.com/docs/gitglossary#gitglossary-aiddefpathspecapathspec
+            with open(self._format_ignore_file, 'r') as exclude_pattern_file:
+                for pattern in exclude_pattern_file.readlines():
+                    pattern = pattern.rstrip()
+                    pattern = pattern.replace('.*', '**')
+                    if not pattern or pattern.startswith('#'):
+                        continue  # empty or comment
+                    magics = ['exclude']
+                    if pattern.startswith('^'):
+                        magics += ['top']
+                        pattern = pattern[1:]
+                    args += [':({0}){1}'.format(','.join(magics), pattern)]
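+        # e.g. an ignore-file line '^third_party/.*' becomes the pathspec
+        # ':(exclude,top)third_party/**' (illustrative path)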
+        return args
+
+    def _get_infer(self, force=False, skip_cache=False, download_if_needed=True,
+                   verbose=False, intree_tool=False):
+        rc, config, _ = self._get_config_environment()
+        if rc != 0:
+            return rc
+        infer_path = self.topsrcdir if intree_tool else \
+            mozpath.join(self._mach_context.state_dir, 'infer')
+        self._infer_path = mozpath.join(infer_path, 'infer', 'bin', 'infer' +
+                                        config.substs.get('BIN_SUFFIX', ''))
+        if intree_tool:
+            return not os.path.exists(self._infer_path)
+        if os.path.exists(self._infer_path) and not force:
+            return 0
+
+        if os.path.isdir(infer_path) and download_if_needed:
+            # The directory exists, perhaps it's corrupted?  Delete it
+            # and start from scratch.
+            shutil.rmtree(infer_path)
+            return self._get_infer(force=force, skip_cache=skip_cache,
+                                   verbose=verbose,
+                                   download_if_needed=download_if_needed)
+        os.mkdir(infer_path)
+        self._artifact_manager = PackageFrontend(self._mach_context)
+        if not download_if_needed:
+            return 0
+        job, _ = self.platform
+        if job != 'linux64':
+            return -1
+        job += '-infer'
+        # We want to unpack data in the infer mozbuild folder
+        currentWorkingDir = os.getcwd()
+        os.chdir(infer_path)
+        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
+                                                       skip_cache=skip_cache,
+                                                       from_build=[job],
+                                                       no_unpack=False,
+                                                       retry=0)
+        # Change back the cwd
+        os.chdir(currentWorkingDir)
+        return rc
+
+    def _run_clang_format_diff(self, clang_format_diff, clang_format, commit, output_file):
+        # Run clang-format on the diff
+        # Note that this will potentially miss a lot of things
+        from subprocess import Popen, PIPE, check_output, CalledProcessError
+
+        diff_process = Popen(self._get_clang_format_diff_command(commit), stdout=PIPE)
+        args = [sys.executable, clang_format_diff, "-p1", "-binary=%s" % clang_format]
+
+        if not output_file:
+            args.append("-i")
+        try:
+            output = check_output(args, stdin=diff_process.stdout)
+            if output_file:
+                # We want to print the diffs
+                print(output, file=output_file)
+
+            return 0
+        except CalledProcessError as e:
+            # Something went wrong
+            print("clang-format: An error occurred while running clang-format-diff.")
+            return e.returncode
+
+    def _is_ignored_path(self, ignored_dir_re, f):
+        # Strip the topsrcdir prefix from the pathname, then match
+        if f.startswith(self.topsrcdir + '/'):
+            match_f = f[len(self.topsrcdir + '/'):]
+        else:
+            match_f = f
+        return re.match(ignored_dir_re, match_f)
+
+    def _generate_path_list(self, paths, verbose=True):
+        path_to_third_party = os.path.join(self.topsrcdir, self._format_ignore_file)
+        ignored_dir = []
+        with open(path_to_third_party, 'r') as fh:
+            for line in fh:
+                # Remove comments and empty lines
+                if line.startswith('#') or len(line.strip()) == 0:
+                    continue
+                # The regexp is to make sure we are managing relative paths
+                ignored_dir.append(r"^[\./]*" + line.rstrip())
+
+        # Combine the patterns into a single alternation regexp
+        ignored_dir_re = '(%s)' % '|'.join(ignored_dir)
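+        # e.g. the ignore entries 'third_party/foo' and 'gfx/bar' yield
+        # '(^[\./]*third_party/foo|^[\./]*gfx/bar)' (illustrative paths)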
+        extensions = self._format_include_extensions
+
+        path_list = []
+        for f in paths:
+            if self._is_ignored_path(ignored_dir_re, f):
+                # Early exit if we have provided an ignored directory
+                if verbose:
+                    print("clang-format: Ignored third party code '{0}'".format(f))
+                continue
+
+            if os.path.isdir(f):
+                # Processing a directory, generate the file list
+                for folder, subs, files in os.walk(f):
+                    subs.sort()
+                    for filename in sorted(files):
+                        f_in_dir = os.path.join(folder, filename)
+                        if (f_in_dir.endswith(extensions)
+                            and not self._is_ignored_path(ignored_dir_re, f_in_dir)):
+                            # Supported extension and accepted path
+                            path_list.append(f_in_dir)
+            else:
+                # Make sure the file exists and has a supported extension
+                if os.path.isfile(f) and f.endswith(extensions):
+                    path_list.append(f)
+
+        return path_list
+
+    def _run_clang_format_in_console(self, clang_format, paths, assume_filename):
+        path_list = self._generate_path_list(assume_filename, False)
+
+        if not path_list:
+            return 0
+
+        # We use -assume-filename in order to better determine the path for
+        # the .clang-format when it is run outside of the repo, for example
+        # by the extension hg-formatsource
+        args = [clang_format, "-assume-filename={}".format(assume_filename[0])]
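+        # e.g. content piped from a temporary copy is still formatted with the
+        # .clang-format rules that apply at the assumed path (paths illustrative):
+        #   cat /tmp/copy.cpp | clang-format -assume-filename=dom/base/nsDocument.cpp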
+
+        process = subprocess.Popen(args, stdin=subprocess.PIPE)
+        with open(paths[0], 'r') as fin:
+            process.stdin.write(fin.read())
+            process.stdin.close()
+            process.wait()
+            return process.returncode
+
+    def _run_clang_format_path(self, clang_format, paths, output_file, output_format):
+
+        # Run clang-format on files or directories directly
+        from subprocess import check_output, CalledProcessError
+
+        if output_format == 'json':
+            # Get replacements in xml, then process to json
+            args = [clang_format, '-output-replacements-xml']
+        else:
+            args = [clang_format, '-i']
+
+        if output_file:
+            # We just want to show the diff; create a directory to copy the files into
+            tmpdir = os.path.join(self.topobjdir, 'tmp')
+            if not os.path.exists(tmpdir):
+                os.makedirs(tmpdir)
+
+        path_list = self._generate_path_list(paths)
+
+        if not path_list:
+            return 0
+
+        print("Processing %d file(s)..." % len(path_list))
+
+        if output_file:
+            patches = {}
+            for original_path in path_list:
+
+                # Copy the file into a temp directory,
+                # run clang-format on the copy
+                # and show the diff
+                local_path = ntpath.basename(original_path)
+                target_file = os.path.join(tmpdir, local_path)
+                faketmpdir = os.path.dirname(target_file)
+                if not os.path.isdir(faketmpdir):
+                    os.makedirs(faketmpdir)
+                shutil.copy(original_path, faketmpdir)
+
+                # Run clang-format on the copy
+                try:
+                    output = check_output(args + [target_file])
+                    if output and output_format == 'json':
+                        patches[original_path] = self._parse_xml_output(original_path, output)
+                except CalledProcessError as e:
+                    # Something wrong happened
+                    print("clang-format: An error occurred while running clang-format.")
+                    return e.returncode
+
+                # show the diff
+                if output_format == 'diff':
+                    diff_command = ["diff", "-u", original_path, target_file]
+                    try:
+                        output = check_output(diff_command)
+                    except CalledProcessError as e:
+                        # diff -u exits non-zero only when the files differ; we
+                        # expect changes here, so reaching this handler means
+                        # there is a diff to show
+                        if e.output:
+                            # Replace the temp path by the path relative to the repository to
+                            # display a valid patch
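+                            # e.g. a hunk header naming the temporary copy
+                            # "<tmpdir>/nsDocument.cpp" ends up naming
+                            # "dom/base/nsDocument.cpp" instead (paths
+                            # illustrative)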
+                            relative_path = os.path.relpath(original_path, self.topsrcdir)
+                            patch = e.output.replace(target_file, relative_path)
+                            patch = patch.replace(original_path, relative_path)
+                            patches[original_path] = patch
+
+            if output_format == 'json':
+                output = json.dumps(patches, indent=4)
+            else:
+                # Display all the patches at once
+                output = '\n'.join(patches.values())
+
+            # Output to specified file or stdout
+            print(output, file=output_file)
+
+            shutil.rmtree(tmpdir)
+            return 0
+
+        # Run clang-format in parallel trying to saturate all of the available cores.
+        import concurrent.futures
+        import multiprocessing
+        import math
+
+        max_workers = multiprocessing.cpu_count()
+
+        # To maximize CPU usage when there are few items to handle,
+        # underestimate the number of items per batch, then dispatch
+        # outstanding items across workers. By construction, each worker will
+        # handle at most one outstanding item.
+        batch_size = int(math.floor(float(len(path_list)) / max_workers))
+        outstanding_items = len(path_list) - batch_size * max_workers
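+        # For example (numbers illustrative): 10 files across 4 workers gives
+        # batch_size = 2 and outstanding_items = 2, producing batches of
+        # 3, 3, 2 and 2 files.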
+
+        batches = []
+
+        i = 0
+        while i < len(path_list):
+            num_items = batch_size + (1 if outstanding_items > 0 else 0)
+            batches.append(args + path_list[i: (i + num_items)])
+
+            outstanding_items -= 1
+            i += num_items
+
+        error_code = None
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+            futures = []
+            for batch in batches:
+                futures.append(executor.submit(run_one_clang_format_batch, batch))
+
+            for future in concurrent.futures.as_completed(futures):
+                # Wait for every task to finish
+                ret_val = future.result()
+                if ret_val is not None:
+                    error_code = ret_val
+
+            if error_code is not None:
+                return error_code
+        return 0
+
+    def _parse_xml_output(self, path, clang_output):
+        '''
+        Parse the clang-format XML output to convert it into a JSON-compatible
+        list of patches, and calculate line-level information from the
+        character-level changes provided.
+        '''
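+        # The XML produced by -output-replacements-xml looks roughly like
+        # (offsets illustrative):
+        #   <?xml version='1.0'?>
+        #   <replacements xml:space='preserve'>
+        #     <replacement offset='42' length='3'>&#10;  </replacement>
+        #   </replacements>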
+        with open(path, 'r') as fh:
+            content = fh.read().decode('utf-8')
+
+        def _nb_of_lines(start, end):
+            return len(content[start:end].splitlines())
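+        # For example (content illustrative): with content "ab\ncd\nef",
+        # _nb_of_lines(0, 4) == 2, so a replacement at offset 4 starts on
+        # line 2.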
+
+        def _build(replacement):
+            offset = int(replacement.attrib['offset'])
+            length = int(replacement.attrib['length'])
+            last_line = content.rfind('\n', 0, offset)
+            return {
+                'replacement': replacement.text,
+                'char_offset': offset,
+                'char_length': length,
+                'line': _nb_of_lines(0, offset),
+                'line_offset': offset - last_line if last_line != -1 else 0,
+                'lines_modified': _nb_of_lines(offset, offset + length),
+            }
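+        # Each entry produced this way looks like (values illustrative):
+        #   {'replacement': '  ', 'char_offset': 42, 'char_length': 3,
+        #    'line': 5, 'line_offset': 7, 'lines_modified': 2}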
+
+        return [
+            _build(replacement)
+            for replacement in ET.fromstring(clang_output).findall('replacement')
+        ]
--- a/python/mozbuild/mozbuild/mach_commands.py
+++ b/python/mozbuild/mozbuild/mach_commands.py
@@ -1,70 +1,42 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
-import hashlib
-import io
 import itertools
 import json
 import logging
-import ntpath
 import operator
 import os
 import re
-import shutil
 import subprocess
 import sys
-import tarfile
 import tempfile
-import xml.etree.ElementTree as ET
-import yaml
-
-from collections import OrderedDict
 
 import mozpack.path as mozpath
 
 from mach.decorators import (
     CommandArgument,
     CommandArgumentGroup,
     CommandProvider,
     Command,
     SettingsProvider,
     SubCommand,
 )
 
-from mach.main import Mach
-
-from mozbuild.artifact_builds import JOB_CHOICES
 from mozbuild.base import (
     BuildEnvironmentNotFoundException,
     MachCommandBase,
     MachCommandConditions as conditions,
     MozbuildObject,
 )
-from mozbuild.util import ensureParentDir
-
-from mozbuild.backend import (
-    backends,
-)
-
-from mozversioncontrol import get_repository_object
-
-BUILD_WHAT_HELP = '''
-What to build. Can be a top-level make target or a relative directory. If
-multiple options are provided, they will be built serially. Takes dependency
-information from `topsrcdir/build/dumbmake-dependencies` to build additional
-targets as needed. BUILDING ONLY PARTS OF THE TREE CAN RESULT IN BAD TREE
-STATE. USE AT YOUR OWN RISK.
-'''.strip()
-
 
 EXCESSIVE_SWAP_MESSAGE = '''
 ===================
 PERFORMANCE WARNING
 
 Your machine experienced a lot of swap activity during the build. This is
 possibly a sign that your machine doesn't have enough physical memory or
 not enough available memory to perform the build. It's also possible some
@@ -74,25 +46,16 @@ If you feel this message is not appropri
 please file a Firefox Build System :: General bug at
 https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=General
 and tell us about your machine and build configuration so we can adjust the
 warning heuristic.
 ===================
 '''
 
 
-# Function used to run clang-format on a batch of files. It is a helper
-# function used to integrate clang-format into the futures ecosystem.
-def run_one_clang_format_batch(args):
-    try:
-        subprocess.check_output(args)
-    except subprocess.CalledProcessError as e:
-        return e
-
-
 class StoreDebugParamsAndWarnAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
         sys.stderr.write('The --debugparams argument is deprecated. Please ' +
                          'use --debugger-args instead.\n\n')
         setattr(namespace, self.dest, values)
 
 
 @CommandProvider
@@ -130,164 +93,16 @@ class Watch(MachCommandBase):
         try:
             return daemon.watch()
         except KeyboardInterrupt:
             # Suppress ugly stack trace when user hits Ctrl-C.
             sys.exit(3)
 
 
 @CommandProvider
-class Build(MachCommandBase):
-    """Interface to build the tree."""
-
-    @Command('build', category='build', description='Build the tree.')
-    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
-                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
-    @CommandArgument('-C', '--directory', default=None,
-                     help='Change to a subdirectory of the build directory first.')
-    @CommandArgument('what', default=None, nargs='*', help=BUILD_WHAT_HELP)
-    @CommandArgument('-X', '--disable-extra-make-dependencies',
-                     default=False, action='store_true',
-                     help='Do not add extra make dependencies.')
-    @CommandArgument('-v', '--verbose', action='store_true',
-                     help='Verbose output for what commands the build is running.')
-    @CommandArgument('--keep-going', action='store_true',
-                     help='Keep building after an error has occurred')
-    def build(self, what=None, disable_extra_make_dependencies=None, jobs=0,
-              directory=None, verbose=False, keep_going=False):
-        """Build the source tree.
-
-        With no arguments, this will perform a full build.
-
-        Positional arguments define targets to build. These can be make targets
-        or patterns like "<dir>/<target>" to indicate a make target within a
-        directory.
-
-        There are a few special targets that can be used to perform a partial
-        build faster than what `mach build` would perform:
-
-        * binaries - compiles and links all C/C++ sources and produces shared
-          libraries and executables (binaries).
-
-        * faster - builds JavaScript, XUL, CSS, etc files.
-
-        "binaries" and "faster" almost fully complement each other. However,
-        there are build actions not captured by either. If things don't appear to
-        be rebuilding, perform a vanilla `mach build` to rebuild the world.
-        """
-        from mozbuild.controller.building import (
-            BuildDriver,
-        )
-
-        self.log_manager.enable_all_structured_loggers()
-
-        driver = self._spawn(BuildDriver)
-        return driver.build(
-            what=what,
-            disable_extra_make_dependencies=disable_extra_make_dependencies,
-            jobs=jobs,
-            directory=directory,
-            verbose=verbose,
-            keep_going=keep_going,
-            mach_context=self._mach_context)
-
-    @Command('configure', category='build',
-             description='Configure the tree (run configure and config.status).')
-    @CommandArgument('options', default=None, nargs=argparse.REMAINDER,
-                     help='Configure options')
-    def configure(self, options=None, buildstatus_messages=False, line_handler=None):
-        from mozbuild.controller.building import (
-            BuildDriver,
-        )
-
-        self.log_manager.enable_all_structured_loggers()
-        driver = self._spawn(BuildDriver)
-
-        return driver.configure(
-            options=options,
-            buildstatus_messages=buildstatus_messages,
-            line_handler=line_handler)
-
-    @Command('resource-usage', category='post-build',
-             description='Show information about system resource usage for a build.')
-    @CommandArgument('--address', default='localhost',
-                     help='Address the HTTP server should listen on.')
-    @CommandArgument('--port', type=int, default=0,
-                     help='Port number the HTTP server should listen on.')
-    @CommandArgument('--browser', default='firefox',
-                     help='Web browser to automatically open. See webbrowser Python module.')
-    @CommandArgument('--url',
-                     help='URL of JSON document to display')
-    def resource_usage(self, address=None, port=None, browser=None, url=None):
-        import webbrowser
-        from mozbuild.html_build_viewer import BuildViewerServer
-
-        server = BuildViewerServer(address, port)
-
-        if url:
-            server.add_resource_json_url('url', url)
-        else:
-            last = self._get_state_filename('build_resources.json')
-            if not os.path.exists(last):
-                print('Build resources not available. If you have performed a '
-                      'build and receive this message, the psutil Python package '
-                      'likely failed to initialize properly.')
-                return 1
-
-            server.add_resource_json_file('last', last)
-        try:
-            webbrowser.get(browser).open_new_tab(server.url)
-        except Exception:
-            print('Cannot get browser specified, trying the default instead.')
-            try:
-                browser = webbrowser.get().open_new_tab(server.url)
-            except Exception:
-                print('Please open %s in a browser.' % server.url)
-
-        print('Hit CTRL+c to stop server.')
-        server.run()
-
-    @Command('build-backend', category='build',
-             description='Generate a backend used to build the tree.')
-    @CommandArgument('-d', '--diff', action='store_true',
-                     help='Show a diff of changes.')
-    # It would be nice to filter the choices below based on
-    # conditions, but that is for another day.
-    @CommandArgument('-b', '--backend', nargs='+', choices=sorted(backends),
-                     help='Which backend to build.')
-    @CommandArgument('-v', '--verbose', action='store_true',
-                     help='Verbose output.')
-    @CommandArgument('-n', '--dry-run', action='store_true',
-                     help='Do everything except writing files out.')
-    def build_backend(self, backend, diff=False, verbose=False, dry_run=False):
-        python = self.virtualenv_manager.python_path
-        config_status = os.path.join(self.topobjdir, 'config.status')
-
-        if not os.path.exists(config_status):
-            print('config.status not found.  Please run |mach configure| '
-                  'or |mach build| prior to building the %s build backend.'
-                  % backend)
-            return 1
-
-        args = [python, config_status]
-        if backend:
-            args.append('--backend')
-            args.extend(backend)
-        if diff:
-            args.append('--diff')
-        if verbose:
-            args.append('--verbose')
-        if dry_run:
-            args.append('--dry-run')
-
-        return self._run_command_in_objdir(args=args, pass_thru=True,
-                                           ensure_exit_code=False)
-
-
-@CommandProvider
 class CargoProvider(MachCommandBase):
     """Invoke cargo in useful ways."""
 
     @Command('cargo', category='build',
              description='Invoke cargo in useful ways.')
     def cargo(self):
         self.parser.print_usage()
         return 1
@@ -825,69 +640,16 @@ class GTestCommands(MachCommandBase):
                 return 1
 
         # Prepend the debugger args.
         args = [debuggerInfo.path] + debuggerInfo.args + args
         return args
 
 
 @CommandProvider
-class ClangCommands(MachCommandBase):
-    @Command('clang-complete', category='devenv',
-             description='Generate a .clang_complete file.')
-    def clang_complete(self):
-        import shlex
-
-        build_vars = {}
-
-        def on_line(line):
-            elements = [s.strip() for s in line.split('=', 1)]
-
-            if len(elements) != 2:
-                return
-
-            build_vars[elements[0]] = elements[1]
-
-        try:
-            old_logger = self.log_manager.replace_terminal_handler(None)
-            self._run_make(target='showbuild', log=False, line_handler=on_line)
-        finally:
-            self.log_manager.replace_terminal_handler(old_logger)
-
-        def print_from_variable(name):
-            if name not in build_vars:
-                return
-
-            value = build_vars[name]
-
-            value = value.replace('-I.', '-I%s' % self.topobjdir)
-            value = value.replace(' .', ' %s' % self.topobjdir)
-            value = value.replace('-I..', '-I%s/..' % self.topobjdir)
-            value = value.replace(' ..', ' %s/..' % self.topobjdir)
-
-            args = shlex.split(value)
-            for i in range(0, len(args) - 1):
-                arg = args[i]
-
-                if arg.startswith(('-I', '-D')):
-                    print(arg)
-                    continue
-
-                if arg.startswith('-include'):
-                    print(arg + ' ' + args[i + 1])
-                    continue
-
-        print_from_variable('COMPILE_CXXFLAGS')
-
-        print('-I%s/ipc/chromium/src' % self.topsrcdir)
-        print('-I%s/ipc/glue' % self.topsrcdir)
-        print('-I%s/ipc/ipdl/_ipdlheaders' % self.topobjdir)
-
-
-@CommandProvider
 class Package(MachCommandBase):
     """Package the built product for distribution."""
 
     @Command('package', category='post-build',
              description='Package the built product for distribution as an APK, DMG, etc.')
     @CommandArgument('-v', '--verbose', action='store_true',
                      help='Verbose output for what commands the packaging process is running.')
     def package(self, verbose=False):
@@ -1288,2431 +1050,16 @@ class MachDebug(MachCommandBase):
                         result['defines'] = obj.defines
                     return result
                 elif isinstance(obj, set):
                     return list(obj)
                 return json.JSONEncoder.default(self, obj)
         json.dump(self, cls=EnvironmentEncoder, sort_keys=True, fp=out)
 
 
-class ArtifactSubCommand(SubCommand):
-    def __call__(self, func):
-        after = SubCommand.__call__(self, func)
-        args = [
-            CommandArgument('--tree', metavar='TREE', type=str,
-                            help='Firefox tree.'),
-            CommandArgument('--job', metavar='JOB', choices=JOB_CHOICES,
-                            help='Build job.'),
-            CommandArgument('--verbose', '-v', action='store_true',
-                            help='Print verbose output.'),
-        ]
-        for arg in args:
-            after = arg(after)
-        return after
-
-
-class SymbolsAction(argparse.Action):
-    def __call__(self, parser, namespace, values, option_string=None):
-        # If this function is called, it means the --symbols option was given,
-        # so we want to store the value `True` if no explicit value was given
-        # to the option.
-        setattr(namespace, self.dest, values or True)
-
-
-@CommandProvider
-class PackageFrontend(MachCommandBase):
-    """Fetch and install binary artifacts from Mozilla automation."""
-
-    @Command('artifact', category='post-build',
-             description='Use pre-built artifacts to build Firefox.')
-    def artifact(self):
-        '''Download, cache, and install pre-built binary artifacts to build Firefox.
-
-        Use ``|mach build|`` as normal to freshen your installed binary libraries:
-        artifact builds automatically download, cache, and install binary
-        artifacts from Mozilla automation, replacing whatever may be in your
-        object directory.  Use ``|mach artifact last|`` to see what binary artifacts
-        were last used.
-
-        Never build libxul again!
-
-        '''
-        pass
-
-    def _make_artifacts(self, tree=None, job=None, skip_cache=False,
-                        download_tests=True, download_symbols=False,
-                        download_host_bins=False,
-                        download_maven_zip=False,
-                        no_process=False):
-        state_dir = self._mach_context.state_dir
-        cache_dir = os.path.join(state_dir, 'package-frontend')
-
-        hg = None
-        if conditions.is_hg(self):
-            hg = self.substs['HG']
-
-        git = None
-        if conditions.is_git(self):
-            git = self.substs['GIT']
-
-        # If we're building Thunderbird, we should be checking for comm-central artifacts.
-        topsrcdir = self.substs.get('commtopsrcdir', self.topsrcdir)
-
-        if download_maven_zip:
-            if download_tests:
-                raise ValueError('--maven-zip requires --no-tests')
-            if download_symbols:
-                raise ValueError('--maven-zip requires no --symbols')
-            if download_host_bins:
-                raise ValueError('--maven-zip requires no --host-bins')
-            if not no_process:
-                raise ValueError('--maven-zip requires --no-process')
-
-        from mozbuild.artifacts import Artifacts
-        artifacts = Artifacts(tree, self.substs, self.defines, job,
-                              log=self.log, cache_dir=cache_dir,
-                              skip_cache=skip_cache, hg=hg, git=git,
-                              topsrcdir=topsrcdir,
-                              download_tests=download_tests,
-                              download_symbols=download_symbols,
-                              download_host_bins=download_host_bins,
-                              download_maven_zip=download_maven_zip,
-                              no_process=no_process)
-        return artifacts
-
-    @ArtifactSubCommand('artifact', 'install',
-                        'Install a good pre-built artifact.')
-    @CommandArgument('source', metavar='SRC', nargs='?', type=str,
-                     help='Where to fetch and install artifacts from.  Can be omitted, in '
-                     'which case the current hg repository is inspected; an hg revision; '
-                     'a remote URL; or a local file.',
-                     default=None)
-    @CommandArgument('--skip-cache', action='store_true',
-                     help='Skip all local caches to force re-fetching remote artifacts.',
-                     default=False)
-    @CommandArgument('--no-tests', action='store_true', help="Don't install tests.")
-    @CommandArgument('--symbols', nargs='?', action=SymbolsAction, help='Download symbols.')
-    @CommandArgument('--host-bins', action='store_true', help='Download host binaries.')
-    @CommandArgument('--distdir', help='Where to install artifacts to.')
-    @CommandArgument('--no-process', action='store_true',
-                     help="Don't process (unpack) artifact packages, just download them.")
-    @CommandArgument('--maven-zip', action='store_true', help="Download Maven zip (Android-only).")
-    def artifact_install(self, source=None, skip_cache=False, tree=None, job=None, verbose=False,
-                         no_tests=False, symbols=False, host_bins=False, distdir=None,
-                         no_process=False, maven_zip=False):
-        self._set_log_level(verbose)
-        artifacts = self._make_artifacts(tree=tree, job=job, skip_cache=skip_cache,
-                                         download_tests=not no_tests,
-                                         download_symbols=symbols,
-                                         download_host_bins=host_bins,
-                                         download_maven_zip=maven_zip,
-                                         no_process=no_process)
-
-        return artifacts.install_from(source, distdir or self.distdir)
-
-    @ArtifactSubCommand('artifact', 'clear-cache',
-                        'Delete local artifacts and reset local artifact cache.')
-    def artifact_clear_cache(self, tree=None, job=None, verbose=False):
-        self._set_log_level(verbose)
-        artifacts = self._make_artifacts(tree=tree, job=job)
-        artifacts.clear_cache()
-        return 0
-
-    @SubCommand('artifact', 'toolchain')
-    @CommandArgument('--verbose', '-v', action='store_true',
-                     help='Print verbose output.')
-    @CommandArgument('--cache-dir', metavar='DIR',
-                     help='Directory where to store the artifacts cache')
-    @CommandArgument('--skip-cache', action='store_true',
-                     help='Skip all local caches to force re-fetching remote artifacts.',
-                     default=False)
-    @CommandArgument('--from-build', metavar='BUILD', nargs='+',
-                     help='Download toolchains resulting from the given build(s); '
-                     'BUILD is a name of a toolchain task, e.g. linux64-clang')
-    @CommandArgument('--tooltool-manifest', metavar='MANIFEST',
-                     help='Explicit tooltool manifest to process')
-    @CommandArgument('--authentication-file', metavar='FILE',
-                     help='Use the RelengAPI token found in the given file to authenticate')
-    @CommandArgument('--tooltool-url', metavar='URL',
-                     help='Use the given url as tooltool server')
-    @CommandArgument('--no-unpack', action='store_true',
-                     help='Do not unpack any downloaded file')
-    @CommandArgument('--retry', type=int, default=4,
-                     help='Number of times to retry failed downloads')
-    @CommandArgument('--artifact-manifest', metavar='FILE',
-                     help='Store a manifest about the downloaded taskcluster artifacts')
-    @CommandArgument('files', nargs='*',
-                     help='A list of files to download, in the form path@task-id, in '
-                     'addition to the files listed in the tooltool manifest.')
-    def artifact_toolchain(self, verbose=False, cache_dir=None,
-                           skip_cache=False, from_build=(),
-                           tooltool_manifest=None, authentication_file=None,
-                           tooltool_url=None, no_unpack=False, retry=None,
-                           artifact_manifest=None, files=()):
-        '''Download, cache and install pre-built toolchains.
-        '''
-        from mozbuild.artifacts import ArtifactCache
-        from mozbuild.action.tooltool import (
-            FileRecord,
-            open_manifest,
-            unpack_file,
-        )
-        from requests.adapters import HTTPAdapter
-        import redo
-        import requests
-
-        from taskgraph.util.taskcluster import (
-            get_artifact_url,
-        )
-
-        self._set_log_level(verbose)
-        # Normally, we'd use self.log_manager.enable_unstructured(),
-        # but that enables all logging, while we only really want tooltool's
-        # and it would also emit the structured log output twice.
-        # So we manually do what it does, and limit that to the tooltool
-        # logger.
-        if self.log_manager.terminal_handler:
-            logging.getLogger('mozbuild.action.tooltool').addHandler(
-                self.log_manager.terminal_handler)
-            logging.getLogger('redo').addHandler(
-                self.log_manager.terminal_handler)
-            self.log_manager.terminal_handler.addFilter(
-                self.log_manager.structured_filter)
-        if not cache_dir:
-            cache_dir = os.path.join(self._mach_context.state_dir, 'toolchains')
-
-        tooltool_url = (tooltool_url or
-                        'https://tooltool.mozilla-releng.net').rstrip('/')
-
-        cache = ArtifactCache(cache_dir=cache_dir, log=self.log,
-                              skip_cache=skip_cache)
-
-        if authentication_file:
-            with open(authentication_file, 'rb') as f:
-                token = f.read().strip()
-
-            class TooltoolAuthenticator(HTTPAdapter):
-                def send(self, request, *args, **kwargs):
-                    request.headers['Authorization'] = \
-                        'Bearer {}'.format(token)
-                    return super(TooltoolAuthenticator, self).send(
-                        request, *args, **kwargs)
-
-            cache._download_manager.session.mount(
-                tooltool_url, TooltoolAuthenticator())
-
-        class DownloadRecord(FileRecord):
-            def __init__(self, url, *args, **kwargs):
-                super(DownloadRecord, self).__init__(*args, **kwargs)
-                self.url = url
-                self.basename = self.filename
-
-            def fetch_with(self, cache):
-                self.filename = cache.fetch(self.url)
-                return self.filename
-
-            def validate(self):
-                if self.size is None and self.digest is None:
-                    return True
-                return super(DownloadRecord, self).validate()
-
-        class ArtifactRecord(DownloadRecord):
-            def __init__(self, task_id, artifact_name):
-                for _ in redo.retrier(attempts=retry+1, sleeptime=60):
-                    cot = cache._download_manager.session.get(
-                        get_artifact_url(task_id, 'public/chain-of-trust.json'))
-                    if cot.status_code >= 500:
-                        continue
-                    cot.raise_for_status()
-                    break
-                else:
-                    cot.raise_for_status()
-
-                digest = algorithm = None
-                data = json.loads(cot.content)
-                for algorithm, digest in (data.get('artifacts', {})
-                                              .get(artifact_name, {}).items()):
-                    pass
-
-                name = os.path.basename(artifact_name)
-                artifact_url = get_artifact_url(task_id, artifact_name,
-                                                use_proxy=not artifact_name.startswith('public/'))
-                super(ArtifactRecord, self).__init__(
-                    artifact_url, name,
-                    None, digest, algorithm, unpack=True)
-
-        records = OrderedDict()
-        downloaded = []
-
-        if tooltool_manifest:
-            manifest = open_manifest(tooltool_manifest)
-            for record in manifest.file_records:
-                url = '{}/{}/{}'.format(tooltool_url, record.algorithm,
-                                        record.digest)
-                records[record.filename] = DownloadRecord(
-                    url, record.filename, record.size, record.digest,
-                    record.algorithm, unpack=record.unpack,
-                    version=record.version, visibility=record.visibility)
-
-        if from_build:
-            if 'MOZ_AUTOMATION' in os.environ:
-                self.log(logging.ERROR, 'artifact', {},
-                         'Do not use --from-build in automation; all dependencies '
-                         'should be determined in the decision task.')
-                return 1
-            from taskgraph.optimize import IndexSearch
-            from taskgraph.parameters import Parameters
-            from taskgraph.generator import load_tasks_for_kind
-            params = Parameters(
-                level=os.environ.get('MOZ_SCM_LEVEL', '3'),
-                strict=False,
-            )
-
-            root_dir = mozpath.join(self.topsrcdir, 'taskcluster/ci')
-            toolchains = load_tasks_for_kind(params, 'toolchain', root_dir=root_dir)
-
-            aliases = {}
-            for t in toolchains.values():
-                alias = t.attributes.get('toolchain-alias')
-                if alias:
-                    aliases['toolchain-{}'.format(alias)] = \
-                        t.task['metadata']['name']
-
-            for b in from_build:
-                user_value = b
-
-                if not b.startswith('toolchain-'):
-                    b = 'toolchain-{}'.format(b)
-
-                task = toolchains.get(aliases.get(b, b))
-                if not task:
-                    self.log(logging.ERROR, 'artifact', {'build': user_value},
-                             'Could not find a toolchain build named `{build}`')
-                    return 1
-
-                task_id = IndexSearch().should_replace_task(
-                    task, {}, task.optimization.get('index-search', []))
-                artifact_name = task.attributes.get('toolchain-artifact')
-                if task_id in (True, False) or not artifact_name:
-                    self.log(logging.ERROR, 'artifact', {'build': user_value},
-                             'Could not find artifacts for a toolchain build '
-                             'named `{build}`. Local commits and other changes '
-                             'in your checkout may cause this error. Try '
-                             'updating to a fresh checkout of mozilla-central '
-                             'to use artifact builds.')
-                    return 1
-
-                record = ArtifactRecord(task_id, artifact_name)
-                records[record.filename] = record
-
-        # Handle the list of files of the form path@task-id on the command
-        # line. Each of those give a path to an artifact to download.
-        for f in files:
-            if '@' not in f:
-                self.log(logging.ERROR, 'artifact', {},
-                         'Expected a list of files of the form path@task-id')
-                return 1
-            name, task_id = f.rsplit('@', 1)
-            record = ArtifactRecord(task_id, name)
-            records[record.filename] = record
-
-        for record in records.itervalues():
-            self.log(logging.INFO, 'artifact', {'name': record.basename},
-                     'Downloading {name}')
-            valid = False
-            # sleeptime is 60 per retry.py, used by tooltool_wrapper.sh
-            for attempt, _ in enumerate(redo.retrier(attempts=retry+1,
-                                                     sleeptime=60)):
-                try:
-                    record.fetch_with(cache)
-                except (requests.exceptions.HTTPError,
-                        requests.exceptions.ChunkedEncodingError,
-                        requests.exceptions.ConnectionError) as e:
-
-                    if isinstance(e, requests.exceptions.HTTPError):
-                        # The relengapi proxy likes to return error 400 bad request
-                        # which seems unlikely to be due to our (simple) GET
-                        # being borked.
-                        status = e.response.status_code
-                        should_retry = status >= 500 or status == 400
-                    else:
-                        should_retry = True
-
-                    if should_retry or attempt < retry:
-                        level = logging.WARN
-                    else:
-                        level = logging.ERROR
-                    # e.message is not always a string, so convert it first.
-                    self.log(level, 'artifact', {}, str(e.message))
-                    if not should_retry:
-                        break
-                    if attempt < retry:
-                        self.log(logging.INFO, 'artifact', {},
-                                 'Will retry in a moment...')
-                    continue
-                try:
-                    valid = record.validate()
-                except Exception:
-                    pass
-                if not valid:
-                    os.unlink(record.filename)
-                    if attempt < retry:
-                        self.log(logging.INFO, 'artifact', {},
-                                 'Corrupt download. Will retry in a moment...')
-                    continue
-
-                downloaded.append(record)
-                break
-
-            if not valid:
-                self.log(logging.ERROR, 'artifact', {'name': record.basename},
-                         'Failed to download {name}')
-                return 1
-
-        artifacts = {} if artifact_manifest else None
-
-        for record in downloaded:
-            local = os.path.join(os.getcwd(), record.basename)
-            if os.path.exists(local):
-                os.unlink(local)
-            # unpack_file needs the file with its final name to work
-            # (https://github.com/mozilla/build-tooltool/issues/38), so we
-            # need to copy it, even though we remove it later. Use hard links
-            # when possible.
-            try:
-                os.link(record.filename, local)
-            except Exception:
-                shutil.copy(record.filename, local)
-            # Keep a sha256 of each downloaded file, for the chain-of-trust
-            # validation.
-            if artifact_manifest is not None:
-                with open(local) as fh:
-                    h = hashlib.sha256()
-                    while True:
-                        data = fh.read(1024 * 1024)
-                        if not data:
-                            break
-                        h.update(data)
-                artifacts[record.url] = {
-                    'sha256': h.hexdigest(),
-                }
-            if record.unpack and not no_unpack:
-                unpack_file(local)
-                os.unlink(local)
-
-        if not downloaded:
-            self.log(logging.ERROR, 'artifact', {}, 'Nothing to download')
-            if files:
-                return 1
-
-        if artifacts:
-            ensureParentDir(artifact_manifest)
-            with open(artifact_manifest, 'w') as fh:
-                json.dump(artifacts, fh, indent=4, sort_keys=True)
-
-        return 0
-
-
-class StaticAnalysisSubCommand(SubCommand):
-    def __call__(self, func):
-        after = SubCommand.__call__(self, func)
-        args = [
-            CommandArgument('--verbose', '-v', action='store_true',
-                            help='Print verbose output.'),
-        ]
-        for arg in args:
-            after = arg(after)
-        return after
-
-
-class StaticAnalysisMonitor(object):
-    def __init__(self, srcdir, objdir, clang_tidy_config, total):
-        self._total = total
-        self._processed = 0
-        self._current = None
-        self._srcdir = srcdir
-
-        self._clang_tidy_config = clang_tidy_config['clang_checkers']
-        # Transform the configuration to support Regex
-        for item in self._clang_tidy_config:
-            if item['name'] == '-*':
-                continue
-            item['name'] = item['name'].replace('*', '.*')
-
-        from mozbuild.compilation.warnings import (
-            WarningsCollector,
-            WarningsDatabase,
-        )
-
-        self._warnings_database = WarningsDatabase()
-
-        def on_warning(warning):
-            self._warnings_database.insert(warning)
-
-        self._warnings_collector = WarningsCollector(on_warning, objdir=objdir)
-
-    @property
-    def num_files(self):
-        return self._total
-
-    @property
-    def num_files_processed(self):
-        return self._processed
-
-    @property
-    def current_file(self):
-        return self._current
-
-    @property
-    def warnings_db(self):
-        return self._warnings_database
-
-    def on_line(self, line):
-        warning = None
-
-        try:
-            warning = self._warnings_collector.process_line(line)
-        except Exception:
-            pass
-
-        if line.find('clang-tidy') != -1:
-            filename = line.split(' ')[-1]
-            if os.path.isfile(filename):
-                self._current = os.path.relpath(filename, self._srcdir)
-            else:
-                self._current = None
-            self._processed = self._processed + 1
-            return (warning, False)
-        if warning is not None:
-            def get_reliability(checker_name):
-                # Find the entry in self._clang_tidy_config whose 'name' field matches
-                reliability = None
-                for item in self._clang_tidy_config:
-                    if item['name'] == checker_name:
-                        reliability = item.get('reliability', 'low')
-                        break
-                    else:
-                        # We use a regex in order to also match checkers like 'mozilla-.*'
-                        matcher = re.match(item['name'], checker_name)
-                        if matcher is not None and matcher.group(0) == checker_name:
-                            reliability = item.get('reliability', 'low')
-                            break
-                return reliability
-            reliability = get_reliability(warning['flag'])
-            if reliability is not None:
-                warning['reliability'] = reliability
-        return (warning, True)
-
-
-@CommandProvider
-class StaticAnalysis(MachCommandBase):
-    """Utilities for running C++ static analysis checks and format."""
-
-    # List of file extension to consider (should start with dot)
-    _format_include_extensions = ('.cpp', '.c', '.cc', '.h', '.m', '.mm')
-    # File containing all paths to exclude from formatting
-    _format_ignore_file = '.clang-format-ignore'
-
-    _clang_tidy_config = None
-    _cov_config = None
-
-    @Command('static-analysis', category='testing',
-             description='Run C++ static analysis checks')
-    def static_analysis(self):
-        # If no arguments are provided, just print a help message.
-        mach = Mach(os.getcwd())
-        mach.run(['static-analysis', '--help'])
-
-    @StaticAnalysisSubCommand('static-analysis', 'check',
-                              'Run the checks using the helper tool')
-    @CommandArgument('source', nargs='*', default=['.*'],
-                     help='Source files to be analyzed (regex on path). '
-                          'Can be omitted, in which case the entire code base '
-                          'is analyzed.  The source argument is ignored if '
-                          'there is anything fed through stdin, in which case '
-                          'the analysis is only performed on the files changed '
-                          'in the patch streamed through stdin.  This is called '
-                          'the diff mode.')
-    @CommandArgument('--checks', '-c', default='-*', metavar='checks',
-                     help='Static analysis checks to enable.  By default, this enables only '
-                     'checks that are published here: https://mzl.la/2DRHeTh, but can be any '
-                     'clang-tidy checks syntax.')
-    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
-                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
-    @CommandArgument('--strip', '-p', default='1', metavar='NUM',
-                     help='Strip NUM leading components from file names in diff mode.')
-    @CommandArgument('--fix', '-f', default=False, action='store_true',
-                     help='Try to autofix errors detected by clang-tidy checkers.')
-    @CommandArgument('--header-filter', '-h-f', default='', metavar='header_filter',
-                     help='Regular expression matching the names of the headers to '
-                          'output diagnostics from. Diagnostics from the main file '
-                          'of each translation unit are always displayed')
-    @CommandArgument('--output', '-o', default=None,
-                     help='Write clang-tidy output in a file')
-    @CommandArgument('--format', default='text', choices=('text', 'json'),
-                     help='Output format to write in a file')
-    @CommandArgument('--outgoing', default=False, action='store_true',
-                     help='Run static analysis checks on outgoing files from mercurial repository')
-    def check(self, source=None, jobs=2, strip=1, verbose=False, checks='-*',
-              fix=False, header_filter='', output=None, format='text', outgoing=False):
-        from mozbuild.controller.building import (
-            StaticAnalysisFooter,
-            StaticAnalysisOutputManager,
-        )
-
-        self._set_log_level(verbose)
-        self.log_manager.enable_all_structured_loggers()
-
-        rc = self._get_clang_tools(verbose=verbose)
-        if rc != 0:
-            return rc
-
-        if self._is_version_eligible() is False:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "You're using an old version of clang-format binary."
-                     " Please update to a more recent one by running: './mach bootstrap'")
-            return 1
-
-        rc = self._build_compile_db(verbose=verbose)
-        rc = rc or self._build_export(jobs=jobs, verbose=verbose)
-        if rc != 0:
-            return rc
-
-        # Use outgoing files instead of source files
-        if outgoing:
-            repo = get_repository_object(self.topsrcdir)
-            files = repo.get_outgoing_files()
-            source = map(os.path.abspath, files)
-
-        # Split in several chunks to avoid hitting Python's limit of 100 groups in re
-        compile_db = json.loads(open(self._compile_db, 'r').read())
-        total = 0
-        import re
-        chunk_size = 50
-        for offset in range(0, len(source), chunk_size):
-            source_chunks = source[offset:offset + chunk_size]
-            name_re = re.compile('(' + ')|('.join(source_chunks) + ')')
-            for f in compile_db:
-                if name_re.search(f['file']):
-                    total = total + 1
-
-        if not total:
-            self.log(logging.INFO, 'static-analysis', {},
-                     "There are no files eligible for analysis. Please note that 'header' files "
-                     "cannot be used for analysis since they do not consist compilation units.")
-            return 0
-
-        cwd = self.topobjdir
-        self._compilation_commands_path = self.topobjdir
-        if self._clang_tidy_config is None:
-            self._clang_tidy_config = self._get_clang_tidy_config()
-        args = self._get_clang_tidy_command(
-            checks=checks, header_filter=header_filter, sources=source, jobs=jobs, fix=fix)
-
-        monitor = StaticAnalysisMonitor(
-            self.topsrcdir, self.topobjdir, self._clang_tidy_config, total)
-
-        footer = StaticAnalysisFooter(self.log_manager.terminal, monitor)
-        with StaticAnalysisOutputManager(self.log_manager, monitor, footer) as output_manager:
-            rc = self.run_process(args=args, ensure_exit_code=False,
-                                  line_handler=output_manager.on_line, cwd=cwd)
-
-            self.log(logging.WARNING, 'warning_summary',
-                     {'count': len(monitor.warnings_db)},
-                     '{count} warnings present.')
-
-            # Write output file
-            if output is not None:
-                output_manager.write(output, format)
-
-        if rc != 0:
-            return rc
-        # if we are building firefox for android it might be nice to
-        # also analyze the java code base
-        if self.substs['MOZ_BUILD_APP'] == 'mobile/android':
-            rc = self.check_java(source, jobs, strip, verbose, skip_export=True)
-        return rc
-
-    @StaticAnalysisSubCommand('static-analysis', 'check-coverity',
-                              'Run coverity static-analysis tool on the given files. '
-                              'Can only be run by automation! '
-                              'Its result is stored as a json file on the artifacts server.')
-    @CommandArgument('source', nargs='*', default=[],
-                     help='Source files to be analyzed by Coverity Static Analysis Tool. '
-                          'This is run only in automation.')
-    @CommandArgument('--output', '-o', default=None,
-                     help='Write coverity output translated to json output in a file')
-    @CommandArgument('--coverity_output_path', '-co', default=None,
-                     help='Path where to write coverity results as cov-results.json. '
-                     'If no path is specified the default path from the coverity working '
-                     'directory, ~./mozbuild/coverity is used.')
-    @CommandArgument('--outgoing', default=False, action='store_true',
-                     help='Run coverity on outgoing files from mercurial or git repository')
-    def check_coverity(self, source=[], output=None, coverity_output_path=None,
-                       outgoing=False, verbose=False):
-        self._set_log_level(verbose)
-        self.log_manager.enable_all_structured_loggers()
-
-        if 'MOZ_AUTOMATION' not in os.environ:
-            self.log(logging.INFO, 'static-analysis', {},
-                     'Coverity-based static-analysis cannot be run outside automation.')
-            return
-
-        # Use outgoing files instead of source files
-        if outgoing:
-            repo = get_repository_object(self.topsrcdir)
-            files = repo.get_outgoing_files()
-            source = map(os.path.abspath, files)
-
-        if len(source) == 0:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'There are no files that coverity can use to scan.')
-            return 0
-
-        rc = self._build_compile_db(verbose=verbose)
-        rc = rc or self._build_export(jobs=2, verbose=verbose)
-
-        if rc != 0:
-            return rc
-
-        commands_list = self.get_files_with_commands(source)
-        if len(commands_list) == 0:
-            self.log(logging.INFO, 'static-analysis', {},
-                     'There are no files that need to be analyzed.')
-            return 0
-
-        # Load the configuration file for coverity static-analysis
-        # For the moment we store only the reliability index for each checker
-        # as the rest is managed on the https://github.com/mozilla/release-services side.
-        self._cov_config = self._get_cov_config()
-
-        rc = self.setup_coverity()
-        if rc != 0:
-            return rc
-
-        # First run cov-run-desktop --setup in order to setup the analysis env
-        cmd = [self.cov_run_desktop, '--setup']
-        self.log(logging.INFO, 'static-analysis', {},
-                 'Running {} --setup'.format(self.cov_run_desktop))
-
-        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
-
-        if rc != 0:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Running {} --setup failed!'.format(self.cov_run_desktop))
-            return rc
-
-        # Run cov-configure for clang
-        cmd = [self.cov_configure, '--clang']
-        self.log(logging.INFO, 'static-analysis', {},
-                 'Running {} --clang'.format(self.cov_configure))
-
-        rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)
-
-        if rc != 0:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Running {} --clang failed!'.format(self.cov_configure))
-            return rc
-
-        # For each element in commands_list run `cov-translate`
-        for element in commands_list:
-            cmd = [self.cov_translate, '--dir', self.cov_idir_path] + element['command'].split(' ')
-            self.log(logging.INFO, 'static-analysis', {},
-                     'Running Coverity Translate for {}'.format(cmd))
-            rc = self.run_process(args=cmd, cwd=element['directory'], pass_thru=True)
-            if rc != 0:
-                self.log(logging.ERROR, 'static-analysis', {},
-                         'Running Coverity Translate failed for {}'.format(cmd))
-                return rc
-
-        if coverity_output_path is None:
-            cov_result = mozpath.join(self.cov_state_path, 'cov-results.json')
-        else:
-            cov_result = mozpath.join(coverity_output_path, 'cov-results.json')
-
-        # Once the capture is performed we need to do the actual Coverity Desktop analysis
-        cmd = [self.cov_run_desktop, '--json-output-v6', cov_result, '--analyze-captured-source']
-        self.log(logging.INFO, 'static-analysis', {},
-                 'Running Coverity Analysis for {}'.format(cmd))
-        rc = self.run_process(cmd, cwd=self.cov_state_path, pass_thru=True)
-        if rc != 0:
-            self.log(logging.ERROR, 'static-analysis', {}, 'Coverity Analysis failed!')
-
-        if output is not None:
-            self.dump_cov_artifact(cov_result, source, output)
-
-    def get_reliability_index_for_cov_checker(self, checker_name):
-        if self._cov_config is None:
-            self.log(logging.INFO, 'static-analysis', {}, 'Coverity config file not found, '
-                     'using default value \'reliability\' = medium for checker {}'.format(
-                        checker_name))
-            return 'medium'
-
-        checkers = self._cov_config['coverity_checkers']
-        if checker_name not in checkers:
-            self.log(logging.INFO, 'static-analysis', {},
-                     'Coverity checker {} not found to determine reliability index. '
-                     'For the moment we shall use the default \'reliability\' = medium.'.format(
-                        checker_name))
-            return 'medium'
-
-        if 'reliability' not in checkers[checker_name]:
-            # This checker doesn't have a reliability index
-            self.log(logging.INFO, 'static-analysis', {},
-                     'Coverity checker {} doesn\'t have a reliability index set, '
-                     'field \'reliability\' is missing, please consider adding it. '
-                     'For the moment we shall use the default \'reliability\' = medium.'.format(
-                        checker_name))
-            return 'medium'
-
-        return checkers[checker_name]['reliability']
-
-    def dump_cov_artifact(self, cov_results, source, output):
-        # Parse Coverity json into structured issues
-        with open(cov_results) as f:
-            result = json.load(f)
-
-            # Parse the issues to a standard json format
-            issues_dict = {'files': {}}
-
-            files_list = issues_dict['files']
-
-            def build_element(issue):
-                # We look only for main event
-                event_path = next(
-                    (event for event in issue['events'] if event['main'] is True), None)
-
-                dict_issue = {
-                    'line': issue['mainEventLineNumber'],
-                    'flag': issue['checkerName'],
-                    'message': event_path['eventDescription'],
-                    'reliability': self.get_reliability_index_for_cov_checker(
-                        issue['checkerName']
-                        ),
-                    'extra': {
-                        'category': issue['checkerProperties']['category'],
-                        'stateOnServer': issue['stateOnServer'],
-                        'stack': []
-                    }
-                }
-
-                # Embed all events into extra message
-                for event in issue['events']:
-                    dict_issue['extra']['stack'].append(
-                        {'file_path': event['strippedFilePathname'],
-                         'line_number': event['lineNumber'],
-                         'path_type': event['eventTag'],
-                         'description': event['eventDescription']})
-
-                return dict_issue
-
-            for issue in result['issues']:
-                path = self.cov_is_file_in_source(issue['strippedMainEventFilePathname'], source)
-                if path is None:
-                    # Since we skip a result we should log it
-                    self.log(logging.INFO, 'static-analysis', {},
-                             'Skipping CID: {0} from file: {1} since it\'s not related '
-                             'to the current patch.'.format(
-                                issue['stateOnServer']['cid'],
-                                issue['strippedMainEventFilePathname'])
-                             )
-                    continue
-                if path in files_list:
-                    files_list[path]['warnings'].append(build_element(issue))
-                else:
-                    files_list[path] = {'warnings': [build_element(issue)]}
-
-            with open(output, 'w') as f:
-                json.dump(issues_dict, f)
-
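-    # For reference, a hedged sketch of the JSON that dump_cov_artifact emits;
-    # the path, checker name and values are hypothetical:
-    #
-    #   {"files": {"/builds/worker/checkouts/dom/base/nsDocument.cpp": {
-    #       "warnings": [{"line": 42, "flag": "NULL_RETURNS", "message": "...",
-    #                     "reliability": "medium",
-    #                     "extra": {"category": "...", "stateOnServer": {...},
-    #                               "stack": [...]}}]}}}
-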
-    def get_coverity_secrets(self):
-        from taskgraph.util.taskcluster import get_root_url
-
-        secret_name = 'project/relman/coverity'
-        secrets_url = '{}/secrets/v1/secret/{}'.format(get_root_url(True), secret_name)
-
-        self.log(logging.INFO, 'static-analysis', {},
-                 'Fetching the Coverity secret from the secrets service: "{}"'.format(secrets_url))
-
-        import requests
-        res = requests.get(secrets_url)
-        res.raise_for_status()
-        secret = res.json()
-        cov_config = secret['secret'] if 'secret' in secret else None
-
-        if cov_config is None:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Ill-formatted secret for Coverity. Aborting analysis.')
-            return 1
-
-        self.cov_analysis_url = cov_config.get('package_url')
-        self.cov_package_name = cov_config.get('package_name')
-        self.cov_url = cov_config.get('server_url')
-        # If the secret doesn't specify a port, fall back to the default one
-        # used by a standard Coverity deployment.
-        self.cov_port = cov_config.get('server_port', 8443)
-        self.cov_auth = cov_config.get('auth_key')
-        self.cov_package_ver = cov_config.get('package_ver')
-        self.cov_full_stack = cov_config.get('full_stack', False)
-
-        return 0
-
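-    # A sketch of the secret payload that get_coverity_secrets expects; all
-    # values are placeholders, not real endpoints or keys:
-    #
-    #   {"secret": {"package_url": "https://.../cov-analysis.tar.gz",
-    #               "package_name": "cov-analysis-linux64",
-    #               "server_url": "coverity.example.com", "server_port": 8443,
-    #               "auth_key": "...", "package_ver": "2019.03",
-    #               "full_stack": false}}
-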
-    def download_coverity(self):
-        if self.cov_url is None or self.cov_port is None or \
-                self.cov_analysis_url is None or \
-                self.cov_auth is None:
-            self.log(logging.ERROR, 'static-analysis', {}, 'Missing Coverity secret on try job!')
-            return 1
-
-        COVERITY_CONFIG = '''
-        {
-            "type": "Coverity configuration",
-            "format_version": 1,
-            "settings": {
-            "server": {
-                "host": "%s",
-                "ssl" : true,
-                "port": %s,
-                "on_new_cert" : "trust",
-                "auth_key_file": "%s"
-            },
-            "stream": "Firefox",
-            "cov_run_desktop": {
-                "build_cmd": [],
-                "clean_cmd": []
-            }
-            }
-        }
-        '''
-        # Generate the coverity.conf and auth files
-        cov_auth_path = mozpath.join(self.cov_state_path, 'auth')
-        cov_setup_path = mozpath.join(self.cov_state_path, 'coverity.conf')
-        cov_conf = COVERITY_CONFIG % (self.cov_url, self.cov_port, cov_auth_path)
-
-        def download(artifact_url, target):
-            import requests
-            resp = requests.get(artifact_url, verify=False, stream=True)
-            resp.raise_for_status()
-
-            # Extract archive into destination
-            with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
-                tar.extractall(target)
-
-        download(self.cov_analysis_url, self.cov_state_path)
-
-        with open(cov_auth_path, 'w') as f:
-            f.write(self.cov_auth)
-
-        # Restrict its permissions to 600
-        os.chmod(cov_auth_path, 0o600)
-
-        with open(cov_setup_path, 'a') as f:
-            f.write(cov_conf)
-
-    def setup_coverity(self, force_download=True):
-        rc, config, _ = self._get_config_environment()
-        rc = rc or self.get_coverity_secrets()
-
-        if rc != 0:
-            return rc
-
-        # Create a directory in mozbuild where we set up Coverity
-        self.cov_state_path = mozpath.join(self._mach_context.state_dir, "coverity")
-
-        if force_download is True and os.path.exists(self.cov_state_path):
-            shutil.rmtree(self.cov_state_path)
-
-        os.mkdir(self.cov_state_path)
-
-        # Download everything that we need for Coverity from our private instance
-        self.download_coverity()
-
-        self.cov_path = mozpath.join(self.cov_state_path, self.cov_package_name)
-        self.cov_run_desktop = mozpath.join(self.cov_path, 'bin', 'cov-run-desktop')
-        self.cov_translate = mozpath.join(self.cov_path, 'bin', 'cov-translate')
-        self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
-        self.cov_work_path = mozpath.join(self.cov_state_path, 'data-coverity')
-        self.cov_idir_path = mozpath.join(self.cov_work_path, self.cov_package_ver, 'idir')
-
-        if not os.path.exists(self.cov_path):
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Missing Coverity in {}'.format(self.cov_path))
-            return 1
-
-        return 0
-
-    def cov_is_file_in_source(self, abs_path, source):
-        # The input is an absolute path; if it is a symlink we resolve it and
-        # then match it against the elements of source.
-        # If it matches we return abs_path, otherwise None.
-        assert isinstance(source, list)
-        if os.path.islink(abs_path):
-            abs_path = os.path.realpath(abs_path)
-        if abs_path in source:
-            return abs_path
-        return None
-
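-    # A hedged usage sketch for cov_is_file_in_source (the paths are
-    # hypothetical); symlinks are resolved before matching:
-    #
-    #   >>> self.cov_is_file_in_source('/src/dom/base/nsDocument.cpp',
-    #   ...                            ['/src/dom/base/nsDocument.cpp'])
-    #   '/src/dom/base/nsDocument.cpp'
-    #   >>> self.cov_is_file_in_source('/src/unrelated.cpp',
-    #   ...                            ['/src/dom/base/nsDocument.cpp'])  # None
-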
-    def get_files_with_commands(self, source):
-        '''
-        Returns an array of dictionaries pairing each file path with its build command.
-        '''
-
-        with open(self._compile_db, 'r') as db:
-            compile_db = json.load(db)
-
-        commands_list = []
-
-        for f in source:
-            # It must be a C/C++ file
-            _, ext = os.path.splitext(f)
-
-            if ext.lower() not in self._format_include_extensions:
-                self.log(logging.INFO, 'static-analysis', {}, 'Skipping {}'.format(f))
-                continue
-            file_with_abspath = os.path.join(self.topsrcdir, f)
-            for entry in compile_db:
-                # Found an entry for the file that we are looking for
-                if file_with_abspath == entry['file']:
-                    commands_list.append(entry)
-
-        return commands_list
-
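-    # For context, each compile_commands.json entry that get_files_with_commands
-    # matches against looks roughly like this (values are illustrative):
-    #
-    #   {"directory": "/objdir",
-    #    "command": "clang++ -c ... nsDocument.cpp",
-    #    "file": "/src/dom/base/nsDocument.cpp"}
-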
-    @StaticAnalysisSubCommand('static-analysis', 'check-java',
-                              'Run infer on the java codebase.')
-    @CommandArgument('source', nargs='*', default=['mobile'],
-                     help='Source files to be analyzed. '
-                          'Can be omitted, in which case the entire code base '
-                          'is analyzed.  The source argument is ignored if '
-                          'there is anything fed through stdin, in which case '
-                          'the analysis is only performed on the files changed '
-                          'in the patch streamed through stdin.  This is called '
-                          'the diff mode.')
-    @CommandArgument('--checks', '-c', default=[], metavar='checks', nargs='*',
-                     help='Static analysis checks to enable.')
-    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
-                     help='Number of concurrent jobs to run.'
-                     ' Default is the number of CPUs.')
-    @CommandArgument('--task', '-t', type=str,
-                     default='compileWithGeckoBinariesDebugSources',
-                     help='Which gradle tasks to use to compile the java codebase.')
-    @CommandArgument('--outgoing', default=False, action='store_true',
-                     help='Run infer checks on outgoing files from repository')
-    @CommandArgument('--output', default=None,
-                     help='Write infer json output in a file')
-    def check_java(self, source=['mobile'], jobs=2, strip=1, verbose=False, checks=[],
-                   task='compileWithGeckoBinariesDebugSources',
-                   skip_export=False, outgoing=False, output=None):
-        self._set_log_level(verbose)
-        self.log_manager.enable_all_structured_loggers()
-        if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
-            self.log(logging.WARNING, 'static-analysis', {},
-                     'Cannot check java source code unless you are building for android!')
-            return 1
-        rc = self._check_for_java()
-        if rc != 0:
-            return 1
-        if output is not None:
-            output = os.path.abspath(output)
-            if not os.path.isdir(os.path.dirname(output)):
-                self.log(logging.WARNING, 'static-analysis', {},
-                         'Missing report destination folder for {}'.format(output))
-
-        # if source contains the whole mobile folder, then we just have to
-        # analyze everything
-        check_all = any(i.rstrip(os.sep).split(os.sep)[-1] == 'mobile' for i in source)
-        # gather all java sources from the source variable
-        java_sources = []
-        if outgoing:
-            repo = get_repository_object(self.topsrcdir)
-            java_sources = self._get_java_files(repo.get_outgoing_files())
-            if not java_sources:
-                self.log(logging.WARNING, 'static-analysis', {},
-                         'No outgoing Java files to check')
-                return 0
-        elif not check_all:
-            java_sources = self._get_java_files(source)
-            if not java_sources:
-                return 0
-        if not skip_export:
-            rc = self._build_export(jobs=jobs, verbose=verbose)
-            if rc != 0:
-                return rc
-        rc = self._get_infer(verbose=verbose)
-        if rc != 0:
-            self.log(logging.WARNING, 'static-analysis', {},
-                     'This command is only available for linux64!')
-            return rc
-        # which checkers to use, and which folders to exclude
-        all_checkers, third_party_path = self._get_infer_config()
-        checkers, excludes = self._get_infer_args(
-            checks=checks or all_checkers,
-            third_party_path=third_party_path
-        )
-        rc = rc or self._gradle(['clean'])  # clean so that we can recompile
-        # infer capture command
-        capture_cmd = [self._infer_path, 'capture'] + excludes + ['--']
-        rc = rc or self._gradle([task], infer_args=capture_cmd, verbose=verbose)
-        tmp_file, args = self._get_infer_source_args(java_sources)
-        # infer analyze command
-        analysis_cmd = [self._infer_path, 'analyze', '--keep-going'] +  \
-            checkers + args
-        rc = rc or self.run_process(args=analysis_cmd, cwd=self.topsrcdir, pass_thru=True)
-        if tmp_file:
-            tmp_file.close()
-
-        # Copy the infer report
-        report_path = os.path.join(self.topsrcdir, 'infer-out', 'report.json')
-        if output is not None and os.path.exists(report_path):
-            shutil.copy(report_path, output)
-            self.log(logging.INFO, 'static-analysis', {},
-                     'Report available in {}'.format(output))
-
-        return rc
-
-    def _get_java_files(self, sources):
-        java_sources = []
-        for i in sources:
-            f = mozpath.join(self.topsrcdir, i)
-            if os.path.isdir(f):
-                for root, dirs, files in os.walk(f):
-                    dirs.sort()
-                    for file in sorted(files):
-                        if file.endswith('.java'):
-                            java_sources.append(mozpath.join(root, file))
-            elif f.endswith('.java'):
-                java_sources.append(f)
-        return java_sources
-
-    def _get_infer_source_args(self, sources):
-        '''Return the arguments to only analyze <sources>'''
-        if not sources:
-            return (None, [])
-        # create a temporary file in which we place all sources
-        # this is used by the analysis command to only analyze certain files
-        f = tempfile.NamedTemporaryFile()
-        for source in sources:
-            f.write(source+'\n')
-        f.flush()
-        return (f, ['--changed-files-index', f.name])
-
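-    # A small usage sketch for _get_infer_source_args (file names assumed):
-    #
-    #   >>> tmp, args = self._get_infer_source_args(['A.java', 'B.java'])
-    #   >>> args
-    #   ['--changed-files-index', '/tmp/tmpXXXXXX']
-    #
-    # where the temporary file holds one source path per line.
-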
-    def _get_infer_config(self):
-        '''Load the infer config file.'''
-        checkers = []
-        tp_path = ''
-        with open(mozpath.join(self.topsrcdir, 'tools',
-                               'infer', 'config.yaml')) as f:
-            try:
-                config = yaml.safe_load(f)
-                for item in config['infer_checkers']:
-                    if item['publish']:
-                        checkers.append(item['name'])
-                tp_path = mozpath.join(self.topsrcdir, config['third_party'])
-            except Exception:
-                print('Looks like config.yaml is not valid, so we are unable '
-                      'to determine the default checkers and which folder to '
-                      'exclude; using the defaults provided by infer')
-        return checkers, tp_path
-
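-    # A hedged sketch of the tools/infer/config.yaml layout read above; the
-    # checker entry and path shown are illustrative:
-    #
-    #   third_party: tools/rewriting/ThirdPartyPaths.txt
-    #   infer_checkers:
-    #     - name: annotation-reachability
-    #       publish: True
-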
-    def _get_infer_args(self, checks, third_party_path):
-        '''Return the arguments which include the checkers <checks>, and
-        excludes all folders listed in <third_party_path>.'''
-        checkers = ['-a', 'checkers']
-        excludes = []
-        for checker in checks:
-            checkers.append('--' + checker)
-        with open(third_party_path) as f:
-            for line in f:
-                excludes.append('--skip-analysis-in-path')
-                excludes.append(line.strip('\n'))
-        return checkers, excludes
-
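-    # For example, with checks=['biabduction'] and a third-party file listing
-    # 'gradle/' and 'mobile/android/thirdparty/' (all names illustrative),
-    # _get_infer_args would return roughly:
-    #
-    #   (['-a', 'checkers', '--biabduction'],
-    #    ['--skip-analysis-in-path', 'gradle/',
-    #     '--skip-analysis-in-path', 'mobile/android/thirdparty/'])
-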
-    def _get_clang_tidy_config(self):
-        try:
-            file_handler = open(mozpath.join(self.topsrcdir, "tools", "clang-tidy", "config.yaml"))
-            config = yaml.safe_load(file_handler)
-        except Exception:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Looks like config.yaml is not valid, so we are going to use default'
-                     ' values for the rest of the clang-tidy analysis.')
-            return None
-        return config
-
-    def _get_cov_config(self):
-        try:
-            file_handler = open(mozpath.join(self.topsrcdir, "tools", "coverity", "config.yaml"))
-            config = yaml.safe_load(file_handler)
-        except Exception:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'Looks like config.yaml is not valid, so we are going to use default'
-                     ' values for the rest of the Coverity analysis.')
-            return None
-        return config
-
-    def _is_version_eligible(self):
-        # make sure that we've cached self._clang_tidy_config
-        if self._clang_tidy_config is None:
-            self._clang_tidy_config = self._get_clang_tidy_config()
-
-        version = None
-        if 'package_version' in self._clang_tidy_config:
-            version = self._clang_tidy_config['package_version']
-        else:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "Unable to find 'package_version' in the config.yml")
-            return False
-
-        # Because we ship clang-tidy and clang-format together, we are sure
-        # that the two will always share the same version.
-        # Thus, in order to determine that the version is compatible, we only
-        # need to check one of them; we go with clang-format.
-        cmd = [self._clang_format_path, '--version']
-        try:
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
-            version_string = 'clang-format version ' + version
-            if output.startswith(version_string):
-                return True
-        except subprocess.CalledProcessError as e:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "Error determining the version clang-tidy/format binary, please see the "
-                     "attached exception: \n{}".format(e.output))
-
-        return False
-
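-    # For example, with package_version '8.0.0' in config.yaml, the check above
-    # accepts a binary whose banner starts with (banner shape assumed):
-    #
-    #   clang-format version 8.0.0 (tags/RELEASE_800/final)
-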
-    def _get_clang_tidy_command(self, checks, header_filter, sources, jobs, fix):
-
-        if checks == '-*':
-            checks = self._get_checks()
-
-        common_args = ['-clang-tidy-binary', self._clang_tidy_path,
-                       '-clang-apply-replacements-binary', self._clang_apply_replacements,
-                       '-checks=%s' % checks,
-                       '-extra-arg=-DMOZ_CLANG_PLUGIN']
-
-        # The header-filter flag is passed in order to limit the diagnostic messages
-        # to the specified header files only. When no value is specified, the default
-        # is the list of sources, so that diagnostics are limited to the given
-        # source files or folders.
-        common_args += ['-header-filter=%s' % (header_filter
-                                               if len(header_filter) else '|'.join(sources))]
-
-        # From our configuration file, config.yaml, we build the configuration list for
-        # the checkers that are used. These configuration options are used to better fit
-        # the checkers to our code.
-        cfg = self._get_checks_config()
-        if cfg:
-            common_args += ['-config=%s' % yaml.dump(cfg)]
-
-        if fix:
-            common_args += ['-fix']
-
-        return [
-            self.virtualenv_manager.python_path, self._run_clang_tidy_path, '-j',
-            str(jobs), '-p', self._compilation_commands_path
-        ] + common_args + sources
-
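-    # A sketch of the command line _get_clang_tidy_command assembles; the paths,
-    # job count and sources are hypothetical:
-    #
-    #   python .../share/clang/run-clang-tidy.py -j 8 -p <objdir> \
-    #     -clang-tidy-binary .../bin/clang-tidy \
-    #     -clang-apply-replacements-binary .../bin/clang-apply-replacements \
-    #     -checks=-*,mozilla-* -extra-arg=-DMOZ_CLANG_PLUGIN \
-    #     -header-filter=dom/base dom/base/nsDocument.cpp
-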
-    def _check_for_java(self):
-        '''Check if javac can be found.'''
-        import distutils
-        java = self.substs.get('JAVA')
-        java = java or os.getenv('JAVA_HOME')
-        java = java or distutils.spawn.find_executable('javac')
-        error = 'javac was not found! Please install javac and either add it to your PATH, '
-        error += 'set JAVA_HOME, or add the following to your mozconfig:\n'
-        error += '  --with-java-bin-path=/path/to/java/bin/'
-        if not java:
-            self.log(logging.ERROR, 'ERROR: static-analysis', {}, error)
-            return 1
-        return 0
-
-    def _gradle(self, args, infer_args=None, verbose=False, autotest=False,
-                suppress_output=True):
-        infer_args = infer_args or []
-        if autotest:
-            cwd = mozpath.join(self.topsrcdir, 'tools', 'infer', 'test')
-            gradle = mozpath.join(cwd, 'gradlew')
-        else:
-            gradle = self.substs['GRADLE']
-            cwd = self.topsrcdir
-        extra_env = {
-            'GRADLE_OPTS': '-Dfile.encoding=utf-8',  # see mobile/android/mach_commands.py
-            'JAVA_TOOL_OPTIONS': '-Dfile.encoding=utf-8',
-        }
-        if suppress_output:
-            devnull = open(os.devnull, 'w')
-            return subprocess.call(
-                infer_args + [gradle] + args,
-                env=dict(os.environ, **extra_env),
-                cwd=cwd, stdout=devnull, stderr=subprocess.STDOUT, close_fds=True)
-
-        return self.run_process(
-            infer_args + [gradle] + args,
-            append_env=extra_env,
-            pass_thru=True,  # Allow user to run gradle interactively.
-            ensure_exit_code=False,  # Don't throw on non-zero exit code.
-            cwd=cwd)
-
-    @StaticAnalysisSubCommand('static-analysis', 'autotest',
-                              'Run the auto-test suite in order to determine that'
-                              ' the analysis did not regress.')
-    @CommandArgument('--dump-results', '-d', default=False, action='store_true',
-                     help='Generate the baseline for the regression test. Based on'
-                     ' this baseline we will test future results.')
-    @CommandArgument('--intree-tool', '-i', default=False, action='store_true',
-                     help='Use a pre-acquired in-tree clang-tidy package.')
-    @CommandArgument('checker_names', nargs='*', default=[],
-                     help='Checkers that are going to be auto-tested.')
-    def autotest(self, verbose=False, dump_results=False, intree_tool=False, checker_names=[]):
-        # If 'dump_results' is True then we just want to generate the issues files for each
-        # particular checker, and thus 'force_download' becomes 'False' since we want to
-        # do this on a local trusted clang-tidy package.
-        self._set_log_level(verbose)
-        self._dump_results = dump_results
-
-        force_download = not self._dump_results
-
-        # Function return codes
-        self.TOOLS_SUCCESS = 0
-        self.TOOLS_FAILED_DOWNLOAD = 1
-        self.TOOLS_UNSUPPORTED_PLATFORM = 2
-        self.TOOLS_CHECKER_NO_TEST_FILE = 3
-        self.TOOLS_CHECKER_RETURNED_NO_ISSUES = 4
-        self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND = 5
-        self.TOOLS_CHECKER_DIFF_FAILED = 6
-        self.TOOLS_CHECKER_NOT_FOUND = 7
-        self.TOOLS_CHECKER_FAILED_FILE = 8
-        self.TOOLS_CHECKER_LIST_EMPTY = 9
-        self.TOOLS_GRADLE_FAILED = 10
-
-        # Configure the tree or download clang-tidy package, depending on the option that we choose
-        if intree_tool:
-            _, config, _ = self._get_config_environment()
-            clang_tools_path = self.topsrcdir
-            self._clang_tidy_path = mozpath.join(
-                clang_tools_path, "clang-tidy", "bin",
-                "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
-            self._clang_format_path = mozpath.join(
-                clang_tools_path, "clang-tidy", "bin",
-                "clang-format" + config.substs.get('BIN_SUFFIX', ''))
-            self._clang_apply_replacements = mozpath.join(
-                clang_tools_path, "clang-tidy", "bin",
-                "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
-            self._run_clang_tidy_path = mozpath.join(clang_tools_path, "clang-tidy", "share",
-                                                     "clang", "run-clang-tidy.py")
-            self._clang_format_diff = mozpath.join(clang_tools_path, "clang-tidy", "share",
-                                                   "clang", "clang-format-diff.py")
-
-            # Ensure that clang-tidy is present
-            rc = not os.path.exists(self._clang_tidy_path)
-        else:
-            rc = self._get_clang_tools(force=force_download, verbose=verbose)
-
-        if rc != 0:
-            self.log(logging.ERROR, 'ERROR: static-analysis', {},
-                     'Unable to locate the clang-tidy package.')
-            return self.TOOLS_FAILED_DOWNLOAD
-
-        self._clang_tidy_base_path = mozpath.join(self.topsrcdir, "tools", "clang-tidy")
-
-        # For each checker run it
-        self._clang_tidy_config = self._get_clang_tidy_config()
-        platform, _ = self.platform
-
-        if platform not in self._clang_tidy_config['platforms']:
-            self.log(
-                logging.ERROR, 'static-analysis', {},
-                "RUNNING: clang-tidy autotest for platform {} not supported.".format(
-                    platform)
-                )
-            return self.TOOLS_UNSUPPORTED_PLATFORM
-
-        import concurrent.futures
-        import multiprocessing
-
-        max_workers = multiprocessing.cpu_count()
-
-        self.log(logging.INFO, 'static-analysis', {},
-                 "RUNNING: clang-tidy autotest for platform {0} with {1} workers.".format(
-                     platform, max_workers))
-
-        # List all available checkers
-        cmd = [self._clang_tidy_path, '-list-checks', '-checks=*']
-        clang_output = subprocess.check_output(
-            cmd, stderr=subprocess.STDOUT).decode('utf-8')
-        available_checks = clang_output.split('\n')[1:]
-        self._clang_tidy_checks = [c.strip() for c in available_checks if c]
-
-        # Build the dummy compile_commands.json
-        self._compilation_commands_path = self._create_temp_compilation_db(self._clang_tidy_config)
-        checkers_test_batch = []
-        checkers_results = []
-        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
-            futures = []
-            for item in self._clang_tidy_config['clang_checkers']:
-                # Skip if any of the following statements is true:
-                # 1. Checker attribute 'publish' is False.
-                not_published = not bool(item.get('publish', True))
-                # 2. Checker has restricted-platforms and current platform is not of them.
-                ignored_platform = ('restricted-platforms' in item and
-                                    platform not in item['restricted-platforms'])
-                # 3. Checker name is mozilla-* or -*.
-                ignored_checker = item['name'] in ['mozilla-*', '-*']
-                # 4. List checker_names is passed and the current checker is not part of the
-                #    list or 'publish' is False
-                checker_not_in_list = checker_names and (
-                    item['name'] not in checker_names or not_published)
-                if not_published or \
-                   ignored_platform or \
-                   ignored_checker or \
-                   checker_not_in_list:
-                    continue
-                checkers_test_batch.append(item['name'])
-                futures.append(executor.submit(self._verify_checker, item, checkers_results))
-
-            error_code = self.TOOLS_SUCCESS
-            for future in concurrent.futures.as_completed(futures):
-                # Wait for every task to finish
-                ret_val = future.result()
-                if ret_val != self.TOOLS_SUCCESS:
-                    # We are interested only in one error and we don't break
-                    # the execution of the for loop since we want to make sure that all
-                    # tasks finish.
-                    error_code = ret_val
-
-            if error_code != self.TOOLS_SUCCESS:
-
-                self.log(logging.INFO, 'static-analysis', {},
-                         "FAIL: the following clang-tidy check(s) failed:")
-                for failure in checkers_results:
-                    checker_error = failure['checker-error']
-                    checker_name = failure['checker-name']
-                    info1 = failure['info1']
-                    info2 = failure['info2']
-                    info3 = failure['info3']
-
-                    message_to_log = ''
-                    if checker_error == self.TOOLS_CHECKER_NOT_FOUND:
-                        message_to_log = \
-                            "\tChecker {} not present in this clang-tidy version.".format(
-                                checker_name)
-                    elif checker_error == self.TOOLS_CHECKER_NO_TEST_FILE:
-                        message_to_log = \
-                            "\tChecker {0} does not have a test file - {0}.cpp".format(
-                                checker_name)
-                    elif checker_error == self.TOOLS_CHECKER_RETURNED_NO_ISSUES:
-                        message_to_log = (
-                            "\tChecker {0} did not find any issues in its test file, "
-                            "clang-tidy output for the run is:\n{1}"
-                            ).format(checker_name, info1)
-                    elif checker_error == self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND:
-                        message_to_log = \
-                            "\tChecker {0} does not have a result file - {0}.json".format(
-                                checker_name)
-                    elif checker_error == self.TOOLS_CHECKER_DIFF_FAILED:
-                        message_to_log = (
-                            "\tChecker {0}\nExpected: {1}\n"
-                            "Got: {2}\n"
-                            "clang-tidy output for the run is:\n"
-                            "{3}"
-                            ).format(checker_name, info1, info2, info3)
-
-                    print('\n'+message_to_log)
-
-                # Also delete the tmp folder
-                shutil.rmtree(self._compilation_commands_path)
-                return error_code
-
-            # Run the analysis on all checkers at the same time only if we don't dump results.
-            if not self._dump_results:
-                ret_val = self._run_analysis_batch(checkers_test_batch)
-                if ret_val != self.TOOLS_SUCCESS:
-                    shutil.rmtree(self._compilation_commands_path)
-                    return ret_val
-
-        self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: clang-tidy all tests passed.")
-        # Also delete the tmp folder
-        shutil.rmtree(self._compilation_commands_path)
-        return self._autotest_infer(intree_tool, force_download, verbose)
-
-    def _run_analysis(self, checks, header_filter, sources, jobs=1, fix=False, print_out=False):
-        cmd = self._get_clang_tidy_command(
-            checks=checks, header_filter=header_filter,
-            sources=sources,
-            jobs=jobs, fix=fix)
-
-        try:
-            clang_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
-        except subprocess.CalledProcessError as e:
-            print(e.output)
-            return None
-        return self._parse_issues(clang_output), clang_output
-
-    def _run_analysis_batch(self, items):
-        self.log(logging.INFO, 'static-analysis', {},
-                 "RUNNING: clang-tidy checker batch analysis.")
-        if not items:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "ERROR: clang-tidy checker list is empty!")
-            return self.TOOLS_CHECKER_LIST_EMPTY
-
-        issues, clang_output = self._run_analysis(
-            checks='-*,' + ",".join(items),
-            header_filter='',
-            sources=[mozpath.join(self._clang_tidy_base_path, "test", checker) + '.cpp'
-                     for checker in items],
-            print_out=True)
-
-        if issues is None:
-            return self.TOOLS_CHECKER_FAILED_FILE
-
-        failed_checks = []
-        failed_checks_baseline = []
-        for checker in items:
-            test_file_path_json = mozpath.join(
-                self._clang_tidy_base_path, "test", checker) + '.json'
-            # Read the pre-determined issues
-            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
-
-            # We also stored the 'reliability' index so strip that from the baseline_issues
-            baseline_issues[:] = [item for item in baseline_issues if 'reliability' not in item]
-
-            found = all([element_base in issues for element_base in baseline_issues])
-
-            if not found:
-                failed_checks.append(checker)
-                failed_checks_baseline.append(baseline_issues)
-
-        if len(failed_checks) > 0:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     'The following check(s) failed for bulk analysis: ' + ' '.join(failed_checks))
-
-            for failed_check, baseline_issue in zip(failed_checks, failed_checks_baseline):
-                print('\tChecker {0} expects the following results:\n\t\t{1}'.format(
-                    failed_check, baseline_issue))
-
-            print('This is the output generated by clang-tidy for the bulk build:\n{}'.format(
-                clang_output))
-            return self.TOOLS_CHECKER_DIFF_FAILED
-
-        return self.TOOLS_SUCCESS
-
-    def _create_temp_compilation_db(self, config):
-        directory = tempfile.mkdtemp(prefix='cc')
-        with open(mozpath.join(directory, "compile_commands.json"), "wb") as file_handler:
-            compile_commands = []
-            test_dir = mozpath.join(self.topsrcdir, 'tools', 'clang-tidy', 'test')
-            for item in config['clang_checkers']:
-                if item['name'] in ['-*', 'mozilla-*']:
-                    continue
-                file_name = item['name'] + '.cpp'
-                element = {}
-                element["directory"] = test_dir
-                element["command"] = 'cpp ' + file_name
-                element["file"] = mozpath.join(test_dir, file_name)
-                compile_commands.append(element)
-
-            json.dump(compile_commands, file_handler)
-            file_handler.flush()
-
-            return directory
-
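-    # Each dummy entry written above looks like this, for an illustrative
-    # checker name:
-    #
-    #   {"directory": "<srcdir>/tools/clang-tidy/test",
-    #    "command": "cpp modernize-use-nullptr.cpp",
-    #    "file": "<srcdir>/tools/clang-tidy/test/modernize-use-nullptr.cpp"}
-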
-    def _autotest_infer(self, intree_tool, force_download, verbose):
-        # infer is not available on other platforms, but autotest should work even without
-        # it being installed
-        if self.platform[0] == 'linux64':
-            rc = self._check_for_java()
-            if rc != 0:
-                return 1
-            rc = self._get_infer(force=force_download, verbose=verbose, intree_tool=intree_tool)
-            if rc != 0:
-                self.log(logging.ERROR, 'ERROR: static-analysis', {},
-                         'infer unable to locate package.')
-                return self.TOOLS_FAILED_DOWNLOAD
-            self.__infer_tool = mozpath.join(self.topsrcdir, 'tools', 'infer')
-            self.__infer_test_folder = mozpath.join(self.__infer_tool, 'test')
-
-            import concurrent.futures
-            import multiprocessing
-            max_workers = multiprocessing.cpu_count()
-            self.log(logging.INFO, 'static-analysis', {},
-                     "RUNNING: infer autotest for platform {0} with {1} workers.".format(
-                         self.platform[0], max_workers))
-            # clean previous autotest if it exists
-            rc = self._gradle(['autotest:clean'], autotest=True)
-            if rc != 0:
-                return rc
-            import yaml
-            with open(mozpath.join(self.__infer_tool, 'config.yaml')) as f:
-                config = yaml.safe_load(f)
-            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
-                futures = []
-                for item in config['infer_checkers']:
-                    if item['publish']:
-                        futures.append(executor.submit(self._verify_infer_checker, item))
-                # this is always included in check-java, but not in config.yaml
-                futures.append(executor.submit(self._verify_infer_checker,
-                                               {'name': 'checkers'}))
-                for future in concurrent.futures.as_completed(futures):
-                    ret_val = future.result()
-                    if ret_val != self.TOOLS_SUCCESS:
-                        return ret_val
-            self.log(logging.INFO, 'static-analysis', {}, "SUCCESS: infer all tests passed.")
-        else:
-            self.log(logging.WARNING, 'static-analysis', {},
-                     "Skipping infer autotest, because it is only available on linux64!")
-        return self.TOOLS_SUCCESS
-
-    def _verify_infer_checker(self, item):
-        '''Given a checker, this method verifies the following:
-          1. if there is a `checker`.json and `checker`.java file in
-             `tools/infer/test/autotest/src`
-          2. if running infer on `checker`.java yields the same result as `checker`.json
-        An `item` is simply a dictionary, which needs to have a `name` field set, which is the
-        name of the checker.
-        '''
-        def to_camelcase(name):
-            return ''.join([part.capitalize() for part in name.split('-')])
-        check = item['name']
-        test_file_path = mozpath.join(self.__infer_tool, 'test', 'autotest', 'src',
-                                      'main', 'java', to_camelcase(check))
-        test_file_path_java = test_file_path + '.java'
-        test_file_path_json = test_file_path + '.json'
-        self.log(logging.INFO, 'static-analysis', {}, "RUNNING: infer check {}.".format(check))
-        # Verify if the test file exists for this checker
-        if not os.path.exists(test_file_path_java):
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "ERROR: infer check {} doesn't have a test file.".format(check))
-            return self.TOOLS_CHECKER_NO_TEST_FILE
-        # run infer on a particular test file
-        out_folder = mozpath.join(self.__infer_test_folder, 'test-infer-{}'.format(check))
-        if check == 'checkers':
-            check_arg = ['-a', 'checkers']
-        else:
-            check_arg = ['--{}-only'.format(check)]
-        infer_args = [self._infer_path, 'run'] + check_arg + ['-o', out_folder, '--']
-        gradle_args = ['autotest:compileInferTest{}'.format(to_camelcase(check))]
-        rc = self._gradle(gradle_args, infer_args=infer_args, autotest=True)
-        if rc != 0:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "ERROR: infer failed to execute gradle {}.".format(gradle_args))
-            return self.TOOLS_GRADLE_FAILED
-        with open(mozpath.join(out_folder, 'report.json')) as report:
-            issues = json.load(report)
-        # remove folder that infer creates because the issues are loaded into memory
-        shutil.rmtree(out_folder)
-        # Check whether we got any issues; if not, report an error
-        if not issues:
-            self.log(
-                logging.ERROR, 'static-analysis', {},
-                "ERROR: infer check {0} did not find any issues in its associated test suite."
-                .format(check)
-            )
-            return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
-        if self._dump_results:
-            self._build_autotest_result(test_file_path_json, issues)
-        else:
-            if not os.path.exists(test_file_path_json):
-                # The result file for this test was not found; maybe regenerate it?
-                self.log(
-                    logging.ERROR, 'static-analysis', {},
-                    "ERROR: infer result file not found for check {0}".format(check)
-                )
-                return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
-            # Read the pre-determined issues
-            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
-
-            def ordered(obj):
-                if isinstance(obj, dict):
-                    return sorted((k, ordered(v)) for k, v in obj.items())
-                if isinstance(obj, list):
-                    return sorted(ordered(x) for x in obj)
-                return obj
-            # Compare the two lists
-            if ordered(issues) != ordered(baseline_issues):
-                error_str = "ERROR: in check {} Expected: ".format(check)
-                error_str += '\n' + json.dumps(baseline_issues, indent=2)
-                error_str += '\n Got:\n' + json.dumps(issues, indent=2)
-                self.log(logging.ERROR, 'static-analysis', {},
-                         'ERROR: infer autotest for check {} failed, check stdout for more details'
-                         .format(check))
-                print(error_str)
-                return self.TOOLS_CHECKER_DIFF_FAILED
-        return self.TOOLS_SUCCESS
-
-    @StaticAnalysisSubCommand('static-analysis', 'install',
-                              'Install the static analysis helper tool')
-    @CommandArgument('source', nargs='?', type=str,
-                     help='Where to fetch a local archive containing the static-analysis and '
-                     'format helper tool. '
-                          'It will be installed in ~/.mozbuild/clang-tools and ~/.mozbuild/infer. '
-                          'Can be omitted, in which case the latest clang-tools and infer '
-                          'helper for the platform will be automatically detected and installed.')
-    @CommandArgument('--skip-cache', action='store_true',
-                     help='Skip all local caches to force re-fetching the helper tool.',
-                     default=False)
-    @CommandArgument('--force', action='store_true',
-                     help='Force re-install even though the tool exists in mozbuild.',
-                     default=False)
-    @CommandArgument('--minimal-install', action='store_true',
-                     help='Download only clang based tool.',
-                     default=False)
-    def install(self, source=None, skip_cache=False, force=False, minimal_install=False,
-                verbose=False):
-        self._set_log_level(verbose)
-        rc = self._get_clang_tools(force=force, skip_cache=skip_cache,
-                                   source=source, verbose=verbose)
-        if rc == 0 and not minimal_install:
-            # XXX ignore the return code because if it fails or not, infer is
-            # not mandatory, but clang-tidy is
-            self._get_infer(force=force, skip_cache=skip_cache, verbose=verbose)
-        return rc
-
-    @StaticAnalysisSubCommand('static-analysis', 'clear-cache',
-                              'Delete local helpers and reset static analysis helper tool cache')
-    def clear_cache(self, verbose=False):
-        self._set_log_level(verbose)
-        rc = self._get_clang_tools(force=True, download_if_needed=True, skip_cache=True,
-                                   verbose=verbose)
-        if rc == 0:
-            self._get_infer(force=True, download_if_needed=True, skip_cache=True,
-                            verbose=verbose)
-        if rc != 0:
-            return rc
-        return self._artifact_manager.artifact_clear_cache()
-
-    @StaticAnalysisSubCommand('static-analysis', 'print-checks',
-                              'Print a list of the static analysis checks performed by default')
-    def print_checks(self, verbose=False):
-        self._set_log_level(verbose)
-        rc = self._get_clang_tools(verbose=verbose)
-        if rc == 0:
-            rc = self._get_infer(verbose=verbose)
-        if rc != 0:
-            return rc
-        args = [self._clang_tidy_path, '-list-checks', '-checks=%s' % self._get_checks()]
-        rc = self._run_command_in_objdir(args=args, pass_thru=True)
-        if rc != 0:
-            return rc
-        checkers, _ = self._get_infer_config()
-        print('Infer checks:')
-        for checker in checkers:
-            print(' '*4 + checker)
-        return 0
-
-    @Command('clang-format',  category='misc', description='Run clang-format on current changes')
-    @CommandArgument('--show', '-s', action='store_const', const='stdout', dest='output_path',
-                     help='Show diff output on stdout instead of applying changes')
-    @CommandArgument('--assume-filename', '-a', nargs=1, default=None,
-                     help='This option is usually used in the context of hg-formatsource. '
-                          'When reading from stdin, clang-format assumes this '
-                          'filename to look for a style config file (with '
-                          '-style=file) and to determine the language. When '
-                          'specifying this option only one file should be used '
-                          'as an input and the output will be forwarded to stdout. '
-                          'This option also skips the download of the clang-tools '
-                          'and assumes the package is already located in its default '
-                          'location')
-    @CommandArgument('--path', '-p', nargs='+', default=None,
-                     help='Specify the path(s) to reformat')
-    @CommandArgument('--commit', '-c', default=None,
-                     help='Specify a commit to reformat from. '
-                          'For git you can also pass a range of commits (foo..bar) '
-                          'to format all of them at the same time.')
-    @CommandArgument('--output', '-o', default=None, dest='output_path',
-                     help='Specify a file handle to write clang-format raw output instead of '
-                          'applying changes. This can be stdout or a file path.')
-    @CommandArgument('--format', '-f', choices=('diff', 'json'), default='diff',
-                     dest='output_format',
-                     help='Specify the output format used: diff is the raw patch provided by '
-                     'clang-format, json is a list of atomic changes to process.')
-    @CommandArgument('--outgoing', default=False, action='store_true',
-                     help='Run clang-format on outgoing files from mercurial repository')
-    def clang_format(self, assume_filename, path, commit, output_path=None, output_format='diff',
-                     verbose=False, outgoing=False):
-        # Run clang-format or clang-format-diff on the local changes
-        # or files/directories
-        if path is None and outgoing:
-            repo = get_repository_object(self.topsrcdir)
-            path = repo.get_outgoing_files()
-
-        if path:
-            # Create the full path list
-            def path_maker(f_name): return os.path.join(self.topsrcdir, f_name)
-            path = map(path_maker, path)
-
-        os.chdir(self.topsrcdir)
-
-        # Load output file handle, either stdout or a file handle in write mode
-        output = None
-        if output_path is not None:
-            output = sys.stdout if output_path == 'stdout' else open(output_path, 'w')
-
-        # With assume_filename we want to keep stdout clean, since the result of the
-        # format will be redirected to stdout. Only in case of error do we
-        # write something to stdout.
-        # We don't actually want to fetch the clang-tools here, since in some
-        # scenarios we want to do this in parallel, so we rely on the fact that the
-        # tools have already been downloaded via './mach bootstrap' or directly via
-        # './mach static-analysis install'.
-        if assume_filename:
-            rc = self._set_clang_tools_paths()
-            if rc != 0:
-                print("clang-format: Unable to set path to clang-format tools.")
-                return rc
-
-            if not self._do_clang_tools_exist():
-                print("clang-format: Unable to set locate clang-format tools.")
-                return 1
-        else:
-            rc = self._get_clang_tools(verbose=verbose)
-            if rc != 0:
-                return rc
-
-        if self._is_version_eligible() is False:
-            self.log(logging.ERROR, 'static-analysis', {},
-                     "You're using an old version of clang-format binary."
-                     " Please update to a more recent one by running: './mach bootstrap'")
-            return 1
-
-        if path is None:
-            return self._run_clang_format_diff(self._clang_format_diff,
-                                               self._clang_format_path, commit, output)
-
-        if assume_filename:
-            return self._run_clang_format_in_console(self._clang_format_path,
-                                                     path, assume_filename)
-
-        return self._run_clang_format_path(self._clang_format_path, path, output, output_format)
-
-    def _verify_checker(self, item, checkers_results):
-        check = item['name']
-        test_file_path = mozpath.join(self._clang_tidy_base_path, "test", check)
-        test_file_path_cpp = test_file_path + '.cpp'
-        test_file_path_json = test_file_path + '.json'
-
-        self.log(logging.INFO, 'static-analysis', {},
-                 "RUNNING: clang-tidy checker {}.".format(check))
-
-        # Structured information in case a checker fails
-        checker_error = {
-            'checker-name': check,
-            'checker-error': '',
-            'info1': '',
-            'info2': '',
-            'info3': ''
-        }
-
-        # Verify if this checker actually exists
-        if check not in self._clang_tidy_checks:
-            checker_error['checker-error'] = self.TOOLS_CHECKER_NOT_FOUND
-            checkers_results.append(checker_error)
-            return self.TOOLS_CHECKER_NOT_FOUND
-
-        # Verify if the test file exists for this checker
-        if not os.path.exists(test_file_path_cpp):
-            checker_error['checker-error'] = self.TOOLS_CHECKER_NO_TEST_FILE
-            checkers_results.append(checker_error)
-            return self.TOOLS_CHECKER_NO_TEST_FILE
-
-        issues, clang_output = self._run_analysis(
-            checks='-*,' + check, header_filter='', sources=[test_file_path_cpp])
-        if issues is None:
-            return self.TOOLS_CHECKER_FAILED_FILE
-
-        # Check whether we got any issues; if not, report an error
-        if not issues:
-            checker_error['checker-error'] = self.TOOLS_CHECKER_RETURNED_NO_ISSUES
-            checker_error['info1'] = clang_output
-            checkers_results.append(checker_error)
-            return self.TOOLS_CHECKER_RETURNED_NO_ISSUES
-
-        # Also store the 'reliability' index for this checker
-        issues.append({'reliability': item['reliability']})
-
-        if self._dump_results:
-            self._build_autotest_result(test_file_path_json, json.dumps(issues))
-        else:
-            if not os.path.exists(test_file_path_json):
-                # The result file for this test was not found; maybe regenerate it?
-                checker_error['checker-error'] = self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
-                checkers_results.append(checker_error)
-                return self.TOOLS_CHECKER_RESULT_FILE_NOT_FOUND
-
-            # Read the pre-determined issues
-            baseline_issues = self._get_autotest_stored_issues(test_file_path_json)
-
-            # Compare the two lists
-            if issues != baseline_issues:
-                checker_error['checker-error'] = self.TOOLS_CHECKER_DIFF_FAILED
-                checker_error['info1'] = baseline_issues
-                checker_error['info2'] = issues
-                checker_error['info3'] = clang_output
-                checkers_results.append(checker_error)
-                return self.TOOLS_CHECKER_DIFF_FAILED
-
-        return self.TOOLS_SUCCESS
-
-    def _build_autotest_result(self, file, issues):
-        with open(file, 'w') as f:
-            f.write(issues)
-
-    def _get_autotest_stored_issues(self, file):
-        with open(file) as f:
-            return json.load(f)
-
-    def _parse_issues(self, clang_output):
-        '''
-        Parse clang-tidy output into structured issues
-        '''
-
-        # Limit clang output parsing to 'Enabled checks:'
-        end = re.search(r'^Enabled checks:\n', clang_output, re.MULTILINE)
-        if end is not None:
-            clang_output = clang_output[:end.start()-1]
-
-        platform, _ = self.platform
-        # Starting with clang 8, the diagnostic messages contain multiple `LF CR`
-        # sequences in order to be compatible with the msvc compiler format, so
-        # we are not interested in matching the end of the line.
-        regex_string = r'(.+):(\d+):(\d+): (warning|error): ([^\[\]\n]+)(?: \[([\.\w-]+)\])'
-
-        # For non 'win' based platforms we also need the 'end of the line' regex
-        if platform not in ('win64', 'win32'):
-            regex_string += '?$'
-
-        regex_header = re.compile(regex_string, re.MULTILINE)
-
-        # Sort headers by position
-        headers = sorted(
-            regex_header.finditer(clang_output),
-            key=lambda h: h.start()
-        )
-        issues = []
-        for _, header in enumerate(headers):
-            header_group = header.groups()
-            element = [header_group[3], header_group[4], header_group[5]]
-            issues.append(element)
-        return issues
-
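-    # A worked example for _parse_issues: a diagnostic line such as (file and
-    # message hypothetical)
-    #
-    #   foo.cpp:12:3: warning: use nullptr [modernize-use-nullptr]
-    #
-    # matches the regex above and is reduced to the element
-    # ['warning', 'use nullptr', 'modernize-use-nullptr'].
-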
-    def _get_checks(self):
-        checks = '-*'
-        try:
-            config = self._clang_tidy_config
-            for item in config['clang_checkers']:
-                if item.get('publish', True):
-                    checks += ',' + item['name']
-        except Exception:
-            print('Looks like config.yaml is not valid, so we are unable to '
-                  'determine the default checkers; using \'-checks=-*,mozilla-*\'')
-            checks += ',mozilla-*'
-        finally:
-            return checks
-
-    def _get_checks_config(self):
-        config_list = []
-        checker_config = {}
-        try:
-            config = self._clang_tidy_config
-            for checker in config['clang_checkers']:
-                if checker.get('publish', True) and 'config' in checker:
-                    for checker_option in checker['config']:
-                        # Verify if the format of the Option is correct,
-                        # possibilities are:
-                        # 1. CheckerName.Option
-                        # 2. Option -> that will become CheckerName.Option
-                        if not checker_option['key'].startswith(checker['name']):
-                            checker_option['key'] = "{}.{}".format(
-                                checker['name'], checker_option['key'])
-                    config_list += checker['config']
-            checker_config['CheckOptions'] = config_list
-        except Exception:
-            print('Looks like config.yaml is not valid, so we are unable to '
-                  'determine the configuration for checkers; using the defaults')
-            checker_config = None
-        finally:
-            return checker_config
-
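-    # Example of the key expansion above: a config.yaml entry such as
-    #
-    #   - name: modernize-use-nullptr
-    #     config:
-    #       - key: NullMacros
-    #         value: NULL
-    #
-    # yields {'key': 'modernize-use-nullptr.NullMacros', 'value': 'NULL'} in the
-    # generated CheckOptions list (checker name and option are illustrative).
-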
-    def _get_config_environment(self):
-        ran_configure = False
-        config = None
-        builder = Build(self._mach_context)
-
-        try:
-            config = self.config_environment
-        except Exception:
-            print('Looks like configure has not run yet, running it now...')
-            rc = builder.configure()
-            if rc != 0:
-                return (rc, config, ran_configure)
-            ran_configure = True
-            try:
-                config = self.config_environment
-            except Exception:
-                pass
-
-        return (0, config, ran_configure)
-
-    def _build_compile_db(self, verbose=False):
-        self._compile_db = mozpath.join(self.topobjdir, 'compile_commands.json')
-        if os.path.exists(self._compile_db):
-            return 0
-
-        rc, config, ran_configure = self._get_config_environment()
-        if rc != 0:
-            return rc
-
-        if ran_configure:
-            # Configure may have created the compilation database if the
-            # mozconfig enables building the CompileDB backend by default,
-            # so we recurse to see if the file exists once again.
-            return self._build_compile_db(verbose=verbose)
-
-        if config:
-            print('Looks like a clang compilation database has not been '
-                  'created yet, creating it now...')
-            builder = Build(self._mach_context)
-            rc = builder.build_backend(['CompileDB'], verbose=verbose)
-            if rc != 0:
-                return rc
-            assert os.path.exists(self._compile_db)
-            return 0
-
-    def _build_export(self, jobs, verbose=False):
-        def on_line(line):
-            self.log(logging.INFO, 'build_output', {'line': line}, '{line}')
-
-        builder = Build(self._mach_context)
-        # First install what we can through install manifests.
-        rc = builder._run_make(directory=self.topobjdir, target='pre-export',
-                               line_handler=None, silent=not verbose)
-        if rc != 0:
-            return rc
-
-        # Then build the rest of the build dependencies by running the full
-        # export target, because we can't do anything better.
-        return builder._run_make(directory=self.topobjdir, target='export',
-                                 line_handler=None, silent=not verbose,
-                                 num_jobs=jobs)
-
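
Note that on_line above is defined but never wired up: both _run_make calls pass line_handler=None. If the make output were to be streamed through mach logging, the call would look like this (a sketch, not the original behavior):

    rc = builder._run_make(directory=self.topobjdir, target='export',
                           line_handler=on_line, silent=not verbose,
                           num_jobs=jobs)
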
-    def _set_clang_tools_paths(self):
-        rc, config, _ = self._get_config_environment()
-
-        if rc != 0:
-            return rc
-
-        self._clang_tools_path = mozpath.join(self._mach_context.state_dir, "clang-tools")
-        self._clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy", "bin",
-                                             "clang-tidy" + config.substs.get('BIN_SUFFIX', ''))
-        self._clang_format_path = mozpath.join(
-            self._clang_tools_path, "clang-tidy", "bin",
-            "clang-format" + config.substs.get('BIN_SUFFIX', ''))
-        self._clang_apply_replacements = mozpath.join(
-            self._clang_tools_path, "clang-tidy", "bin",
-            "clang-apply-replacements" + config.substs.get('BIN_SUFFIX', ''))
-        self._run_clang_tidy_path = mozpath.join(self._clang_tools_path, "clang-tidy",
-                                                 "share", "clang", "run-clang-tidy.py")
-        self._clang_format_diff = mozpath.join(self._clang_tools_path, "clang-tidy",
-                                               "share", "clang", "clang-format-diff.py")
-        return 0
-
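
On a Linux host with the default mach state directory, the paths computed above resolve as in this sketch (state_dir and suffix are illustrative):

    import os

    state_dir = os.path.expanduser('~/.mozbuild')  # typical mach state dir
    bin_suffix = ''                                # '.exe' on Windows
    clang_tidy = os.path.join(state_dir, 'clang-tools', 'clang-tidy',
                              'bin', 'clang-tidy' + bin_suffix)
    # -> ~/.mozbuild/clang-tools/clang-tidy/bin/clang-tidy
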
-    def _do_clang_tools_exist(self):
-        return os.path.exists(self._clang_tidy_path) and \
-               os.path.exists(self._clang_format_path) and \
-               os.path.exists(self._clang_apply_replacements) and \
-               os.path.exists(self._run_clang_tidy_path)
-
-    def _get_clang_tools(self, force=False, skip_cache=False,
-                         source=None, download_if_needed=True,
-                         verbose=False):
-
-        rc = self._set_clang_tools_paths()
-
-        if rc != 0:
-            return rc
-
-        if self._do_clang_tools_exist() and not force:
-            return 0
-
-        if os.path.isdir(self._clang_tools_path) and download_if_needed:
-            # The directory exists but the tools are missing; it may be
-            # corrupted.  Delete it and start from scratch.
-            shutil.rmtree(self._clang_tools_path)
-            return self._get_clang_tools(force=force, skip_cache=skip_cache,
-                                         source=source, verbose=verbose,
-                                         download_if_needed=download_if_needed)
-
-        # Create the base directory where we store the clang binaries
-        os.mkdir(self._clang_tools_path)
-
-        if source:
-            return self._get_clang_tools_from_source(source)
-
-        self._artifact_manager = PackageFrontend(self._mach_context)
-
-        if not download_if_needed:
-            return 0
-
-        job, _ = self.platform
-
-        if job is None:
-            raise Exception('The current platform isn\'t supported; '
-                            'only win32/win64, linux64 and macosx64 '
-                            'are supported.')
-
-        job += '-clang-tidy'
-
-        # Unpack the toolchain artifact in the clang-tools state directory
-        currentWorkingDir = os.getcwd()
-        os.chdir(self._clang_tools_path)
-        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
-                                                       skip_cache=skip_cache,
-                                                       from_build=[job],
-                                                       no_unpack=False,
-                                                       retry=0)
-        # Change back the cwd
-        os.chdir(currentWorkingDir)
-
-        return rc
-
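
The chdir/restore around artifact_toolchain (and the same pattern in _get_infer below) is not exception-safe: if the download raises, the original working directory is never restored. A small context manager would make the pattern robust; a sketch, not part of the original code:

    import os
    from contextlib import contextmanager

    @contextmanager
    def working_directory(path):
        # Switch cwd for the duration of the block, restoring it on exit
        # even if an exception is raised.
        prev = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(prev)

The artifact download would then run inside 'with working_directory(self._clang_tools_path):'.
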
-    def _get_clang_tools_from_source(self, filename):
-        from mozbuild.action.tooltool import unpack_file
-        clang_tidy_path = mozpath.join(self._mach_context.state_dir,
-                                       "clang-tools")
-
-        currentWorkingDir = os.getcwd()
-        os.chdir(clang_tidy_path)
-
-        unpack_file(filename)
-
-        # Change back the cwd
-        os.chdir(currentWorkingDir)
-
-        clang_path = mozpath.join(clang_tidy_path, 'clang')
-
-        if not os.path.isdir(clang_path):
-            raise Exception('Extracted the archive but didn\'t find '
-                            'the expected output')
-
-        assert os.path.exists(self._clang_tidy_path)
-        assert os.path.exists(self._clang_format_path)
-        assert os.path.exists(self._clang_apply_replacements)
-        assert os.path.exists(self._run_clang_tidy_path)
-        return 0
-
-    def _get_clang_format_diff_command(self, commit):
-        if self.repository.name == 'hg':
-            args = ["hg", "diff", "-U0"]
-            if commit:
-                args += ["-c", commit]
-            else:
-                args += ["-r", ".^"]
-            for dot_extension in self._format_include_extensions:
-                args += ['--include', 'glob:**{0}'.format(dot_extension)]
-            args += ['--exclude', 'listfile:{0}'.format(self._format_ignore_file)]
-        else:
-            commit_range = "HEAD"  # All uncommitted changes.
-            if commit:
-                commit_range = commit if ".." in commit else "{}~..{}".format(commit, commit)
-            args = ["git", "diff", "--no-color", "-U0", commit_range, "--"]
-            for dot_extension in self._format_include_extensions:
-                args += ['*{0}'.format(dot_extension)]
-            # git-diff doesn't support an 'exclude-from-file' option, but
-            # it has allowed individual exclude pathspecs since v1.9, see
-            # https://git-scm.com/docs/gitglossary#gitglossary-aiddefpathspecapathspec
-            with open(self._format_ignore_file, 'rb') as exclude_pattern_file:
-                for pattern in exclude_pattern_file.readlines():
-                    pattern = pattern.rstrip()
-                    pattern = pattern.replace('.*', '**')
-                    if not pattern or pattern.startswith('#'):
-                        continue  # empty or comment
-                    magics = ['exclude']
-                    if pattern.startswith('^'):
-                        magics += ['top']
-                        pattern = pattern[1:]
-                    args += [':({0}){1}'.format(','.join(magics), pattern)]
-        return args
-
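
To illustrate the git branch of the loop above, a hypothetical ignore-file line such as '^third_party/.*' is rewritten into a git exclude pathspec (the original reads raw bytes from the ignore file; plain str is used here for clarity):

    pattern = '^third_party/.*'.rstrip().replace('.*', '**')
    magics = ['exclude']
    if pattern.startswith('^'):
        magics += ['top']
        pattern = pattern[1:]
    arg = ':({0}){1}'.format(','.join(magics), pattern)
    # arg == ':(exclude,top)third_party/**'
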
-    def _get_infer(self, force=False, skip_cache=False, download_if_needed=True,
-                   verbose=False, intree_tool=False):
-        rc, config, _ = self._get_config_environment()
-        if rc != 0:
-            return rc
-        infer_path = self.topsrcdir if intree_tool else \
-            mozpath.join(self._mach_context.state_dir, 'infer')
-        self._infer_path = mozpath.join(infer_path, 'infer', 'bin', 'infer' +
-                                        config.substs.get('BIN_SUFFIX', ''))
-        if intree_tool:
-            return not os.path.exists(self._infer_path)
-        if os.path.exists(self._infer_path) and not force:
-            return 0
-
-        if os.path.isdir(infer_path) and download_if_needed:
-            # The directory exists but the tool is missing; it may be
-            # corrupted.  Delete it and start from scratch.
-            shutil.rmtree(infer_path)
-            return self._get_infer(force=force, skip_cache=skip_cache,
-                                   verbose=verbose,
-                                   download_if_needed=download_if_needed)
-        os.mkdir(infer_path)
-        self._artifact_manager = PackageFrontend(self._mach_context)
-        if not download_if_needed:
-            return 0
-        job, _ = self.platform
-        if job != 'linux64':
-            return -1
-        job += '-infer'
-        # Unpack the toolchain artifact in the infer state directory
-        currentWorkingDir = os.getcwd()
-        os.chdir(infer_path)
-        rc = self._artifact_manager.artifact_toolchain(verbose=verbose,
-                                                       skip_cache=skip_cache,
-                                                       from_build=[job],
-                                                       no_unpack=False,
-                                                       retry=0)
-        # Change back the cwd
-        os.chdir(currentWorkingDir)
-        return rc
-
-    def _run_clang_format_diff(self, clang_format_diff, clang_format, commit, output_file):
-        # Run clang-format on the diff.
-        # Note that this will potentially miss a lot of things.
-        from subprocess import Popen, PIPE, check_output, CalledProcessError
-
-        diff_process = Popen(self._get_clang_format_diff_command(commit), stdout=PIPE)
-        args = [sys.executable, clang_format_diff, "-p1", "-binary=%s" % clang_format]
-
-        if not output_file:
-            args.append("-i")
-        try:
-            output = check_output(args, stdin=diff_process.stdout)
-            if output_file:
-                # We want to print the diffs
-                print(output, file=output_file)
-
-            return 0
-        except CalledProcessError as e:
-            # Something went wrong
-            print("clang-format: An error occurred while running clang-format-diff.")
-            return e.returncode
-
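
The method above is essentially a pipeline: pipe the VCS diff into clang-format-diff.py, which reformats only the touched lines. A standalone sketch of the hg case (script and binary paths illustrative):

    from subprocess import PIPE, Popen, check_output

    diff = Popen(['hg', 'diff', '-U0', '-r', '.^'], stdout=PIPE)
    # -p1 strips the a/ and b/ path prefixes; -i edits files in place.
    check_output(['python', 'clang-format-diff.py', '-p1',
                  '-binary=clang-format', '-i'], stdin=diff.stdout)
    diff.stdout.close()
    diff.wait()
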
-    def _is_ignored_path(self, ignored_dir_re, f):
-        # Strip the topsrcdir prefix from the pathname, then match
-        if f.startswith(self.topsrcdir + '/'):
-            match_f = f[len(self.topsrcdir + '/'):]
-        else:
-            match_f = f
-        return re.match(ignored_dir_re, match_f)
-
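
A self-contained illustration of the prefix stripping in _is_ignored_path, with a hypothetical topsrcdir and ignore regex:

    import re

    topsrcdir = '/src/mozilla-central'            # illustrative
    ignored_dir_re = re.compile(r'third_party/')  # hypothetical ignore regex
    f = topsrcdir + '/third_party/foo/bar.cpp'
    match_f = f[len(topsrcdir + '/'):] if f.startswith(topsrcdir + '/') else f
    assert re.match(ignored_dir_re, match_f)      # matches the relative path
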
-    def _generate_path_list(self, paths, verbose=True):
-        path_to_third_party = os.path.joi