Merge inbound to mozilla-central. a=merge
author        Csoregi Natalia <ncsoregi@mozilla.com>
date          Sat, 28 Apr 2018 00:53:15 +0300
changeset 416055 8b2c1fc3d6c348f053fe33a478fa3b1ddb5eb8a6
parent 416033 2a66df1058cd34a1d48e1e1e3bcd2db36714ed51 (current diff)
parent 416054 f846639066aa8f7e5aff7fa363a450885fc887b4 (diff)
child 416110 97320da8d8e5b1fb2c3c9ff469f10ed79e15eb0e
child 416147 5037bf398c93f85b65b02b20f09a32bc347cda41
push id       33915
push user     ncsoregi@mozilla.com
push date     Fri, 27 Apr 2018 21:53:44 +0000
treeherder    mozilla-central@8b2c1fc3d6c3
reviewers     merge
milestone     61.0a1
first release with
nightly 8b2c1fc3d6c3 / 61.0a1 / 20180427220103 (linux32, linux64, mac, win32, win64)
Merge inbound to mozilla-central. a=merge
toolkit/components/payments/.eslintrc.js
toolkit/components/payments/content/paymentDialogFrameScript.js
toolkit/components/payments/content/paymentDialogWrapper.css
toolkit/components/payments/content/paymentDialogWrapper.js
toolkit/components/payments/content/paymentDialogWrapper.xhtml
toolkit/components/payments/docs/index.rst
toolkit/components/payments/jar.mn
toolkit/components/payments/moz.build
toolkit/components/payments/paymentUIService.js
toolkit/components/payments/payments.manifest
toolkit/components/payments/res/PaymentsStore.js
toolkit/components/payments/res/components/address-option.css
toolkit/components/payments/res/components/address-option.js
toolkit/components/payments/res/components/basic-card-option.css
toolkit/components/payments/res/components/basic-card-option.js
toolkit/components/payments/res/components/currency-amount.js
toolkit/components/payments/res/components/labelled-checkbox.js
toolkit/components/payments/res/components/payment-details-item.css
toolkit/components/payments/res/components/payment-details-item.js
toolkit/components/payments/res/components/rich-option.js
toolkit/components/payments/res/components/rich-select.css
toolkit/components/payments/res/components/rich-select.js
toolkit/components/payments/res/components/shipping-option.css
toolkit/components/payments/res/components/shipping-option.js
toolkit/components/payments/res/containers/address-picker.js
toolkit/components/payments/res/containers/basic-card-form.js
toolkit/components/payments/res/containers/order-details.css
toolkit/components/payments/res/containers/order-details.js
toolkit/components/payments/res/containers/payment-dialog.js
toolkit/components/payments/res/containers/payment-method-picker.js
toolkit/components/payments/res/containers/shipping-option-picker.js
toolkit/components/payments/res/debugging.css
toolkit/components/payments/res/debugging.html
toolkit/components/payments/res/debugging.js
toolkit/components/payments/res/mixins/ObservedPropertiesMixin.js
toolkit/components/payments/res/mixins/PaymentStateSubscriberMixin.js
toolkit/components/payments/res/paymentRequest.css
toolkit/components/payments/res/paymentRequest.js
toolkit/components/payments/res/paymentRequest.xhtml
toolkit/components/payments/res/unprivileged-fallbacks.js
toolkit/components/payments/res/vendor/custom-elements.min.js
toolkit/components/payments/res/vendor/custom-elements.min.js.map
toolkit/components/payments/server.py
toolkit/components/payments/test/PaymentTestUtils.jsm
toolkit/components/payments/test/browser/.eslintrc.js
toolkit/components/payments/test/browser/blank_page.html
toolkit/components/payments/test/browser/browser.ini
toolkit/components/payments/test/browser/browser_card_edit.js
toolkit/components/payments/test/browser/browser_change_shipping.js
toolkit/components/payments/test/browser/browser_host_name.js
toolkit/components/payments/test/browser/browser_profile_storage.js
toolkit/components/payments/test/browser/browser_request_serialization.js
toolkit/components/payments/test/browser/browser_request_shipping.js
toolkit/components/payments/test/browser/browser_request_summary.js
toolkit/components/payments/test/browser/browser_shippingaddresschange_error.js
toolkit/components/payments/test/browser/browser_show_dialog.js
toolkit/components/payments/test/browser/browser_total.js
toolkit/components/payments/test/browser/head.js
toolkit/components/payments/test/mochitest/.eslintrc.js
toolkit/components/payments/test/mochitest/formautofill/mochitest.ini
toolkit/components/payments/test/mochitest/formautofill/test_editCreditCard.html
toolkit/components/payments/test/mochitest/mochitest.ini
toolkit/components/payments/test/mochitest/payments_common.js
toolkit/components/payments/test/mochitest/test_ObservedPropertiesMixin.html
toolkit/components/payments/test/mochitest/test_PaymentStateSubscriberMixin.html
toolkit/components/payments/test/mochitest/test_PaymentsStore.html
toolkit/components/payments/test/mochitest/test_address_picker.html
toolkit/components/payments/test/mochitest/test_basic_card_form.html
toolkit/components/payments/test/mochitest/test_currency_amount.html
toolkit/components/payments/test/mochitest/test_labelled_checkbox.html
toolkit/components/payments/test/mochitest/test_order_details.html
toolkit/components/payments/test/mochitest/test_payer_address_picker.html
toolkit/components/payments/test/mochitest/test_payment_details_item.html
toolkit/components/payments/test/mochitest/test_payment_dialog.html
toolkit/components/payments/test/mochitest/test_payment_method_picker.html
toolkit/components/payments/test/mochitest/test_rich_select.html
toolkit/components/payments/test/mochitest/test_shipping_option_picker.html
toolkit/components/payments/test/unit/.eslintrc.js
toolkit/components/payments/test/unit/head.js
toolkit/components/payments/test/unit/test_response_creation.js
toolkit/components/payments/test/unit/xpcshell.ini
--- a/.eslintignore
+++ b/.eslintignore
@@ -382,21 +382,21 @@ toolkit/components/help/**
 
 # Intentionally invalid JS
 toolkit/components/workerloader/tests/moduleF-syntax-error.js
 
 # Tests old non-star function generators
 toolkit/modules/tests/xpcshell/test_task.js
 
 # External code:
+browser/components/payments/res/vendor/*
 toolkit/components/microformats/test/**
 toolkit/components/microformats/microformat-shiv.js
 toolkit/components/reader/Readability.js
 toolkit/components/reader/JSDOMParser.js
-toolkit/components/payments/res/vendor/*
 
 # Uses preprocessing
 toolkit/content/widgets/wizard.xml
 toolkit/modules/AppConstants.jsm
 toolkit/mozapps/update/tests/data/xpcshellConstantsPP.js
 
 # Third party
 toolkit/modules/third_party/**
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -781,17 +781,16 @@ dependencies = [
 
 [[package]]
 name = "geckoservo"
 version = "0.0.1"
 dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cssparser 0.23.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "malloc_size_of 0.0.1",
  "nsstring 0.1.0",
  "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.1.1",
  "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -823,18 +822,19 @@ version = "0.1.0"
 dependencies = [
  "audioipc-client 0.4.0",
  "audioipc-server 0.2.3",
  "cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "cubeb-pulse 0.2.0",
  "cubeb-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "encoding_c 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "encoding_glue 0.1.0",
+ "env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "geckoservo 0.0.1",
- "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "mozurl 0.0.1",
  "mp4parse_capi 0.10.1",
  "netwerk_helper 0.0.1",
  "nserror 0.1.0",
  "nsstring 0.1.0",
  "prefs_parser 0.0.1",
  "rsdparsa_capi 0.1.0",
  "u2fhid 0.1.0",
--- a/browser/base/content/test/sanitize/browser_sanitizeDialog.js
+++ b/browser/base/content/test/sanitize/browser_sanitizeDialog.js
@@ -586,17 +586,17 @@ add_task(async function test_offline_cac
     // Offline cache visit happens synchronously, since it's forwarded to the old code
     is(size, 0, "offline application cache entries evicted");
   };
 
   var cacheListener = {
     onCacheEntryCheck() { return Ci.nsICacheEntryOpenCallback.ENTRY_WANTED; },
     onCacheEntryAvailable(entry, isnew, unused, status) {
       is(status, Cr.NS_OK);
-      var stream = entry.openOutputStream(0);
+      var stream = entry.openOutputStream(0, -1);
       var content = "content";
       stream.write(content, content.length);
       stream.close();
       entry.close();
       wh.open();
     }
   };
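A note on the test change above: this patch also adds a predictedSize parameter to nsICacheEntry's openOutputStream (see the CacheEntry.cpp/.h hunks further down), so callers now pass the expected data size along with the offset. A minimal, hypothetical chrome-JS sketch of the new call shape, with the argument meanings spelled out; the listener skeleton mirrors the test and is illustrative only:

var listener = {
  onCacheEntryCheck() { return Ci.nsICacheEntryOpenCallback.ENTRY_WANTED; },
  onCacheEntryAvailable(entry, isNew, appCache, status) {
    // openOutputStream(offset, predictedSize): the second argument is new in
    // this patch. The updated tests pass -1, mirroring the alt-data variant
    // documented below, where -1 means the size is not known in advance.
    var stream = entry.openOutputStream(0, -1);
    var content = "content";
    stream.write(content, content.length);
    stream.close();
    entry.close();
  }
};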
 
--- a/browser/base/content/test/static/browser_parsable_script.js
+++ b/browser/base/content/test/static/browser_parsable_script.js
@@ -5,19 +5,19 @@
  * detect newly occurring issues in shipping JS. It is a list of regexes
  * matching files which have errors:
  */
 const kWhitelist = new Set([
   /browser\/content\/browser\/places\/controller.js$/,
 ]);
 
 const kESModuleList = new Set([
-  /toolkit\/res\/payments\/(components|containers|mixins)\/.*\.js$/,
-  /toolkit\/res\/payments\/paymentRequest\.js$/,
-  /toolkit\/res\/payments\/PaymentsStore\.js$/,
+  /browser\/res\/payments\/(components|containers|mixins)\/.*\.js$/,
+  /browser\/res\/payments\/paymentRequest\.js$/,
+  /browser\/res\/payments\/PaymentsStore\.js$/,
 ]);
 
 // Normally we would use reflect.jsm to get Reflect.parse. However, if
 // we do that, then all the AST data is allocated in reflect.jsm's
 // zone. That exposes a bug in our GC. The GC collects reflect.jsm's
 // zone but not the zone in which our test code lives (since no new
 // data is being allocated in it). The cross-compartment wrappers in
 // our zone that point to the AST data never get collected, and so the
--- a/browser/components/moz.build
+++ b/browser/components/moz.build
@@ -54,16 +54,19 @@ DIRS += [
     'shell',
     'syncedtabs',
     'uitour',
     'translation',
 ]
 
 DIRS += ['build']
 
+if CONFIG['NIGHTLY_BUILD']:
+    DIRS += ['payments']
+
 XPIDL_SOURCES += [
     'nsIBrowserHandler.idl',
 ]
 
 XPIDL_MODULE = 'browsercompsbase'
 
 EXTRA_PP_COMPONENTS += [
     'BrowserComponents.manifest',
rename from toolkit/components/payments/.eslintrc.js
rename to browser/components/payments/.eslintrc.js
rename from toolkit/components/payments/content/paymentDialogFrameScript.js
rename to browser/components/payments/content/paymentDialogFrameScript.js
rename from toolkit/components/payments/content/paymentDialogWrapper.css
rename to browser/components/payments/content/paymentDialogWrapper.css
rename from toolkit/components/payments/content/paymentDialogWrapper.js
rename to browser/components/payments/content/paymentDialogWrapper.js
--- a/toolkit/components/payments/content/paymentDialogWrapper.js
+++ b/browser/components/payments/content/paymentDialogWrapper.js
@@ -16,27 +16,27 @@ ChromeUtils.import("resource://gre/modul
 ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
 
 ChromeUtils.defineModuleGetter(this, "MasterPassword",
                                "resource://formautofill/MasterPassword.jsm");
 ChromeUtils.defineModuleGetter(this, "PrivateBrowsingUtils",
                                "resource://gre/modules/PrivateBrowsingUtils.jsm");
 
 XPCOMUtils.defineLazyGetter(this, "formAutofillStorage", () => {
-  let formAutofillStorage;
+  let storage;
   try {
-    formAutofillStorage = ChromeUtils.import("resource://formautofill/FormAutofillStorage.jsm", {})
-                                .formAutofillStorage;
-    formAutofillStorage.initialize();
+    storage = ChromeUtils.import("resource://formautofill/FormAutofillStorage.jsm", {})
+                         .formAutofillStorage;
+    storage.initialize();
   } catch (ex) {
-    formAutofillStorage = null;
+    storage = null;
     Cu.reportError(ex);
   }
 
-  return formAutofillStorage;
+  return storage;
 });
 
 var paymentDialogWrapper = {
   componentsLoaded: new Map(),
   frame: null,
   mm: null,
   request: null,
 
rename from toolkit/components/payments/content/paymentDialogWrapper.xhtml
rename to browser/components/payments/content/paymentDialogWrapper.xhtml
rename from toolkit/components/payments/docs/index.rst
rename to browser/components/payments/docs/index.rst
--- a/toolkit/components/payments/docs/index.rst
+++ b/browser/components/payments/docs/index.rst
@@ -21,17 +21,17 @@ Web Payments `does not work without e10s
 Logging
 -------
 
 Set the pref ``dom.payments.loglevel`` to "Debug" to increase the verbosity of console messages.
 
 Unprivileged UI Development
 ---------------------------
 During development of the unprivileged custom elements, you can load the dialog from a
-local server without even requiring a build. Simply run `./mach python toolkit/components/payments/server.py`
+local server without even requiring a build. Simply run `./mach python browser/components/payments/server.py`
 then load `http://localhost:8000/paymentRequest.xhtml?debug=1` in the browser.
 Use the debugging console to load sample data.
 
 Debugging Console
 -----------------
 
 To open the debugging console in the dialog, use the keyboard shortcut
 **Ctrl-Alt-d (Ctrl-Option-d on macOS)**. While loading `paymentRequest.xhtml` directly in the
rename from toolkit/components/payments/jar.mn
rename to browser/components/payments/jar.mn
--- a/toolkit/components/payments/jar.mn
+++ b/browser/components/payments/jar.mn
@@ -1,13 +1,13 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-toolkit.jar:
+browser.jar:
 %   content payments %content/payments/
     content/payments/paymentDialogFrameScript.js      (content/paymentDialogFrameScript.js)
     content/payments/paymentDialogWrapper.css         (content/paymentDialogWrapper.css)
     content/payments/paymentDialogWrapper.js          (content/paymentDialogWrapper.js)
     content/payments/paymentDialogWrapper.xhtml       (content/paymentDialogWrapper.xhtml)
 
 %   resource payments %res/payments/
     res/payments                                      (res/paymentRequest.*)
rename from toolkit/components/payments/moz.build
rename to browser/components/payments/moz.build
rename from toolkit/components/payments/paymentUIService.js
rename to browser/components/payments/paymentUIService.js
rename from toolkit/components/payments/payments.manifest
rename to browser/components/payments/payments.manifest
rename from toolkit/components/payments/res/PaymentsStore.js
rename to browser/components/payments/res/PaymentsStore.js
rename from toolkit/components/payments/res/components/address-option.css
rename to browser/components/payments/res/components/address-option.css
rename from toolkit/components/payments/res/components/address-option.js
rename to browser/components/payments/res/components/address-option.js
rename from toolkit/components/payments/res/components/basic-card-option.css
rename to browser/components/payments/res/components/basic-card-option.css
rename from toolkit/components/payments/res/components/basic-card-option.js
rename to browser/components/payments/res/components/basic-card-option.js
rename from toolkit/components/payments/res/components/currency-amount.js
rename to browser/components/payments/res/components/currency-amount.js
rename from toolkit/components/payments/res/components/labelled-checkbox.js
rename to browser/components/payments/res/components/labelled-checkbox.js
rename from toolkit/components/payments/res/components/payment-details-item.css
rename to browser/components/payments/res/components/payment-details-item.css
rename from toolkit/components/payments/res/components/payment-details-item.js
rename to browser/components/payments/res/components/payment-details-item.js
rename from toolkit/components/payments/res/components/rich-option.js
rename to browser/components/payments/res/components/rich-option.js
rename from toolkit/components/payments/res/components/rich-select.css
rename to browser/components/payments/res/components/rich-select.css
rename from toolkit/components/payments/res/components/rich-select.js
rename to browser/components/payments/res/components/rich-select.js
rename from toolkit/components/payments/res/components/shipping-option.css
rename to browser/components/payments/res/components/shipping-option.css
rename from toolkit/components/payments/res/components/shipping-option.js
rename to browser/components/payments/res/components/shipping-option.js
rename from toolkit/components/payments/res/containers/address-picker.js
rename to browser/components/payments/res/containers/address-picker.js
rename from toolkit/components/payments/res/containers/basic-card-form.js
rename to browser/components/payments/res/containers/basic-card-form.js
rename from toolkit/components/payments/res/containers/order-details.css
rename to browser/components/payments/res/containers/order-details.css
rename from toolkit/components/payments/res/containers/order-details.js
rename to browser/components/payments/res/containers/order-details.js
rename from toolkit/components/payments/res/containers/payment-dialog.js
rename to browser/components/payments/res/containers/payment-dialog.js
rename from toolkit/components/payments/res/containers/payment-method-picker.js
rename to browser/components/payments/res/containers/payment-method-picker.js
rename from toolkit/components/payments/res/containers/shipping-option-picker.js
rename to browser/components/payments/res/containers/shipping-option-picker.js
rename from toolkit/components/payments/res/debugging.css
rename to browser/components/payments/res/debugging.css
rename from toolkit/components/payments/res/debugging.html
rename to browser/components/payments/res/debugging.html
rename from toolkit/components/payments/res/debugging.js
rename to browser/components/payments/res/debugging.js
rename from toolkit/components/payments/res/mixins/ObservedPropertiesMixin.js
rename to browser/components/payments/res/mixins/ObservedPropertiesMixin.js
rename from toolkit/components/payments/res/mixins/PaymentStateSubscriberMixin.js
rename to browser/components/payments/res/mixins/PaymentStateSubscriberMixin.js
rename from toolkit/components/payments/res/paymentRequest.css
rename to browser/components/payments/res/paymentRequest.css
rename from toolkit/components/payments/res/paymentRequest.js
rename to browser/components/payments/res/paymentRequest.js
rename from toolkit/components/payments/res/paymentRequest.xhtml
rename to browser/components/payments/res/paymentRequest.xhtml
rename from toolkit/components/payments/res/unprivileged-fallbacks.js
rename to browser/components/payments/res/unprivileged-fallbacks.js
rename from toolkit/components/payments/res/vendor/custom-elements.min.js
rename to browser/components/payments/res/vendor/custom-elements.min.js
rename from toolkit/components/payments/res/vendor/custom-elements.min.js.map
rename to browser/components/payments/res/vendor/custom-elements.min.js.map
rename from toolkit/components/payments/server.py
rename to browser/components/payments/server.py
--- a/toolkit/components/payments/server.py
+++ b/browser/components/payments/server.py
@@ -4,15 +4,15 @@ from SimpleHTTPServer import SimpleHTTPR
 
 class RequestHandler(SimpleHTTPRequestHandler, object):
     def translate_path(self, path):
         # Map autofill paths to their own directory
         autofillPath = "/formautofill"
         if (path.startswith(autofillPath)):
             path = "browser/extensions/formautofill/content" + path[len(autofillPath):]
         else:
-            path = "toolkit/components/payments/res" + path
+            path = "browser/components/payments/res" + path
 
         return super(RequestHandler, self).translate_path(path)
 
 
 if __name__ == '__main__':
     BaseHTTPServer.test(RequestHandler, BaseHTTPServer.HTTPServer)
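For reference, the mapping translate_path applies is small: requests under /formautofill are served from the Form Autofill source directory, and everything else from the payments res/ directory. A rough restatement in JavaScript, purely for illustration (not part of the patch; the example paths are assumptions):

// Illustrative restatement of RequestHandler.translate_path above.
function translatePath(path) {
  const autofillPath = "/formautofill";
  if (path.startsWith(autofillPath)) {
    // e.g. "/formautofill/editCreditCard.xhtml"
    //  ->  "browser/extensions/formautofill/content/editCreditCard.xhtml"
    return "browser/extensions/formautofill/content" + path.slice(autofillPath.length);
  }
  // e.g. "/paymentRequest.xhtml"
  //  ->  "browser/components/payments/res/paymentRequest.xhtml"
  return "browser/components/payments/res" + path;
}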
rename from toolkit/components/payments/test/PaymentTestUtils.jsm
rename to browser/components/payments/test/PaymentTestUtils.jsm
rename from toolkit/components/payments/test/browser/.eslintrc.js
rename to browser/components/payments/test/browser/.eslintrc.js
rename from toolkit/components/payments/test/browser/blank_page.html
rename to browser/components/payments/test/browser/blank_page.html
rename from toolkit/components/payments/test/browser/browser.ini
rename to browser/components/payments/test/browser/browser.ini
rename from toolkit/components/payments/test/browser/browser_card_edit.js
rename to browser/components/payments/test/browser/browser_card_edit.js
--- a/toolkit/components/payments/test/browser/browser_card_edit.js
+++ b/browser/components/payments/test/browser/browser_card_edit.js
@@ -1,8 +1,10 @@
+/* eslint-disable no-shadow */
+
 "use strict";
 
 add_task(async function test_add_link() {
   const args = {
     methodData: [PTU.MethodData.basicCard],
     details: PTU.Details.total60USD,
   };
   await spawnInDialogForMerchantTask(PTU.ContentTasks.createRequest, async function check() {
rename from toolkit/components/payments/test/browser/browser_change_shipping.js
rename to browser/components/payments/test/browser/browser_change_shipping.js
rename from toolkit/components/payments/test/browser/browser_host_name.js
rename to browser/components/payments/test/browser/browser_host_name.js
rename from toolkit/components/payments/test/browser/browser_profile_storage.js
rename to browser/components/payments/test/browser/browser_profile_storage.js
--- a/toolkit/components/payments/test/browser/browser_profile_storage.js
+++ b/browser/components/payments/test/browser/browser_profile_storage.js
@@ -1,8 +1,10 @@
+/* eslint-disable no-shadow */
+
 "use strict";
 
 const methodData = [PTU.MethodData.basicCard];
 const details = PTU.Details.total60USD;
 
 add_task(async function test_initial_state() {
   let onChanged = TestUtils.topicObserved("formautofill-storage-changed",
                                           (subject, data) => data == "add");
rename from toolkit/components/payments/test/browser/browser_request_serialization.js
rename to browser/components/payments/test/browser/browser_request_serialization.js
rename from toolkit/components/payments/test/browser/browser_request_shipping.js
rename to browser/components/payments/test/browser/browser_request_shipping.js
rename from toolkit/components/payments/test/browser/browser_request_summary.js
rename to browser/components/payments/test/browser/browser_request_summary.js
rename from toolkit/components/payments/test/browser/browser_shippingaddresschange_error.js
rename to browser/components/payments/test/browser/browser_shippingaddresschange_error.js
rename from toolkit/components/payments/test/browser/browser_show_dialog.js
rename to browser/components/payments/test/browser/browser_show_dialog.js
rename from toolkit/components/payments/test/browser/browser_total.js
rename to browser/components/payments/test/browser/browser_total.js
rename from toolkit/components/payments/test/browser/head.js
rename to browser/components/payments/test/browser/head.js
--- a/toolkit/components/payments/test/browser/head.js
+++ b/browser/components/payments/test/browser/head.js
@@ -3,17 +3,17 @@
 /* eslint
   "no-unused-vars": ["error", {
     vars: "local",
     args: "none",
   }],
 */
 
 
-const BLANK_PAGE_PATH = "/browser/toolkit/components/payments/test/browser/blank_page.html";
+const BLANK_PAGE_PATH = "/browser/browser/components/payments/test/browser/blank_page.html";
 const BLANK_PAGE_URL = "https://example.com" + BLANK_PAGE_PATH;
 
 const paymentSrv = Cc["@mozilla.org/dom/payments/payment-request-service;1"]
                      .getService(Ci.nsIPaymentRequestService);
 const paymentUISrv = Cc["@mozilla.org/dom/payments/payment-ui-service;1"]
                      .getService().wrappedJSObject;
 const {formAutofillStorage} = ChromeUtils.import(
   "resource://formautofill/FormAutofillStorage.jsm", {});
rename from toolkit/components/payments/test/mochitest/.eslintrc.js
rename to browser/components/payments/test/mochitest/.eslintrc.js
rename from toolkit/components/payments/test/mochitest/formautofill/mochitest.ini
rename to browser/components/payments/test/mochitest/formautofill/mochitest.ini
rename from toolkit/components/payments/test/mochitest/formautofill/test_editCreditCard.html
rename to browser/components/payments/test/mochitest/formautofill/test_editCreditCard.html
rename from toolkit/components/payments/test/mochitest/mochitest.ini
rename to browser/components/payments/test/mochitest/mochitest.ini
rename from toolkit/components/payments/test/mochitest/payments_common.js
rename to browser/components/payments/test/mochitest/payments_common.js
rename from toolkit/components/payments/test/mochitest/test_ObservedPropertiesMixin.html
rename to browser/components/payments/test/mochitest/test_ObservedPropertiesMixin.html
rename from toolkit/components/payments/test/mochitest/test_PaymentStateSubscriberMixin.html
rename to browser/components/payments/test/mochitest/test_PaymentStateSubscriberMixin.html
rename from toolkit/components/payments/test/mochitest/test_PaymentsStore.html
rename to browser/components/payments/test/mochitest/test_PaymentsStore.html
rename from toolkit/components/payments/test/mochitest/test_address_picker.html
rename to browser/components/payments/test/mochitest/test_address_picker.html
rename from toolkit/components/payments/test/mochitest/test_basic_card_form.html
rename to browser/components/payments/test/mochitest/test_basic_card_form.html
rename from toolkit/components/payments/test/mochitest/test_currency_amount.html
rename to browser/components/payments/test/mochitest/test_currency_amount.html
rename from toolkit/components/payments/test/mochitest/test_labelled_checkbox.html
rename to browser/components/payments/test/mochitest/test_labelled_checkbox.html
rename from toolkit/components/payments/test/mochitest/test_order_details.html
rename to browser/components/payments/test/mochitest/test_order_details.html
rename from toolkit/components/payments/test/mochitest/test_payer_address_picker.html
rename to browser/components/payments/test/mochitest/test_payer_address_picker.html
rename from toolkit/components/payments/test/mochitest/test_payment_details_item.html
rename to browser/components/payments/test/mochitest/test_payment_details_item.html
rename from toolkit/components/payments/test/mochitest/test_payment_dialog.html
rename to browser/components/payments/test/mochitest/test_payment_dialog.html
rename from toolkit/components/payments/test/mochitest/test_payment_method_picker.html
rename to browser/components/payments/test/mochitest/test_payment_method_picker.html
rename from toolkit/components/payments/test/mochitest/test_rich_select.html
rename to browser/components/payments/test/mochitest/test_rich_select.html
rename from toolkit/components/payments/test/mochitest/test_shipping_option_picker.html
rename to browser/components/payments/test/mochitest/test_shipping_option_picker.html
rename from toolkit/components/payments/test/unit/.eslintrc.js
rename to browser/components/payments/test/unit/.eslintrc.js
rename from toolkit/components/payments/test/unit/head.js
rename to browser/components/payments/test/unit/head.js
rename from toolkit/components/payments/test/unit/test_response_creation.js
rename to browser/components/payments/test/unit/test_response_creation.js
rename from toolkit/components/payments/test/unit/xpcshell.ini
rename to browser/components/payments/test/unit/xpcshell.ini
--- a/toolkit/components/payments/test/unit/xpcshell.ini
+++ b/browser/components/payments/test/unit/xpcshell.ini
@@ -1,4 +1,5 @@
 [DEFAULT]
+firefox-appdir = browser
 head = head.js
 
 [test_response_creation.js]
--- a/browser/components/places/tests/unit/test_clearHistory_shutdown.js
+++ b/browser/components/places/tests/unit/test_clearHistory_shutdown.js
@@ -132,17 +132,17 @@ function storeCache(aURL, aContent) {
       onCacheEntryCheck(entry, appcache) {
         return Ci.nsICacheEntryOpenCallback.ENTRY_WANTED;
       },
 
       onCacheEntryAvailable(entry, isnew, appcache, status) {
         Assert.equal(status, Cr.NS_OK);
 
         entry.setMetaDataElement("servertype", "0");
-        var os = entry.openOutputStream(0);
+        var os = entry.openOutputStream(0, -1);
 
         var written = os.write(aContent, aContent.length);
         if (written != aContent.length) {
           do_throw("os.write has not written all data!\n" +
                    "  Expected: " + written + "\n" +
                    "  Actual: " + aContent.length + "\n");
         }
         os.close();
--- a/browser/extensions/formautofill/test/browser/browser.ini
+++ b/browser/extensions/formautofill/test/browser/browser.ini
@@ -19,8 +19,9 @@ skip-if = (os == "linux") || (os == "mac
 [browser_editCreditCardDialog.js]
 [browser_first_time_use_doorhanger.js]
 [browser_insecure_form.js]
 [browser_manageAddressesDialog.js]
 [browser_manageCreditCardsDialog.js]
 [browser_privacyPreferences.js]
 [browser_submission_in_private_mode.js]
 [browser_update_doorhanger.js]
+skip-if = (os == "linux") || (os == "mac" && debug) || (os == "win") # bug 1426981
\ No newline at end of file
--- a/browser/installer/allowed-dupes.mn
+++ b/browser/installer/allowed-dupes.mn
@@ -140,11 +140,11 @@ res/table-remove-row-hover.gif
 res/table-remove-row.gif
 res/multilocale.txt
 update.locale
 # Aurora branding
 browser/chrome/browser/content/branding/icon128.png
 browser/chrome/devtools/content/framework/dev-edition-promo/dev-edition-logo.png
 # Bug 1451016 - Nightly-only PaymentRequest & Form Autofill code sharing.
 browser/features/formautofill@mozilla.org/chrome/content/editCreditCard.xhtml
-chrome/toolkit/res/payments/formautofill/editCreditCard.xhtml
+browser/chrome/browser/res/payments/formautofill/editCreditCard.xhtml
 browser/features/formautofill@mozilla.org/chrome/content/autofillEditForms.js
-chrome/toolkit/res/payments/formautofill/autofillEditForms.js
+browser/chrome/browser/res/payments/formautofill/autofillEditForms.js
--- a/browser/installer/package-manifest.in
+++ b/browser/installer/package-manifest.in
@@ -201,18 +201,18 @@
 @RESPATH@/components/PageThumbsComponents.manifest
 @RESPATH@/components/crashmonitor.manifest
 @RESPATH@/components/nsCrashMonitor.js
 @RESPATH@/components/toolkitsearch.manifest
 @RESPATH@/components/nsSearchService.js
 @RESPATH@/components/nsSearchSuggestions.js
 @RESPATH@/components/nsSidebar.js
 #ifdef NIGHTLY_BUILD
-@RESPATH@/components/payments.manifest
-@RESPATH@/components/paymentUIService.js
+@RESPATH@/browser/components/payments.manifest
+@RESPATH@/browser/components/paymentUIService.js
 #endif
 @RESPATH@/components/passwordmgr.manifest
 @RESPATH@/components/nsLoginInfo.js
 @RESPATH@/components/nsLoginManager.js
 @RESPATH@/components/nsLoginManagerPrompter.js
 @RESPATH@/components/storage-json.js
 @RESPATH@/components/crypto-SDR.js
 @RESPATH@/components/TooltipTextProvider.js
--- a/devtools/server/actors/highlighters/css-grid.js
+++ b/devtools/server/actors/highlighters/css-grid.js
@@ -1377,17 +1377,16 @@ class CssGridHighlighter extends AutoRef
     let canvasY = Math.round(this._canvasPosition.y * devicePixelRatio);
 
     linePos = Math.round(linePos);
     startPos = Math.round(startPos);
     endPos = Math.round(endPos);
 
     this.ctx.save();
     this.ctx.setLineDash(GRID_LINES_PROPERTIES[lineType].lineDash);
-    this.ctx.beginPath();
     this.ctx.translate(offset - canvasX, offset - canvasY);
 
     let lineOptions = {
       matrix: this.currentMatrix
     };
 
     if (this.options.showInfiniteLines) {
       lineOptions.extendToBoundaries = [canvasX, canvasY, canvasX + CANVAS_SIZE,
--- a/devtools/server/actors/highlighters/utils/canvas.js
+++ b/devtools/server/actors/highlighters/utils/canvas.js
@@ -166,17 +166,16 @@ function drawLine(ctx, x1, y1, x2, y2, o
       y2 = options.extendToBoundaries[3];
       x2 = (p2[0] - p1[0]) * (y2 - p1[1]) / (p2[1] - p1[1]) + p1[0];
     }
   }
 
   ctx.beginPath();
   ctx.moveTo(Math.round(x1), Math.round(y1));
   ctx.lineTo(Math.round(x2), Math.round(y2));
-  ctx.closePath();
 }
 
 /**
  * Draws a rect to the context given and applies a transformation matrix if passed.
  * The coordinates are the start and end points of the rectangle's diagonal.
  *
  * @param  {CanvasRenderingContext2D} ctx
  *         The 2D canvas context.
--- a/dom/base/nsFocusManager.cpp
+++ b/dom/base/nsFocusManager.cpp
@@ -3255,34 +3255,113 @@ nsFocusManager::GetNextTabbableContentIn
     }
   }
 
   //
   // Iterate contents in scope
   //
   ScopedContentTraversal contentTraversal(aStartContent, aOwner);
   nsCOMPtr<nsIContent> iterContent;
+  nsIContent* firstNonChromeOnly = aStartContent->IsInNativeAnonymousSubtree() ?
+    aStartContent->FindFirstNonChromeOnlyAccessContent() : nullptr;
   while (1) {
     // Iterate tab index to find corresponding contents in scope
 
     while (1) {
       // Iterate remaining contents in scope to find next content to focus
 
       // Get next content
       aForward ? contentTraversal.Next() : contentTraversal.Prev();
       iterContent = contentTraversal.GetCurrent();
+
+      if (firstNonChromeOnly && firstNonChromeOnly == iterContent) {
+        // We just broke out from the native anonymous content, so move
+        // to the previous/next node of the native anonymous owner.
+        if (aForward) {
+          contentTraversal.Next();
+        } else {
+          contentTraversal.Prev();
+        }
+        iterContent = contentTraversal.GetCurrent();
+      }
       if (!iterContent) {
         // Reach the end
         break;
       }
 
-      // Get tab index of next content
+      // Get the tab index of the next element. For NAC we rely on frames.
+      //XXXsmaug we should probably use frames also for Shadow DOM and special
+      //         case only display:contents elements.
       int32_t tabIndex = 0;
-      iterContent->IsFocusable(&tabIndex);
+      if (iterContent->IsInNativeAnonymousSubtree() &&
+          iterContent->GetPrimaryFrame()) {
+        iterContent->GetPrimaryFrame()->IsFocusable(&tabIndex);
+      } else {
+        iterContent->IsFocusable(&tabIndex);
+      }
       if (tabIndex < 0 || !(aIgnoreTabIndex || tabIndex == aCurrentTabIndex)) {
+        // If the element has native anonymous content, we may need to
+        // focus some NAC element, even if the element itself isn't focusable.
+        // This happens for example with <input type="date">.
+        // So, try to find NAC and then traverse the frame tree to find elements
+        // to focus.
+        nsIFrame* possibleAnonOwnerFrame = iterContent->GetPrimaryFrame();
+        nsIAnonymousContentCreator* anonCreator =
+          do_QueryFrame(possibleAnonOwnerFrame);
+        if (anonCreator && !iterContent->IsInNativeAnonymousSubtree()) {
+          nsIFrame* frame = nullptr;
+          // Find the first or last frame in tree order so that
+          // we can scope frame traversing to NAC.
+          if (aForward) {
+            frame = possibleAnonOwnerFrame->PrincipalChildList().FirstChild();
+          } else {
+            frame = possibleAnonOwnerFrame->PrincipalChildList().LastChild();
+            nsIFrame* last = frame;
+            while (last) {
+              frame = last;
+              last = frame->PrincipalChildList().LastChild();
+            }
+          };
+
+          nsCOMPtr<nsIFrameEnumerator> frameTraversal;
+          nsresult rv = NS_NewFrameTraversal(getter_AddRefs(frameTraversal),
+                                             iterContent->OwnerDoc()->
+                                               GetShell()->GetPresContext(),
+                                             frame,
+                                             ePreOrder,
+                                             false, // aVisual
+                                             false, // aLockInScrollView
+                                             true, // aFollowOOFs
+                                             true,  // aSkipPopupChecks
+                                             false // aSkipShadow
+                                             );
+          if (NS_SUCCEEDED(rv)) {
+            nsIFrame* frame =
+              static_cast<nsIFrame*>(frameTraversal->CurrentItem());
+            while (frame) {
+              int32_t tabIndex;
+              frame->IsFocusable(&tabIndex, 0);
+              if (tabIndex >= 0 &&
+                  (aIgnoreTabIndex || aCurrentTabIndex == tabIndex)) {
+                return frame->GetContent();
+              }
+
+              if (aForward) {
+                frameTraversal->Next();
+              } else {
+                frameTraversal->Prev();
+              }
+              frame = static_cast<nsIFrame*>(frameTraversal->CurrentItem());
+              if (frame == possibleAnonOwnerFrame) {
+                break;
+              }
+            }
+          }
+        }
+
         continue;
       }
 
       if (!IsHostOrSlot(iterContent)) {
         // Found content to focus
         return iterContent;
       }
 
--- a/dom/base/test/file_bug1453693.html
+++ b/dom/base/test/file_bug1453693.html
@@ -26,16 +26,22 @@
         shadowAnchor.onfocus = focusLogger;
         shadowAnchor.textContent = "in shadow DOM";
         sr.appendChild(shadowAnchor);
         var shadowInput = document.createElement("input");
         shadowInput.onfocus = focusLogger;
         shadowInput.tabIndex = 1;
         sr.appendChild(shadowInput);
 
+        var shadowDate = document.createElement("input");
+        shadowDate.type = "date";
+        shadowDate.onfocus = focusLogger;
+        shadowDate.tabIndex = 1;
+        sr.appendChild(shadowDate);
+
         var input = document.createElement("input");
         input.onfocus = focusLogger;
         input.tabIndex = 1;
         document.body.appendChild(input);
 
         var input2 = document.createElement("input");
         input2.onfocus = focusLogger;
         document.body.appendChild(input2);
@@ -44,24 +50,36 @@
 
         synthesizeKey("KEY_Tab");
         opener.is(lastFocusTarget, input, "Should have focused input element. (3)");
         synthesizeKey("KEY_Tab");
         opener.is(lastFocusTarget, anchor, "Should have focused anchor element. (3)");
         synthesizeKey("KEY_Tab");
         opener.is(lastFocusTarget, shadowInput, "Should have focused input element in shadow DOM. (3)");
         synthesizeKey("KEY_Tab");
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (3)");
+        synthesizeKey("KEY_Tab");
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (3)");
+        synthesizeKey("KEY_Tab");
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (3)");
+        synthesizeKey("KEY_Tab");
         opener.is(lastFocusTarget, shadowAnchor, "Should have focused anchor element in shadow DOM. (3)");
         synthesizeKey("KEY_Tab");
         opener.is(lastFocusTarget, input2, "Should have focused input[2] element. (3)");
 
         // Backwards
         synthesizeKey("KEY_Tab", {shiftKey: true});
         opener.is(lastFocusTarget, shadowAnchor, "Should have focused anchor element in shadow DOM. (4)");
         synthesizeKey("KEY_Tab", {shiftKey: true});
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (4)");
+        synthesizeKey("KEY_Tab", {shiftKey: true});
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (4)");
+        synthesizeKey("KEY_Tab", {shiftKey: true});
+        opener.is(lastFocusTarget, shadowDate, "Should have focused date element in shadow DOM. (4)");
+        synthesizeKey("KEY_Tab", {shiftKey: true});
         opener.is(lastFocusTarget, shadowInput, "Should have focused input element in shadow DOM. (4)");
         synthesizeKey("KEY_Tab", {shiftKey: true});
         opener.is(lastFocusTarget, anchor, "Should have focused anchor element. (4)");
         synthesizeKey("KEY_Tab", {shiftKey: true});
         opener.is(lastFocusTarget, input, "Should have focused input element. (4)");
 
         document.body.innerHTML = null;
       }
--- a/dom/script/ScriptLoader.cpp
+++ b/dom/script/ScriptLoader.cpp
@@ -2484,16 +2484,17 @@ ScriptLoader::EncodeRequestBytecode(JSCo
     return;
   }
 
   // Open the output stream to the cache entry alternate data storage. This
   // might fail if the stream is already open by another request, in which
   // case, we just ignore the current one.
   nsCOMPtr<nsIOutputStream> output;
   rv = aRequest->mCacheInfo->OpenAlternativeOutputStream(nsContentUtils::JSBytecodeMimeType(),
+                                                         aRequest->mScriptBytecode.length(),
                                                          getter_AddRefs(output));
   if (NS_FAILED(rv)) {
     LOG(("ScriptLoadRequest (%p): Cannot open bytecode cache (rv = %X, output = %p)",
          aRequest, unsigned(rv), output.get()));
     AccumulateCategorical(LABELS_DOM_SCRIPT_ENCODING_STATUS::OpenFailure);
     return;
   }
   MOZ_ASSERT(output);
--- a/gfx/2d/DrawTargetD2D1.cpp
+++ b/gfx/2d/DrawTargetD2D1.cpp
@@ -1015,21 +1015,23 @@ DrawTargetD2D1::CreateGradientStops(Grad
 
   D2D1_GRADIENT_STOP *stops = new D2D1_GRADIENT_STOP[aNumStops];
 
   for (uint32_t i = 0; i < aNumStops; i++) {
     stops[i].position = rawStops[i].offset;
     stops[i].color = D2DColor(rawStops[i].color);
   }
 
-  RefPtr<ID2D1GradientStopCollection> stopCollection;
+  RefPtr<ID2D1GradientStopCollection1> stopCollection;
 
   HRESULT hr =
     mDC->CreateGradientStopCollection(stops, aNumStops,
-                                      D2D1_GAMMA_2_2, D2DExtend(aExtendMode, Axis::BOTH),
+                                      D2D1_COLOR_SPACE_SRGB, D2D1_COLOR_SPACE_SRGB,
+                                      D2D1_BUFFER_PRECISION_8BPC_UNORM, D2DExtend(aExtendMode, Axis::BOTH),
+                                      D2D1_COLOR_INTERPOLATION_MODE_PREMULTIPLIED,
                                       getter_AddRefs(stopCollection));
   delete [] stops;
 
   if (FAILED(hr)) {
     gfxWarning() << *this << ": Failed to create GradientStopCollection. Code: " << hexa(hr);
     return nullptr;
   }
 
--- a/gfx/layers/wr/WebRenderCommandBuilder.cpp
+++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp
@@ -10,16 +10,18 @@
 #include "mozilla/AutoRestore.h"
 #include "mozilla/gfx/2D.h"
 #include "mozilla/gfx/Types.h"
 #include "mozilla/layers/ImageClient.h"
 #include "mozilla/layers/WebRenderBridgeChild.h"
 #include "mozilla/layers/WebRenderLayerManager.h"
 #include "mozilla/layers/IpcResourceUpdateQueue.h"
 #include "mozilla/layers/ScrollingLayersHelper.h"
+#include "mozilla/layers/SharedSurfacesChild.h"
+#include "mozilla/layers/SourceSurfaceSharedData.h"
 #include "mozilla/layers/StackingContextHelper.h"
 #include "mozilla/layers/UpdateImageHelper.h"
 #include "mozilla/layers/WebRenderDrawEventRecorder.h"
 #include "UnitTransforms.h"
 #include "gfxEnv.h"
 #include "nsDisplayListInvalidation.h"
 #include "WebRenderCanvasRenderer.h"
 #include "LayersLogging.h"
@@ -169,16 +171,39 @@ DestroyBlobGroupDataProperty(nsTArray<Bl
 {
   for (BlobItemData* item : *aArray) {
     GP("DestroyBlobGroupDataProperty: %p-%d\n", item->mFrame, item->mDisplayItemKey);
     item->mFrame = nullptr;
   }
   delete aArray;
 }
 
+static void
+TakeExternalSurfaces(WebRenderDrawEventRecorder* aRecorder,
+                     std::vector<RefPtr<SourceSurface>> aExternalSurfaces,
+                     WebRenderLayerManager* aManager,
+                     wr::IpcResourceUpdateQueue& aResources)
+{
+  aRecorder->TakeExternalSurfaces(aExternalSurfaces);
+
+  for (auto& surface : aExternalSurfaces) {
+    if (surface->GetType() != SurfaceType::DATA_SHARED) {
+      MOZ_ASSERT_UNREACHABLE("External surface that is not a shared surface!");
+      continue;
+    }
+
+    // While we don't use the image key with the surface, because the blob image
+    // renderer doesn't have easy access to the resource set, we still want to
+    // ensure one is generated. That will ensure the surface remains alive until
+    // at least the last epoch which the blob image could be used in.
+    wr::ImageKey key;
+    auto sharedSurface = static_cast<SourceSurfaceSharedData*>(surface.get());
+    SharedSurfacesChild::Share(sharedSurface, aManager, aResources, key);
+  }
+}
 
 struct DIGroup;
 struct Grouper
 {
   explicit Grouper(ScrollingLayersHelper& aScrollingHelper)
    : mScrollingHelper(aScrollingHelper)
   {}
 
@@ -584,17 +609,17 @@ struct DIGroup
       return;
     }
 
     PaintItemRange(aGrouper, aStartItem, aEndItem, context, recorder);
 
     // XXX: set this correctly perhaps using aItem->GetOpaqueRegion(aDisplayListBuilder, &snapped).Contains(paintBounds);?
     bool isOpaque = false;
 
-    recorder->TakeExternalSurfaces(mExternalSurfaces);
+    TakeExternalSurfaces(recorder, mExternalSurfaces, aWrManager, aResources);
     bool hasItems = recorder->Finish();
     GP("%d Finish\n", hasItems);
     Range<uint8_t> bytes((uint8_t*)recorder->mOutputStream.mData, recorder->mOutputStream.mLength);
     if (!mKey) {
       if (!hasItems) // we don't want to send a new image that doesn't have any items in it
         return;
       wr::ImageKey key = aWrManager->WrBridge()->GetNextImageKey();
       GP("No previous key making new one %d\n", key.mHandle);
@@ -1669,17 +1694,17 @@ WebRenderCommandBuilder::GenerateFallbac
         gfx::Factory::CreateDrawTarget(gfx::BackendType::SKIA, gfx::IntSize(1, 1), format);
       RefPtr<gfx::DrawTarget> dt = gfx::Factory::CreateRecordingDrawTarget(recorder, dummyDt, dtSize.ToUnknownSize());
       if (!fallbackData->mBasicLayerManager) {
         fallbackData->mBasicLayerManager = new BasicLayerManager(BasicLayerManager::BLM_INACTIVE);
       }
       bool isInvalidated = PaintItemByDrawTarget(aItem, dt, paintRect, offset, aDisplayListBuilder,
                                                  fallbackData->mBasicLayerManager, scale, highlight);
       recorder->FlushItem(IntRect(0, 0, paintSize.width, paintSize.height));
-      recorder->TakeExternalSurfaces(fallbackData->mExternalSurfaces);
+      TakeExternalSurfaces(recorder, fallbackData->mExternalSurfaces, mManager, aResources);
       recorder->Finish();
 
       if (isInvalidated) {
         Range<uint8_t> bytes((uint8_t *)recorder->mOutputStream.mData, recorder->mOutputStream.mLength);
         wr::ImageKey key = mManager->WrBridge()->GetNextImageKey();
         wr::ImageDescriptor descriptor(dtSize.ToUnknownSize(), 0, dt->GetFormat(), isOpaque);
         if (!aResources.AddBlobImage(key, descriptor, bytes)) {
           return nullptr;
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -135,16 +135,24 @@ GetBuildConfiguration(JSContext* cx, uns
 #ifdef RELEASE_OR_BETA
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "release_or_beta", value))
         return false;
 
+#ifdef MOZ_CODE_COVERAGE
+    value = BooleanValue(true);
+#else
+    value = BooleanValue(false);
+#endif
+    if (!JS_SetProperty(cx, info, "coverage", value))
+        return false;
+
 #ifdef JS_HAS_CTYPES
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "has-ctypes", value))
         return false;
 
--- a/layout/painting/nsCSSRenderingGradients.cpp
+++ b/layout/painting/nsCSSRenderingGradients.cpp
@@ -418,18 +418,20 @@ static const float kAlphaIncrementPerGra
 static void
 ResolvePremultipliedAlpha(nsTArray<ColorStop>& aStops)
 {
   for (size_t x = 1; x < aStops.Length(); x++) {
     const ColorStop leftStop = aStops[x - 1];
     const ColorStop rightStop = aStops[x];
 
     // if the left and right stop have the same alpha value, we don't need
-    // to do anything
-    if (leftStop.mColor.a == rightStop.mColor.a) {
+    // to do anything. Hardstops should be instant, and also should never
+    // require dealing with interpolation.
+    if (leftStop.mColor.a == rightStop.mColor.a ||
+        leftStop.mPosition == rightStop.mPosition) {
       continue;
     }
 
     // Is the stop on the left 100% transparent? If so, have it adopt the color
     // of the right stop
     if (leftStop.mColor.a == 0) {
       aStops[x - 1].mColor = TransparentColor(rightStop.mColor);
       continue;
--- a/layout/reftests/css-blending/reftest.list
+++ b/layout/reftests/css-blending/reftest.list
@@ -83,17 +83,17 @@ pref(layout.css.background-blend-mode.en
 pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-origin-border-box.html background-blending-background-origin-ref.html
 pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-origin-content-box.html background-blending-background-origin-ref.html
 
 # Test plan 5.3.11 background-blend-mode for an element with background-attachement
 pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed.html background-blending-background-attachement-fixed-ref.html
 pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed-scroll.html background-blending-background-attachement-fixed-scroll-ref.html
 
 pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-image.html background-blend-mode-body-image-ref.html
-fuzzy-if(/^Windows\x20NT\x2010\.0/.test(http.oscpu),1,16408) fuzzy-if(Android,4,768) fuzzy-if(gtkWidget,1,132) fuzzy-if(skiaContent,1,800) pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-transparent-image.html background-blend-mode-body-transparent-image-ref.html
+fuzzy-if(/^Windows\x20NT\x2010\.0/.test(http.oscpu),1,16408) fuzzy-if(Android,4,768) fuzzy-if(gtkWidget,1,132) fuzzy-if(skiaContent,1,800) fuzzy-if(d2d,1,33208) pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-transparent-image.html background-blend-mode-body-transparent-image-ref.html
 
 pref(layout.css.background-blend-mode.enabled,true) == background-blending-moz-element.html background-blending-moz-element-ref.html
 
 fuzzy(1,40000) pref(layout.css.background-blend-mode.enabled,true) == mix-blend-mode-soft-light.html mix-blend-mode-soft-light-ref.html
 
 # Test plan 4.4.2 element with isolation:isolate creates an isolated group for blended children
 pref(layout.css.isolation.enabled,true) == blend-isolation.html blend-isolation-ref.html
 
--- a/netwerk/base/nsICacheInfoChannel.idl
+++ b/netwerk/base/nsICacheInfoChannel.idl
@@ -101,11 +101,19 @@ interface nsICacheInfoChannel : nsISuppo
 
   /**
    * Opens and returns an output stream that a consumer may use to save an
    * alternate representation of the data.
    * Must be called after the OnStopRequest that delivered the real data.
    * The consumer may choose to replace the saved alt representation.
    * Opening the output stream will fail if there are any open input streams
    * reading the already saved alt representation.
+   *
+   * @param type
+   *        type of the alternative data representation
+   * @param predictedSize
+   *        Predicted size of the data that will be written. It's used to decide
+   *        whether the resulting entry would exceed the size limit, in which case
+   *        an error is thrown. If the size isn't known in advance, -1 should be
+   *        passed.
    */
-  nsIOutputStream openAlternativeOutputStream(in ACString type);
+  nsIOutputStream openAlternativeOutputStream(in ACString type, in long long predictedSize);
 };
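The new predictedSize argument lets the cache refuse an alternate-data write up front when the entry would exceed the size limit (the CacheEntry changes below return NS_ERROR_FILE_TOO_BIG in that case). A hypothetical chrome-JS caller sketch, for illustration only; the channel, alt-data type, and error handling are assumptions, not taken from this patch:

function writeAltData(channel, type, data) {
  // nsICacheInfoChannel is typically reached from a necko channel via QI.
  // Per the IDL above, this must happen after the OnStopRequest that
  // delivered the real data.
  let cacheInfo = channel.QueryInterface(Ci.nsICacheInfoChannel);
  let output;
  try {
    // Pass the known size so an over-limit entry fails here instead of after
    // the data has been produced; pass -1 if the size is not known in advance.
    output = cacheInfo.openAlternativeOutputStream(type, data.length);
  } catch (ex) {
    // e.g. NS_ERROR_FILE_TOO_BIG when the predicted size exceeds the limit.
    Cu.reportError(ex);
    return;
  }
  output.write(data, data.length);
  output.close();
}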
--- a/netwerk/cache2/CacheEntry.cpp
+++ b/netwerk/cache2/CacheEntry.cpp
@@ -232,17 +232,16 @@ CacheEntry::CacheEntry(const nsACString&
 , mSecurityInfoLoaded(false)
 , mPreventCallbacks(false)
 , mHasData(false)
 , mPinned(aPin)
 , mPinningKnown(false)
 , mState(NOTLOADED)
 , mRegistration(NEVERREGISTERED)
 , mWriter(nullptr)
-, mPredictedDataSize(0)
 , mUseCount(0)
 , mCacheEntryId(GetNextId())
 {
   LOG(("CacheEntry::CacheEntry [this=%p]", this));
 
   mService = CacheStorageService::Self();
 
   CacheStorageService::Self()->RecordMemoryOnlyEntry(
@@ -1230,26 +1229,31 @@ nsresult CacheEntry::OpenInputStreamInte
     rv = OpenOutputStreamInternal(0, getter_AddRefs(mOutputStream));
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
   stream.forget(_retval);
   return NS_OK;
 }
 
-nsresult CacheEntry::OpenOutputStream(int64_t offset, nsIOutputStream * *_retval)
+nsresult CacheEntry::OpenOutputStream(int64_t offset, int64_t predictedSize, nsIOutputStream * *_retval)
 {
   LOG(("CacheEntry::OpenOutputStream [this=%p]", this));
 
   nsresult rv;
 
   mozilla::MutexAutoLock lock(mLock);
 
   MOZ_ASSERT(mState > EMPTY);
 
+  if (mFile->EntryWouldExceedLimit(0, predictedSize, false)) {
+    LOG(("  entry would exceed size limit"));
+    return NS_ERROR_FILE_TOO_BIG;
+  }
+
   if (mOutputStream && !mIsDoomed) {
     LOG(("  giving phantom output stream"));
     mOutputStream.forget(_retval);
   }
   else {
     rv = OpenOutputStreamInternal(offset, _retval);
     if (NS_FAILED(rv)) return rv;
   }
@@ -1259,30 +1263,35 @@ nsresult CacheEntry::OpenOutputStream(in
     mState = READY;
 
   // Invoke any pending readers now.
   InvokeCallbacks();
 
   return NS_OK;
 }
 
-nsresult CacheEntry::OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval)
+nsresult CacheEntry::OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval)
 {
   LOG(("CacheEntry::OpenAlternativeOutputStream [this=%p, type=%s]", this,
        PromiseFlatCString(type).get()));
 
   nsresult rv;
 
   mozilla::MutexAutoLock lock(mLock);
 
   if (!mHasData || mState < READY || mOutputStream || mIsDoomed) {
     LOG(("  entry not in state to write alt-data"));
     return NS_ERROR_NOT_AVAILABLE;
   }
 
+  if (mFile->EntryWouldExceedLimit(0, predictedSize, true)) {
+    LOG(("  entry would exceed size limit"));
+    return NS_ERROR_FILE_TOO_BIG;
+  }
+
   nsCOMPtr<nsIOutputStream> stream;
   rv = mFile->OpenAlternativeOutputStream(nullptr,
                                           PromiseFlatCString(type).get(),
                                           getter_AddRefs(stream));
   NS_ENSURE_SUCCESS(rv, rv);
 
   stream.swap(*_retval);
   return NS_OK;
@@ -1328,35 +1337,16 @@ nsresult CacheEntry::OpenOutputStreamInt
 
   // Prevent opening output stream again.
   mHasData = true;
 
   stream.swap(*_retval);
   return NS_OK;
 }
 
-nsresult CacheEntry::GetPredictedDataSize(int64_t *aPredictedDataSize)
-{
-  *aPredictedDataSize = mPredictedDataSize;
-  return NS_OK;
-}
-nsresult CacheEntry::SetPredictedDataSize(int64_t aPredictedDataSize)
-{
-  mPredictedDataSize = aPredictedDataSize;
-
-  if (!mSkipSizeCheck && CacheObserver::EntryIsTooBig(mPredictedDataSize, mUseDisk)) {
-    LOG(("CacheEntry::SetPredictedDataSize [this=%p] too big, dooming", this));
-    AsyncDoom(nullptr);
-
-    return NS_ERROR_FILE_TOO_BIG;
-  }
-
-  return NS_OK;
-}
-
 nsresult CacheEntry::GetSecurityInfo(nsISupports * *aSecurityInfo)
 {
   {
     mozilla::MutexAutoLock lock(mLock);
     if (mSecurityInfoLoaded) {
       NS_IF_ADDREF(*aSecurityInfo = mSecurityInfo);
       return NS_OK;
     }
--- a/netwerk/cache2/CacheEntry.h
+++ b/netwerk/cache2/CacheEntry.h
@@ -74,33 +74,31 @@ public:
   nsresult GetExpirationTime(uint32_t *aExpirationTime);
   nsresult SetExpirationTime(uint32_t expirationTime);
   nsresult GetOnStartTime(uint64_t *aOnStartTime);
   nsresult GetOnStopTime(uint64_t *aOnStopTime);
   nsresult SetNetworkTimes(uint64_t onStartTime, uint64_t onStopTime);
   nsresult ForceValidFor(uint32_t aSecondsToTheFuture);
   nsresult GetIsForcedValid(bool *aIsForcedValid);
   nsresult OpenInputStream(int64_t offset, nsIInputStream * *_retval);
-  nsresult OpenOutputStream(int64_t offset, nsIOutputStream * *_retval);
-  nsresult GetPredictedDataSize(int64_t *aPredictedDataSize);
-  nsresult SetPredictedDataSize(int64_t aPredictedDataSize);
+  nsresult OpenOutputStream(int64_t offset, int64_t predictedSize, nsIOutputStream * *_retval);
   nsresult GetSecurityInfo(nsISupports * *aSecurityInfo);
   nsresult SetSecurityInfo(nsISupports *aSecurityInfo);
   nsresult GetStorageDataSize(uint32_t *aStorageDataSize);
   nsresult AsyncDoom(nsICacheEntryDoomCallback *listener);
   nsresult GetMetaDataElement(const char * key, char * *_retval);
   nsresult SetMetaDataElement(const char * key, const char * value);
   nsresult VisitMetaData(nsICacheEntryMetaDataVisitor *visitor);
   nsresult MetaDataReady(void);
   nsresult SetValid(void);
   nsresult GetDiskStorageSizeInKB(uint32_t *aDiskStorageSizeInKB);
   nsresult Recreate(bool aMemoryOnly, nsICacheEntry * *_retval);
   nsresult GetDataSize(int64_t *aDataSize);
   nsresult GetAltDataSize(int64_t *aAltDataSize);
-  nsresult OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval);
+  nsresult OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval);
   nsresult OpenAlternativeInputStream(const nsACString & type, nsIInputStream * *_retval);
   nsresult GetLoadContextInfo(nsILoadContextInfo * *aLoadContextInfo);
   nsresult Close(void);
   nsresult MarkValid(void);
   nsresult MaybeMarkValid(void);
   nsresult HasWriteAccess(bool aWriteAllowed, bool *_retval);
 
 public:
@@ -414,17 +412,16 @@ private:
     Ops() : mFlags(0) { }
     uint32_t Grab() { uint32_t flags = mFlags; mFlags = 0; return flags; }
     bool Set(uint32_t aFlags) { if (mFlags & aFlags) return false; mFlags |= aFlags; return true; }
   private:
     uint32_t mFlags;
   } mBackgroundOperations;
 
   nsCOMPtr<nsISupports> mSecurityInfo;
-  int64_t mPredictedDataSize;
   mozilla::TimeStamp mLoadStart;
   uint32_t mUseCount;
 
   const uint64_t mCacheEntryId;
 };
 
 
 class CacheEntryHandle final : public nsICacheEntry
@@ -445,33 +442,31 @@ public:
   NS_IMETHOD GetExpirationTime(uint32_t *aExpirationTime) override { return mEntry->GetExpirationTime(aExpirationTime); }
   NS_IMETHOD SetExpirationTime(uint32_t expirationTime) override { return mEntry->SetExpirationTime(expirationTime); }
   NS_IMETHOD GetOnStartTime(uint64_t *aOnStartTime) override { return mEntry->GetOnStartTime(aOnStartTime); }
   NS_IMETHOD GetOnStopTime(uint64_t *aOnStopTime) override { return mEntry->GetOnStopTime(aOnStopTime); }
   NS_IMETHOD SetNetworkTimes(uint64_t onStartTime, uint64_t onStopTime) override { return mEntry->SetNetworkTimes(onStartTime, onStopTime); }
   NS_IMETHOD ForceValidFor(uint32_t aSecondsToTheFuture) override { return mEntry->ForceValidFor(aSecondsToTheFuture); }
   NS_IMETHOD GetIsForcedValid(bool *aIsForcedValid) override { return mEntry->GetIsForcedValid(aIsForcedValid); }
   NS_IMETHOD OpenInputStream(int64_t offset, nsIInputStream * *_retval) override { return mEntry->OpenInputStream(offset, _retval); }
-  NS_IMETHOD OpenOutputStream(int64_t offset, nsIOutputStream * *_retval) override { return mEntry->OpenOutputStream(offset, _retval); }
-  NS_IMETHOD GetPredictedDataSize(int64_t *aPredictedDataSize) override { return mEntry->GetPredictedDataSize(aPredictedDataSize); }
-  NS_IMETHOD SetPredictedDataSize(int64_t aPredictedDataSize) override { return mEntry->SetPredictedDataSize(aPredictedDataSize); }
+  NS_IMETHOD OpenOutputStream(int64_t offset, int64_t predictedSize, nsIOutputStream * *_retval) override { return mEntry->OpenOutputStream(offset, predictedSize, _retval); }
   NS_IMETHOD GetSecurityInfo(nsISupports * *aSecurityInfo) override { return mEntry->GetSecurityInfo(aSecurityInfo); }
   NS_IMETHOD SetSecurityInfo(nsISupports *aSecurityInfo) override { return mEntry->SetSecurityInfo(aSecurityInfo); }
   NS_IMETHOD GetStorageDataSize(uint32_t *aStorageDataSize) override { return mEntry->GetStorageDataSize(aStorageDataSize); }
   NS_IMETHOD AsyncDoom(nsICacheEntryDoomCallback *listener) override { return mEntry->AsyncDoom(listener); }
   NS_IMETHOD GetMetaDataElement(const char * key, char * *_retval) override { return mEntry->GetMetaDataElement(key, _retval); }
   NS_IMETHOD SetMetaDataElement(const char * key, const char * value) override { return mEntry->SetMetaDataElement(key, value); }
   NS_IMETHOD VisitMetaData(nsICacheEntryMetaDataVisitor *visitor) override { return mEntry->VisitMetaData(visitor); }
   NS_IMETHOD MetaDataReady(void) override { return mEntry->MetaDataReady(); }
   NS_IMETHOD SetValid(void) override { return mEntry->SetValid(); }
   NS_IMETHOD GetDiskStorageSizeInKB(uint32_t *aDiskStorageSizeInKB) override { return mEntry->GetDiskStorageSizeInKB(aDiskStorageSizeInKB); }
   NS_IMETHOD Recreate(bool aMemoryOnly, nsICacheEntry * *_retval) override { return mEntry->Recreate(aMemoryOnly, _retval); }
   NS_IMETHOD GetDataSize(int64_t *aDataSize) override { return mEntry->GetDataSize(aDataSize); }
   NS_IMETHOD GetAltDataSize(int64_t *aAltDataSize) override { return mEntry->GetAltDataSize(aAltDataSize); }
-  NS_IMETHOD OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval) override { return mEntry->OpenAlternativeOutputStream(type, _retval); }
+  NS_IMETHOD OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval) override { return mEntry->OpenAlternativeOutputStream(type, predictedSize, _retval); }
   NS_IMETHOD OpenAlternativeInputStream(const nsACString & type, nsIInputStream * *_retval) override { return mEntry->OpenAlternativeInputStream(type, _retval); }
   NS_IMETHOD GetLoadContextInfo(nsILoadContextInfo * *aLoadContextInfo) override { return mEntry->GetLoadContextInfo(aLoadContextInfo); }
   NS_IMETHOD Close(void) override { return mEntry->Close(); }
   NS_IMETHOD MarkValid(void) override { return mEntry->MarkValid(); }
   NS_IMETHOD MaybeMarkValid(void) override { return mEntry->MaybeMarkValid(); }
   NS_IMETHOD HasWriteAccess(bool aWriteAllowed, bool *_retval) override { return mEntry->HasWriteAccess(aWriteAllowed, _retval); }
 
   // Specific implementation:
--- a/netwerk/cache2/CacheFile.cpp
+++ b/netwerk/cache2/CacheFile.cpp
@@ -2057,16 +2057,23 @@ CacheFile::Truncate(int64_t aOffset)
       if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
         return NS_ERROR_UNEXPECTED;
       }
 
       LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
            chunk.get()));
     }
 
+    rv = chunk->GetStatus();
+    if (NS_FAILED(rv)) {
+      LOG(("CacheFile::Truncate() - New last chunk failed [status=0x%08"
+           PRIx32 "]", static_cast<uint32_t>(rv)));
+      return rv;
+    }
+
     rv = chunk->Truncate(bytesInNewLastChunk);
     if (NS_FAILED(rv)) {
       return rv;
     }
 
     // If the chunk is ready set the new hash now. If it's still being loaded
     // CacheChunk::Truncate() made the chunk dirty and the hash will be updated
     // in OnChunkWritten().
@@ -2141,16 +2148,18 @@ CacheFile::RemoveInput(CacheFileInputStr
   return NS_OK;
 }
 
 nsresult
 CacheFile::RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus)
 {
   AssertOwnsLock();
 
+  nsresult rv;
+
   LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]", this,
        aOutput, static_cast<uint32_t>(aStatus)));
 
   if (mOutput != aOutput) {
     LOG(("CacheFile::RemoveOutput() - This output was already removed, ignoring"
          " call [this=%p]", this));
     return NS_OK;
   }
@@ -2162,17 +2171,43 @@ CacheFile::RemoveOutput(CacheFileOutputS
 
   if (!mMemoryOnly)
     WriteMetadataIfNeededLocked();
 
   // Make sure the CacheFile status is set to a failure when the output stream
   // is closed with a fatal error.  This way we propagate the failure state of
   // this entry to end consumers correctly and without any race windows.
   if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) && aStatus != NS_BASE_STREAM_CLOSED) {
-    mStatus = aStatus;
+    if (aOutput->IsAlternativeData()) {
+      MOZ_ASSERT(mAltDataOffset != -1);
+      // If there is no alt-data input stream, truncate only the alt-data;
+      // otherwise doom the entry.
+      bool altDataInputExists = false;
+      for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+        if (mInputs[i]->IsAlternativeData()) {
+          altDataInputExists = true;
+          break;
+        }
+      }
+      if (altDataInputExists) {
+        SetError(aStatus);
+      } else {
+        rv = Truncate(mAltDataOffset);
+        if (NS_FAILED(rv)) {
+          LOG(("CacheFile::RemoveOutput() - Truncating alt-data failed "
+               "[rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv)));
+          SetError(aStatus);
+        } else {
+          SetAltMetadata(nullptr);
+          mAltDataOffset = -1;
+        }
+      }
+    } else {
+      SetError(aStatus);
+    }
   }
 
   // Notify close listener as the last action
   aOutput->NotifyCloseListener();
 
   Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
                         StatusToTelemetryEnum(aStatus));
 
@@ -2378,16 +2413,36 @@ CacheFile::IsWriteInProgress()
            mOpeningFile ||
            mOutput ||
            mChunks.Count();
 
   return result;
 }
 
 bool
+CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData)
+{
+  if (mSkipSizeCheck || aSize < 0) {
+    return false;
+  }
+
+  int64_t totalSize = aOffset + aSize;
+  if (aIsAltData) {
+    totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
+  }
+
+  if (CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly)) {
+    return true;
+  }
+
+  return false;
+}
+
+bool
 CacheFile::IsDirty()
 {
   return mDataIsDirty || mMetadata->IsDirty();
 }
 
 void
 CacheFile::WriteMetadataIfNeeded()
 {
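
The new CacheFile::EntryWouldExceedLimit() above combines the write offset with the predicted size and, for alt-data, adds the end of the regular data before asking CacheObserver::EntryIsTooBig() whether the result is over the limit. A minimal stand-alone sketch of that arithmetic in JavaScript, assuming a hypothetical maxEntrySizeKiB value in place of the browser.cache.disk.max_entry_size pref and ignoring any extra constraints EntryIsTooBig() may apply:

// Hypothetical mirror of CacheFile::EntryWouldExceedLimit(); names are stand-ins.
function entryWouldExceedLimit(offset, size, isAltData,
                               { dataSize, altDataOffset, skipSizeCheck, maxEntrySizeKiB }) {
  if (skipSizeCheck || size < 0) {
    return false; // unknown size (-1) is never rejected up front
  }
  let totalSize = offset + size;
  if (isAltData) {
    // Alt-data is stored after the regular data, so count that part as well.
    totalSize += (altDataOffset === -1) ? dataSize : altDataOffset;
  }
  return totalSize > maxEntrySizeKiB * 1024;
}

// 1 MiB of data already stored, 1 MiB of alt-data predicted, 1800 KiB limit => true
entryWouldExceedLimit(0, 1 << 20, true,
                      { dataSize: 1 << 20, altDataOffset: -1,
                        skipSizeCheck: false, maxEntrySizeKiB: 1800 });
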
--- a/netwerk/cache2/CacheFile.h
+++ b/netwerk/cache2/CacheFile.h
@@ -113,16 +113,17 @@ public:
   // i.e. delivered to the consumer.
   nsresult OnFetched();
 
   bool DataSize(int64_t* aSize);
   void Key(nsACString& aKey) { aKey = mKey; }
   bool IsDoomed();
   bool IsPinned() const { return mPinned; }
   bool IsWriteInProgress();
+  bool EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData);
 
   // Memory reporting
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
 
 private:
   friend class CacheFileIOManager;
   friend class CacheFileChunk;
--- a/netwerk/cache2/CacheFileChunk.cpp
+++ b/netwerk/cache2/CacheFileChunk.cpp
@@ -775,18 +775,16 @@ CacheFileChunk::IsDirty() const
   AssertOwnsLock();
 
   return mIsDirty;
 }
 
 nsresult
 CacheFileChunk::GetStatus()
 {
-  AssertOwnsLock();
-
   return mStatus;
 }
 
 void
 CacheFileChunk::SetError(nsresult aStatus)
 {
   LOG(("CacheFileChunk::SetError() [this=%p, status=0x%08" PRIx32 "]",
        this, static_cast<uint32_t>(aStatus)));
--- a/netwerk/cache2/CacheFileOutputStream.cpp
+++ b/netwerk/cache2/CacheFileOutputStream.cpp
@@ -94,32 +94,28 @@ CacheFileOutputStream::Write(const char 
   if (mClosed) {
     LOG(("CacheFileOutputStream::Write() - Stream is closed. [this=%p, "
          "status=0x%08" PRIx32"]", this, static_cast<uint32_t>(mStatus)));
 
     return NS_FAILED(mStatus) ? mStatus : NS_BASE_STREAM_CLOSED;
   }
 
   if (!mFile->mSkipSizeCheck && CacheObserver::EntryIsTooBig(mPos + aCount, !mFile->mMemoryOnly)) {
-    LOG(("CacheFileOutputStream::Write() - Entry is too big, failing and "
-         "dooming the entry. [this=%p]", this));
+    LOG(("CacheFileOutputStream::Write() - Entry is too big. [this=%p]", this));
 
-    mFile->DoomLocked(nullptr);
     CloseWithStatusLocked(NS_ERROR_FILE_TOO_BIG);
     return NS_ERROR_FILE_TOO_BIG;
   }
 
   // We use 64-bit offset when accessing the file, unfortunately we use 32-bit
   // metadata offset, so we cannot handle data bigger than 4GB.
   if (mPos + aCount > PR_UINT32_MAX) {
-    LOG(("CacheFileOutputStream::Write() - Entry's size exceeds 4GB while it "
-         "isn't too big according to CacheObserver::EntryIsTooBig(). Failing "
-         "and dooming the entry. [this=%p]", this));
+    LOG(("CacheFileOutputStream::Write() - Entry's size exceeds 4GB. [this=%p]",
+         this));
 
-    mFile->DoomLocked(nullptr);
     CloseWithStatusLocked(NS_ERROR_FILE_TOO_BIG);
     return NS_ERROR_FILE_TOO_BIG;
   }
 
   *_retval = aCount;
 
   while (aCount) {
     EnsureCorrectChunk(false);
@@ -367,16 +363,25 @@ void CacheFileOutputStream::NotifyCloseL
 }
 
 void
 CacheFileOutputStream::ReleaseChunk()
 {
   LOG(("CacheFileOutputStream::ReleaseChunk() [this=%p, idx=%d]",
        this, mChunk->Index()));
 
+  // If the chunk didn't write any data, we need to remove the hash for this
+  // chunk that was added when the chunk was created in CacheFile::GetChunkLocked.
+  if (mChunk->DataSize() == 0) {
+    // It must be due to a failure; we don't create a new chunk when we don't
+    // have data to write.
+    MOZ_ASSERT(NS_FAILED(mChunk->GetStatus()));
+    mFile->mMetadata->RemoveHash(mChunk->Index());
+  }
+
   mFile->ReleaseOutsideLock(mChunk.forget());
 }
 
 void
 CacheFileOutputStream::EnsureCorrectChunk(bool aReleaseOnly)
 {
   mFile->AssertOwnsLock();
 
--- a/netwerk/cache2/OldWrappers.cpp
+++ b/netwerk/cache2/OldWrappers.cpp
@@ -443,16 +443,17 @@ NS_IMETHODIMP _OldCacheEntryWrapper::Ope
                                                      nsIInputStream * *_retval)
 {
   if (offset > PR_UINT32_MAX)
     return NS_ERROR_INVALID_ARG;
 
   return OpenInputStream(uint32_t(offset), _retval);
 }
 NS_IMETHODIMP _OldCacheEntryWrapper::OpenOutputStream(int64_t offset,
+                                                      int64_t predictedSize,
                                                       nsIOutputStream * *_retval)
 {
   if (offset > PR_UINT32_MAX)
     return NS_ERROR_INVALID_ARG;
 
   return OpenOutputStream(uint32_t(offset), _retval);
 }
 
--- a/netwerk/cache2/OldWrappers.h
+++ b/netwerk/cache2/OldWrappers.h
@@ -40,35 +40,25 @@ public:
     return !mOldDesc ? NS_ERROR_NULL_POINTER :
                        mOldDesc->OpenInputStream(offset, _retval);
   }
   nsresult OpenOutputStream(uint32_t offset, nsIOutputStream * *_retval)
   {
     return !mOldDesc ? NS_ERROR_NULL_POINTER :
                        mOldDesc->OpenOutputStream(offset, _retval);
   }
-  NS_IMETHOD OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval) override
+  NS_IMETHOD OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval) override
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
   NS_IMETHOD OpenAlternativeInputStream(const nsACString & type, nsIInputStream * *_retval) override
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
 
-  NS_IMETHOD GetPredictedDataSize(int64_t *aPredictedDataSize) override
-  {
-    return !mOldDesc ? NS_ERROR_NULL_POINTER :
-                       mOldDesc->GetPredictedDataSize(aPredictedDataSize);
-  }
-  NS_IMETHOD SetPredictedDataSize(int64_t aPredictedDataSize) override
-  {
-    return !mOldDesc ? NS_ERROR_NULL_POINTER :
-                       mOldDesc->SetPredictedDataSize(aPredictedDataSize);
-  }
   NS_IMETHOD GetSecurityInfo(nsISupports * *aSecurityInfo) override
   {
     return !mOldDesc ? NS_ERROR_NULL_POINTER :
                        mOldDesc->GetSecurityInfo(aSecurityInfo);
   }
   NS_IMETHOD SetSecurityInfo(nsISupports *aSecurityInfo) override
   {
     return !mOldDesc ? NS_ERROR_NULL_POINTER :
@@ -166,17 +156,17 @@ public:
   NS_IMETHOD GetIsForcedValid(bool *aIsForcedValid) override;
   NS_IMETHOD ForceValidFor(uint32_t aSecondsToTheFuture) override;
   NS_IMETHOD SetValid() override { return NS_OK; }
   NS_IMETHOD MetaDataReady() override { return NS_OK; }
   NS_IMETHOD Recreate(bool, nsICacheEntry**) override;
   NS_IMETHOD GetDataSize(int64_t *size) override;
   NS_IMETHOD GetAltDataSize(int64_t *size) override;
   NS_IMETHOD OpenInputStream(int64_t offset, nsIInputStream * *_retval) override;
-  NS_IMETHOD OpenOutputStream(int64_t offset, nsIOutputStream * *_retval) override;
+  NS_IMETHOD OpenOutputStream(int64_t offset, int64_t predictedSize, nsIOutputStream * *_retval) override;
   NS_IMETHOD MaybeMarkValid() override;
   NS_IMETHOD HasWriteAccess(bool aWriteOnly, bool *aWriteAccess) override;
   NS_IMETHOD VisitMetaData(nsICacheEntryMetaDataVisitor*) override;
 
   explicit _OldCacheEntryWrapper(nsICacheEntryDescriptor* desc);
   explicit _OldCacheEntryWrapper(nsICacheEntryInfo* info);
 
 private:
--- a/netwerk/cache2/nsICacheEntry.idl
+++ b/netwerk/cache2/nsICacheEntry.idl
@@ -123,28 +123,25 @@ interface nsICacheEntry : nsISupports
    * MAY implement nsISeekableStream.
    *
    * If opening an output stream to existing cached data, the data will be
    * truncated to the specified offset.
    *
    * @param offset
    *        write starting from this offset into the cached data.  an offset
    *        beyond the end of the stream has undefined consequences.
+   * @param predictedSize
+   *        Predicted size of the data that will be written. It's used to decide
+   *        whether the resulting entry would exceed the size limit, in which case
+   *        an error is thrown. If the size isn't known in advance, -1 should be
+   *        passed.
    *
    * @return blocking, buffered output stream.
    */
-  nsIOutputStream openOutputStream(in long long offset);
-
-  /**
-    * Stores the Content-Length specified in the HTTP header for this
-    * entry. Checked before we write to the cache entry, to prevent ever
-    * taking up space in the cache for an entry that we know up front
-    * is going to have to be evicted anyway. See bug 588507.
-    */
-  attribute int64_t predictedDataSize;
+  nsIOutputStream openOutputStream(in long long offset, in long long predictedSize);
 
   /**
    * Get/set security info on the cache entry for this descriptor.
    */
   attribute nsISupports securityInfo;
 
   /**
    * Get the size of the cache entry data, as stored. This may differ
@@ -240,24 +237,33 @@ interface nsICacheEntry : nsISupports
                               content or alt data).
   *    - NS_ERROR_NOT_AVAILABLE if alt data does not exist.
   */
   readonly attribute long long altDataSize;
 
   /**
    * Opens and returns an output stream that a consumer may use to save an
    * alternate representation of the data.
+   *
+   * @param type
+   *        type of the alternative data representation
+   * @param predictedSize
+   *        Predicted size of the data that will be written. It's used to decide
+   *        whether the resulting entry would exceed the size limit, in which case
+   *        an error is thrown. If the size isn't known in advance, -1 should be
+   *        passed.
+   *
    * @throws
    *    - NS_ERROR_NOT_AVAILABLE if the real data hasn't been written.
    *    - NS_ERROR_IN_PROGRESS when the writing regular content or alt-data to
    *      the cache entry is still in progress.
    *
    * If there is alt-data already saved, it will be overwritten.
    */
-  nsIOutputStream openAlternativeOutputStream(in ACString type);
+  nsIOutputStream openAlternativeOutputStream(in ACString type, in long long predictedSize);
 
   /**
    * Opens and returns an input stream that can be used to read the alternative
    * representation previously saved in the cache.
    * If this call is made while writing alt-data is still in progress, it is
    * still possible to read content from the input stream as it's being written.
    * @throws
    *    - NS_ERROR_NOT_AVAILABLE if the alt-data representation doesn't exist at
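
For JavaScript callers of nsICacheEntry, the updated contract is: pass the predicted size when it is known, pass -1 otherwise, and be prepared for NS_ERROR_FILE_TOO_BIG. A hedged usage sketch in xpcshell style, modeled on the tests later in this patch (the URL, data and content type are placeholders, and asyncOpenCacheEntry is assumed to come from the usual cache test head file):

const data = "response body";
const altData = "processed body";

asyncOpenCacheEntry("http://example/", "disk", Ci.nsICacheStorage.OPEN_TRUNCATE, null,
  function (status, entry) {
    Assert.equal(status, Cr.NS_OK);

    // Size known up front, so pass it as predictedSize.
    let os = entry.openOutputStream(0, data.length);
    os.write(data, data.length);
    os.close();

    try {
      // A prediction over the entry size limit throws immediately,
      // leaving the regular data untouched.
      let aos = entry.openAlternativeOutputStream("text/binary", altData.length);
      aos.write(altData, altData.length);
      aos.close();
    } catch (e) {
      Assert.equal(e.result, Cr.NS_ERROR_FILE_TOO_BIG);
    }
    entry.close();
  });
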
--- a/netwerk/ipc/NeckoChild.cpp
+++ b/netwerk/ipc/NeckoChild.cpp
@@ -109,16 +109,17 @@ NeckoChild::DeallocPStunAddrsRequestChil
   p->ReleaseIPDLReference();
 #endif
   return true;
 }
 
 PAltDataOutputStreamChild*
 NeckoChild::AllocPAltDataOutputStreamChild(
         const nsCString& type,
+        const int64_t& predictedSize,
         PHttpChannelChild* channel)
 {
   // We don't allocate here: see HttpChannelChild::OpenAlternativeOutputStream()
   NS_NOTREACHED("AllocPAltDataOutputStreamChild should not be called");
   return nullptr;
 }
 
 bool
--- a/netwerk/ipc/NeckoChild.h
+++ b/netwerk/ipc/NeckoChild.h
@@ -29,17 +29,17 @@ protected:
     AllocPHttpChannelChild(const PBrowserOrId&, const SerializedLoadContext&,
                            const HttpChannelCreationArgs& aOpenArgs) override;
   virtual bool DeallocPHttpChannelChild(PHttpChannelChild*) override;
 
   virtual PStunAddrsRequestChild* AllocPStunAddrsRequestChild() override;
   virtual bool
     DeallocPStunAddrsRequestChild(PStunAddrsRequestChild* aActor) override;
 
-  virtual PAltDataOutputStreamChild* AllocPAltDataOutputStreamChild(const nsCString& type, PHttpChannelChild* channel) override;
+  virtual PAltDataOutputStreamChild* AllocPAltDataOutputStreamChild(const nsCString& type, const int64_t& predictedSize, PHttpChannelChild* channel) override;
   virtual bool DeallocPAltDataOutputStreamChild(PAltDataOutputStreamChild* aActor) override;
 
   virtual PCookieServiceChild* AllocPCookieServiceChild() override;
   virtual bool DeallocPCookieServiceChild(PCookieServiceChild*) override;
   virtual PWyciwygChannelChild* AllocPWyciwygChannelChild() override;
   virtual bool DeallocPWyciwygChannelChild(PWyciwygChannelChild*) override;
   virtual PFTPChannelChild*
     AllocPFTPChannelChild(const PBrowserOrId& aBrowser,
--- a/netwerk/ipc/NeckoParent.cpp
+++ b/netwerk/ipc/NeckoParent.cpp
@@ -350,21 +350,22 @@ NeckoParent::DeallocPStunAddrsRequestPar
   p->Release();
 #endif
   return true;
 }
 
 PAltDataOutputStreamParent*
 NeckoParent::AllocPAltDataOutputStreamParent(
         const nsCString& type,
+        const int64_t& predictedSize,
         PHttpChannelParent* channel)
 {
   HttpChannelParent* chan = static_cast<HttpChannelParent*>(channel);
   nsCOMPtr<nsIOutputStream> stream;
-  nsresult rv = chan->OpenAlternativeOutputStream(type, getter_AddRefs(stream));
+  nsresult rv = chan->OpenAlternativeOutputStream(type, predictedSize, getter_AddRefs(stream));
   AltDataOutputStreamParent* parent = new AltDataOutputStreamParent(stream);
   parent->AddRef();
   // If the return value was not NS_OK, the error code will be sent
   // asynchronously to the child, after receiving the first message.
   parent->SetError(rv);
   return parent;
 }
 
--- a/netwerk/ipc/NeckoParent.h
+++ b/netwerk/ipc/NeckoParent.h
@@ -104,17 +104,17 @@ protected:
       const HttpChannelCreationArgs& aOpenArgs) override;
   virtual bool DeallocPHttpChannelParent(PHttpChannelParent*) override;
 
   virtual PStunAddrsRequestParent* AllocPStunAddrsRequestParent() override;
   virtual bool
     DeallocPStunAddrsRequestParent(PStunAddrsRequestParent* aActor) override;
 
   virtual PAltDataOutputStreamParent* AllocPAltDataOutputStreamParent(
-    const nsCString& type, PHttpChannelParent* channel) override;
+    const nsCString& type, const int64_t& predictedSize, PHttpChannelParent* channel) override;
   virtual bool DeallocPAltDataOutputStreamParent(
     PAltDataOutputStreamParent* aActor) override;
 
   virtual bool DeallocPCookieServiceParent(PCookieServiceParent*) override;
   virtual PWyciwygChannelParent* AllocPWyciwygChannelParent() override;
   virtual bool DeallocPWyciwygChannelParent(PWyciwygChannelParent*) override;
   virtual PFTPChannelParent*
     AllocPFTPChannelParent(const PBrowserOrId& aBrowser,
--- a/netwerk/ipc/PNecko.ipdl
+++ b/netwerk/ipc/PNecko.ipdl
@@ -116,17 +116,17 @@ parent:
   async OnAuthAvailable(uint64_t callbackId, nsString user,
                         nsString password, nsString domain);
   async OnAuthCancelled(uint64_t callbackId, bool userCancel);
 
   async RequestContextLoadBegin(uint64_t rcid);
   async RequestContextAfterDOMContentLoaded(uint64_t rcid);
   async RemoveRequestContext(uint64_t rcid);
 
-  async PAltDataOutputStream(nsCString type, PHttpChannel channel);
+  async PAltDataOutputStream(nsCString type, int64_t predictedSize, PHttpChannel channel);
 
   async PStunAddrsRequest();
 
   /**
    * WebExtension-specific remote resource loading
    */
   async GetExtensionStream(URIParams uri) returns (nsIInputStream stream);
   async GetExtensionFD(URIParams uri) returns (FileDescriptor fd);
--- a/netwerk/protocol/http/HttpBaseChannel.cpp
+++ b/netwerk/protocol/http/HttpBaseChannel.cpp
@@ -4285,17 +4285,17 @@ HttpBaseChannel::GetPerformanceStorage()
 void
 HttpBaseChannel::MaybeReportTimingData()
 {
   // We don't need to report the resource timing entry for a TYPE_DOCUMENT load.
   // But in the case that Server-Timing headers exist for
   // a document load, we have to create the document entry early
   // with the timed channel. This is the only way to make
   // server timing data available in the document entry.
-  if (mLoadInfo->GetExternalContentPolicyType() == nsIContentPolicy::TYPE_DOCUMENT) {
+  if (mLoadInfo && mLoadInfo->GetExternalContentPolicyType() == nsIContentPolicy::TYPE_DOCUMENT) {
     if ((mResponseHead && mResponseHead->HasHeader(nsHttp::Server_Timing)) ||
         (mResponseTrailers && mResponseTrailers->HasHeader(nsHttp::Server_Timing))) {
       mozilla::dom::PerformanceStorage* documentPerformance = GetPerformanceStorage();
       if (documentPerformance) {
         documentPerformance->CreateDocumentEntry(this);
       }
     }
     return;
--- a/netwerk/protocol/http/HttpChannelChild.cpp
+++ b/netwerk/protocol/http/HttpChannelChild.cpp
@@ -3096,22 +3096,22 @@ HttpChannelChild::GetAlternativeDataType
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   aType = mAvailableCachedAltDataType;
   return NS_OK;
 }
 
 NS_IMETHODIMP
-HttpChannelChild::OpenAlternativeOutputStream(const nsACString & aType, nsIOutputStream * *_retval)
+HttpChannelChild::OpenAlternativeOutputStream(const nsACString & aType, int64_t aPredictedSize, nsIOutputStream * *_retval)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Main thread only");
 
   if (mSynthesizedCacheInfo) {
-    return mSynthesizedCacheInfo->OpenAlternativeOutputStream(aType, _retval);
+    return mSynthesizedCacheInfo->OpenAlternativeOutputStream(aType, aPredictedSize, _retval);
   }
 
   if (!mIPCOpen) {
     return NS_ERROR_NOT_AVAILABLE;
   }
   if (static_cast<ContentChild*>(gNeckoChild->Manager())->IsShuttingDown()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
@@ -3121,16 +3121,17 @@ HttpChannelChild::OpenAlternativeOutputS
 
   RefPtr<AltDataOutputStreamChild> stream = new AltDataOutputStreamChild();
   stream->AddIPDLReference();
 
   gNeckoChild->SetEventTargetForActor(stream, neckoTarget);
 
   if (!gNeckoChild->SendPAltDataOutputStreamConstructor(stream,
                                                         nsCString(aType),
+                                                        aPredictedSize,
                                                         this)) {
     return NS_ERROR_FAILURE;
   }
 
   stream.forget(_retval);
   return NS_OK;
 }
 
--- a/netwerk/protocol/http/HttpChannelParent.cpp
+++ b/netwerk/protocol/http/HttpChannelParent.cpp
@@ -2247,24 +2247,24 @@ HttpChannelParent::NotifyDiversionFailed
   // DoSendDeleteSelf will need channel Id to remove the strong reference in
   // BackgroundChannelRegistrar if channel pairing is aborted.
   // Thus we need to keep mChannel until DoSendDeleteSelf is done.
   mParentListener = nullptr;
   mChannel = nullptr;
 }
 
 nsresult
-HttpChannelParent::OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval)
+HttpChannelParent::OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval)
 {
   // We need to make sure the child does not call SendDocumentChannelCleanup()
   // before opening the altOutputStream, because that clears mCacheEntry.
   if (!mCacheEntry) {
     return NS_ERROR_NOT_AVAILABLE;
   }
-  nsresult rv = mCacheEntry->OpenAlternativeOutputStream(type, _retval);
+  nsresult rv = mCacheEntry->OpenAlternativeOutputStream(type, predictedSize, _retval);
   if (NS_SUCCEEDED(rv)) {
     mCacheEntry->SetMetaDataElement("alt-data-from-child", "1");
   }
   return rv;
 }
 
 NS_IMETHODIMP
 HttpChannelParent::GetAuthPrompt(uint32_t aPromptReason, const nsIID& iid,
--- a/netwerk/protocol/http/HttpChannelParent.h
+++ b/netwerk/protocol/http/HttpChannelParent.h
@@ -97,16 +97,17 @@ public:
   // Forwarded to nsHttpChannel::SetApplyConversion.
   void SetApplyConversion(bool aApplyConversion) {
     if (mChannel) {
       mChannel->SetApplyConversion(aApplyConversion);
     }
   }
 
   MOZ_MUST_USE nsresult OpenAlternativeOutputStream(const nsACString & type,
+                                                    int64_t predictedSize,
                                                     nsIOutputStream * *_retval);
 
   // Callbacks for each asynchronous tasks required in AsyncOpen
   // procedure, will call InvokeAsyncOpen when all the expected
   // tasks is finished successfully or when any failure happened.
   // @see mAsyncOpenBarrier.
   void TryInvokeAsyncOpen(nsresult aRv);
 
--- a/netwerk/protocol/http/InterceptedHttpChannel.cpp
+++ b/netwerk/protocol/http/InterceptedHttpChannel.cpp
@@ -1313,20 +1313,20 @@ InterceptedHttpChannel::GetAlternativeDa
 {
   if (mSynthesizedCacheInfo) {
     return mSynthesizedCacheInfo->GetAlternativeDataType(aType);
   }
   return NS_ERROR_NOT_AVAILABLE;
 }
 
 NS_IMETHODIMP
-InterceptedHttpChannel::OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval)
+InterceptedHttpChannel::OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval)
 {
   if (mSynthesizedCacheInfo) {
-    return mSynthesizedCacheInfo->OpenAlternativeOutputStream(type, _retval);
+    return mSynthesizedCacheInfo->OpenAlternativeOutputStream(type, predictedSize, _retval);
   }
   return NS_ERROR_NOT_AVAILABLE;
 }
 
 NS_IMETHODIMP
 InterceptedHttpChannel::GetCacheKey(uint32_t* key)
 {
   if (mSynthesizedCacheInfo) {
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -1568,29 +1568,16 @@ nsHttpChannel::CallOnStartRequest()
                 }
             }
         }
     }
 
     if (mResponseHead && !mResponseHead->HasContentCharset())
         mResponseHead->SetContentCharset(mContentCharsetHint);
 
-    if (mResponseHead && mCacheEntry) {
-        // If we have a cache entry, set its predicted size to TotalEntitySize to
-        // avoid caching an entry that will exceed the max size limit.
-        rv = mCacheEntry->SetPredictedDataSize(
-            mResponseHead->TotalEntitySize());
-        if (NS_ERROR_FILE_TOO_BIG == rv) {
-          // Don't throw the entry away, we will need it later.
-          LOG(("  entry too big"));
-        } else {
-          NS_ENSURE_SUCCESS(rv, rv);
-        }
-    }
-
     LOG(("  calling mListener->OnStartRequest [this=%p, listener=%p]\n", this, mListener.get()));
 
     // About to call OnStartRequest, dismiss the guard object.
     onStartGuard.release();
 
     if (mListener) {
         MOZ_ASSERT(!mOnStartRequestCalled,
                    "We should not call OnStartRequest twice");
@@ -5423,25 +5410,34 @@ nsHttpChannel::InstallCacheListener(int6
 
     LOG(("Trading cache input stream for output stream [channel=%p]", this));
 
     // We must close the input stream first because cache entries do not
     // correctly handle having an output stream and input streams open at
     // the same time.
     mCacheInputStream.CloseAndRelease();
 
+    int64_t predictedSize = mResponseHead->TotalEntitySize();
+    if (predictedSize != -1) {
+        predictedSize -= offset;
+    }
+
     nsCOMPtr<nsIOutputStream> out;
-    rv = mCacheEntry->OpenOutputStream(offset, getter_AddRefs(out));
+    rv = mCacheEntry->OpenOutputStream(offset, predictedSize, getter_AddRefs(out));
     if (rv == NS_ERROR_NOT_AVAILABLE) {
         LOG(("  entry doomed, not writing it [channel=%p]", this));
         // Entry is already doomed.
         // This may happen when expiration time is set to past and the entry
         // has been removed by the background eviction logic.
         return NS_OK;
     }
+    if (rv == NS_ERROR_FILE_TOO_BIG) {
+        LOG(("  entry would exceed max allowed size, not writing it [channel=%p]", this));
+        return NS_OK;
+    }
     if (NS_FAILED(rv)) return rv;
 
     if (mCacheOnlyMetadata) {
         LOG(("Not storing content, cacheOnlyMetadata set"));
         // We must open and then close the output stream of the cache entry.
         // This way we indicate the content has been written (despite with zero
         // length) and the entry is now in the ready state with "having data".
 
@@ -5476,17 +5472,17 @@ nsHttpChannel::InstallOfflineCacheListen
 
     LOG(("Preparing to write data into the offline cache [uri=%s]\n",
          mSpec.get()));
 
     MOZ_ASSERT(mOfflineCacheEntry);
     MOZ_ASSERT(mListener);
 
     nsCOMPtr<nsIOutputStream> out;
-    rv = mOfflineCacheEntry->OpenOutputStream(offset, getter_AddRefs(out));
+    rv = mOfflineCacheEntry->OpenOutputStream(offset, -1, getter_AddRefs(out));
     if (NS_FAILED(rv)) return rv;
 
     nsCOMPtr<nsIStreamListenerTee> tee =
         do_CreateInstance(kStreamListenerTeeCID, &rv);
     if (NS_FAILED(rv)) return rv;
 
     rv = tee->Init(mListener, out, nullptr);
     if (NS_FAILED(rv)) return rv;
@@ -7913,25 +7909,25 @@ nsHttpChannel::GetAlternativeDataType(ns
     if (!mAfterOnStartRequestBegun) {
         return NS_ERROR_NOT_AVAILABLE;
     }
     aType = mAvailableCachedAltDataType;
     return NS_OK;
 }
 
 NS_IMETHODIMP
-nsHttpChannel::OpenAlternativeOutputStream(const nsACString & type, nsIOutputStream * *_retval)
+nsHttpChannel::OpenAlternativeOutputStream(const nsACString & type, int64_t predictedSize, nsIOutputStream * *_retval)
 {
     // OnStopRequest will clear mCacheEntry, but we may use mAltDataCacheEntry
     // if the consumer called PreferAlternativeDataType()
     nsCOMPtr<nsICacheEntry> cacheEntry = mCacheEntry ? mCacheEntry : mAltDataCacheEntry;
     if (!cacheEntry) {
         return NS_ERROR_NOT_AVAILABLE;
     }
-    nsresult rv = cacheEntry->OpenAlternativeOutputStream(type, _retval);
+    nsresult rv = cacheEntry->OpenAlternativeOutputStream(type, predictedSize, _retval);
     if (NS_SUCCEEDED(rv)) {
         // Clear this metadata flag in case it exists.
         // The caller of this method may set it again.
         cacheEntry->SetMetaDataElement("alt-data-from-child", nullptr);
     }
     return rv;
 }
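
In this patch's nsHttpChannel::InstallCacheListener change, the predicted size passed to OpenOutputStream() is the response's total entity size minus the resume offset, with -1 forwarded unchanged when the size is unknown, and NS_ERROR_FILE_TOO_BIG simply means the response is not cached. A small sketch of that adjustment, with totalEntitySize standing in for mResponseHead->TotalEntitySize():

// Compute the predictedSize argument for openOutputStream() when writing
// starts at `offset`; totalEntitySize is -1 when the full size is unknown.
function predictedSizeForWrite(totalEntitySize, offset) {
  if (totalEntitySize === -1) {
    return -1; // unknown; the limit is then only enforced as data is written
  }
  return totalEntitySize - offset; // only the part that still has to be stored
}

predictedSizeForWrite(4096, 1024); // => 3072
predictedSizeForWrite(-1, 1024);   // => -1
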
 
--- a/netwerk/protocol/wyciwyg/nsWyciwygChannel.cpp
+++ b/netwerk/protocol/wyciwyg/nsWyciwygChannel.cpp
@@ -447,17 +447,17 @@ nsWyciwygChannel::WriteToCacheEntry(cons
   if (mNeedToWriteCharset) {
     WriteCharsetAndSourceToCache(mCharsetSource, mCharset);
     mNeedToWriteCharset = false;
   }
 
   uint32_t out;
   if (!mCacheOutputStream) {
     // Get the outputstream from the cache entry.
-    rv = mCacheEntry->OpenOutputStream(0, getter_AddRefs(mCacheOutputStream));
+    rv = mCacheEntry->OpenOutputStream(0, -1, getter_AddRefs(mCacheOutputStream));
     if (NS_FAILED(rv)) return rv;
 
     // Write out a Byte Order Mark, so that we'll know if the data is
     // BE or LE when we go to read it.
     char16_t bom = 0xFEFF;
     rv = mCacheOutputStream->Write((char *)&bom, sizeof(bom), &out);
     if (NS_FAILED(rv)) return rv;
   }
--- a/netwerk/test/unit/head_cache2.js
+++ b/netwerk/test/unit/head_cache2.js
@@ -193,34 +193,34 @@ OpenCallback.prototype =
             self.goon(entry);
 
           return;
         }
         executeSoon(function() { // emulate more network latency
           if (self.behavior & DOOMED) {
             LOG_C2(self, "checking doom state");
             try {
-              var os = entry.openOutputStream(0);
+              var os = entry.openOutputStream(0, -1);
               // Unfortunately, in the undetermined state we cannot even check whether the entry
               // is actually doomed or not.
               os.close();
               Assert.ok(!!(self.behavior & MAYBE_NEW));
             } catch (ex) {
               Assert.ok(true);
             }
             if (self.behavior & WAITFORWRITE)
               self.goon(entry);
             return;
           }
 
           var offset = (self.behavior & PARTIAL)
             ? entry.dataSize
             : 0;
           LOG_C2(self, "openOutputStream @ " + offset);
-          var os = entry.openOutputStream(offset);
+          var os = entry.openOutputStream(offset, -1);
           LOG_C2(self, "writing data");
           var wrt = os.write(self.workingData, self.workingData.length);
           Assert.equal(wrt, self.workingData.length);
           os.close();
           if (self.behavior & WAITFORWRITE)
             self.goon(entry);
 
           entry.close();
--- a/netwerk/test/unit/test_alt-data_cross_process.js
+++ b/netwerk/test/unit/test_alt-data_cross_process.js
@@ -94,17 +94,17 @@ function readServerContent(request, buff
 {
   var cc = request.QueryInterface(Ci.nsICacheInfoChannel);
 
   Assert.equal(buffer, responseContent);
   Assert.equal(cc.alternativeDataType, "");
   check_has_alt_data_in_index(false);
 
   executeSoon(() => {
-    var os = cc.openAlternativeOutputStream(altContentType);
+    var os = cc.openAlternativeOutputStream(altContentType, altContent.length);
     os.write(altContent, altContent.length);
     os.close();
 
     executeSoon(flushAndOpenAltChannel);
   });
 }
 
 function flushAndOpenAltChannel()
--- a/netwerk/test/unit/test_alt-data_overwrite.js
+++ b/netwerk/test/unit/test_alt-data_overwrite.js
@@ -74,17 +74,17 @@ function run_test()
 function readServerContent(request, buffer)
 {
   let cc = request.QueryInterface(Ci.nsICacheInfoChannel);
 
   Assert.equal(buffer, responseContent);
   Assert.equal(cc.alternativeDataType, "");
 
   executeSoon(() => {
-    let os = cc.openAlternativeOutputStream(altContentType);
+    let os = cc.openAlternativeOutputStream(altContentType, altContent.length);
     os.write(altContent, altContent.length);
     os.close();
 
     executeSoon(flushAndOpenAltChannel);
   });
 }
 
 function flushAndOpenAltChannel()
@@ -122,17 +122,17 @@ function readServerContent2(request, buf
   Cu.forceShrinkingGC();
   let cc = request.QueryInterface(Ci.nsICacheInfoChannel);
 
   Assert.equal(fromCache || servedNotModified, true);
   Assert.equal(buffer, responseContent);
   Assert.equal(cc.alternativeDataType, "");
 
   executeSoon(() => {
-    let os = cc.openAlternativeOutputStream(altContentType);
+    let os = cc.openAlternativeOutputStream(altContentType, altContent.length);
     os.write(altContent, altContent.length);
     os.close();
 
     executeSoon(flushAndOpenAltChannel2);
   });
 }
 
 function flushAndOpenAltChannel2()
@@ -160,17 +160,17 @@ function readAltContent2(request, buffer
 
   Assert.equal(servedNotModified || fromCache, true);
   Assert.equal(cc.alternativeDataType, altContentType);
   Assert.equal(buffer, altContent);
 
   executeSoon(() => {
     Cu.forceShrinkingGC();
     info("writing other content\n");
-    let os = cc.openAlternativeOutputStream(altContentType2);
+    let os = cc.openAlternativeOutputStream(altContentType2, altContent2.length);
     os.write(altContent2, altContent2.length);
     os.close();
 
     executeSoon(flushAndOpenAltChannel3);
   });
 }
 
 function flushAndOpenAltChannel3()
--- a/netwerk/test/unit/test_alt-data_simple.js
+++ b/netwerk/test/unit/test_alt-data_simple.js
@@ -100,17 +100,17 @@ function readServerContent(request, buff
 {
   var cc = request.QueryInterface(Ci.nsICacheInfoChannel);
 
   Assert.equal(buffer, responseContent);
   Assert.equal(cc.alternativeDataType, "");
   check_has_alt_data_in_index(false);
 
   executeSoon(() => {
-    var os = cc.openAlternativeOutputStream(altContentType);
+    var os = cc.openAlternativeOutputStream(altContentType, altContent.length);
     os.write(altContent, altContent.length);
     os.close();
 
     executeSoon(flushAndOpenAltChannel);
   });
 }
 
 // needs to be rooted
--- a/netwerk/test/unit/test_alt-data_stream.js
+++ b/netwerk/test/unit/test_alt-data_stream.js
@@ -72,17 +72,17 @@ var os;
 function readServerContent(request, buffer)
 {
   var cc = request.QueryInterface(Ci.nsICacheInfoChannel);
 
   Assert.equal(buffer, responseContent);
   Assert.equal(cc.alternativeDataType, "");
 
   executeSoon(() => {
-    os = cc.openAlternativeOutputStream(altContentType);
+    os = cc.openAlternativeOutputStream(altContentType, altContent.length);
     // Write a quarter of the alt data content
     os.write(altContent, firstChunkSize);
 
     executeSoon(openAltChannel);
   });
 }
 
 function openAltChannel()
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_alt-data_too_big.js
@@ -0,0 +1,100 @@
+/**
+ * Test for handling alternative data that is too big
+ *
+ *  - first we try to open an output stream for alt-data that is too big,
+ *    which must fail and leave the original data intact
+ *
+ *  - then we open the output stream without passing the predicted data size,
+ *    which succeeds, but writing must fail later at the size limit and the
+ *    original data must be kept
+ */
+
+ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+var data = "data    ";
+var altData = "alt-data";
+
+function run_test()
+{
+  do_get_profile();
+
+  // Expand both data and altData to 1MiB
+  for (var i = 0; i < 17; i++) {
+    data += data;
+    altData += altData;
+  }
+
+  // Set the limit so that the data fits but alt-data doesn't.
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1800);
+
+  write_data();
+
+  do_test_pending();
+}
+
+function write_data()
+{
+  asyncOpenCacheEntry("http://data/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null, function (status, entry) {
+    Assert.equal(status, Cr.NS_OK);
+
+    var os = entry.openOutputStream(0, -1);
+    var written = os.write(data, data.length);
+    Assert.equal(written, data.length);
+    os.close();
+
+    open_big_altdata_output(entry);
+  });
+}
+
+function open_big_altdata_output(entry)
+{
+  try {
+    var os = entry.openAlternativeOutputStream("text/binary", altData.length);
+  } catch (e) {
+    Assert.equal(e.result, Cr.NS_ERROR_FILE_TOO_BIG);
+  }
+  entry.close();
+
+  check_entry(write_big_altdata);
+}
+
+function write_big_altdata()
+{
+  asyncOpenCacheEntry("http://data/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null, function (status, entry) {
+    Assert.equal(status, Cr.NS_OK);
+
+    var os = entry.openAlternativeOutputStream("text/binary", -1);
+    try {
+      os.write(altData, altData.length);
+    } catch (e) {
+      Assert.equal(e.result, Cr.NS_ERROR_FILE_TOO_BIG);
+    }
+    os.close();
+    entry.close();
+
+    check_entry(do_test_finished);
+  });
+}
+
+function check_entry(cb)
+{
+  asyncOpenCacheEntry("http://data/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null, function (status, entry) {
+    Assert.equal(status, Cr.NS_OK);
+
+    var is = null;
+    try {
+      is = entry.openAlternativeInputStream("text/binary");
+    } catch (e) {
+      Assert.equal(e.result, Cr.NS_ERROR_NOT_AVAILABLE);
+    }
+
+    is = entry.openInputStream(0);
+    pumpReadStream(is, function(read) {
+      Assert.equal(read.length, data.length);
+      is.close();
+      entry.close();
+
+      executeSoon(cb);
+    });
+  });
+}
--- a/netwerk/test/unit/test_bug248970_cache.js
+++ b/netwerk/test/unit/test_bug248970_cache.js
@@ -50,17 +50,17 @@ function store_entries(cb)
                       Services.loadContextInfo.custom(false,
                         {privateBrowsingId : entries[store_idx][3] ? 0 : 1}),
                       store_data,
                       appCache);
 }
 
 var store_data = function(status, entry) {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(0);
+  var os = entry.openOutputStream(0, entries[store_idx][1].length);
 
   var written = os.write(entries[store_idx][1], entries[store_idx][1].length);
   if (written != entries[store_idx][1].length) {
     do_throw("os.write has not written all data!\n" +
              "  Expected: " + entries[store_idx][1].length  + "\n" +
              "  Actual: " + written + "\n");
   }
   os.close();
--- a/netwerk/test/unit/test_bug482601.js
+++ b/netwerk/test/unit/test_bug482601.js
@@ -84,17 +84,17 @@ function makeChan(url) {
                 .QueryInterface(Ci.nsIHttpChannel);
 }
 
 function storeCache(aCacheEntry, aResponseHeads, aContent) {
   aCacheEntry.setMetaDataElement("request-method", "GET");
   aCacheEntry.setMetaDataElement("response-head", aResponseHeads);
   aCacheEntry.setMetaDataElement("charset", "ISO-8859-1");
 
-  var oStream = aCacheEntry.openOutputStream(0);
+  var oStream = aCacheEntry.openOutputStream(0, aContent.length);
   var written = oStream.write(aContent, aContent.length);
   if (written != aContent.length) {
     do_throw("oStream.write has not written all data!\n" +
              "  Expected: " + written  + "\n" +
              "  Actual: " + aContent.length + "\n");
   }
   oStream.close();
   aCacheEntry.close();
--- a/netwerk/test/unit/test_bug654926.js
+++ b/netwerk/test/unit/test_bug654926.js
@@ -24,18 +24,18 @@ function write_and_check(str, data, len)
              "  Expected: " + len  + "\n" +
              "  Actual: " + written + "\n");
   }
 }
 
 function write_datafile(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(0);
   var data = gen_1MiB();
+  var os = entry.openOutputStream(0, data.length);
 
   // write 2MiB
   var i;
   for (i=0 ; i<2 ; i++)
     write_and_check(os, data, data.length);
 
   os.close();
   entry.close();
@@ -47,17 +47,17 @@ function write_datafile(status, entry)
   asyncOpenCacheEntry("http://data/",
                       "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
                       append_datafile);
 }
 
 function append_datafile(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(entry.dataSize);
+  var os = entry.openOutputStream(entry.dataSize, -1);
   var data = gen_1MiB();
 
   // append 1MiB
   try {
     write_and_check(os, data, data.length);
     do_throw();
   }
   catch (ex) { }
--- a/netwerk/test/unit/test_bug654926_doom_and_read.js
+++ b/netwerk/test/unit/test_bug654926_doom_and_read.js
@@ -22,36 +22,36 @@ function make_input_stream_scriptable(in
                 createInstance(Ci.nsIScriptableInputStream);
   wrapper.init(input);
   return wrapper;
 }
 
 function write_datafile(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(0);
   var data = gen_1MiB();
+  var os = entry.openOutputStream(0, data.length);
 
   write_and_check(os, data, data.length);
 
   os.close();
   entry.close();
 
   // open, doom, append, read
   asyncOpenCacheEntry("http://data/",
                       "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
                       test_read_after_doom);
 
 }
 
 function test_read_after_doom(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(entry.dataSize);
   var data = gen_1MiB();
+  var os = entry.openOutputStream(entry.dataSize, data.length);
 
   entry.asyncDoom(null);
   write_and_check(os, data, data.length);
 
   os.close();
 
   var is = entry.openInputStream(0);
   pumpReadStream(is, function(read) {
--- a/netwerk/test/unit/test_bug654926_test_seek.js
+++ b/netwerk/test/unit/test_bug654926_test_seek.js
@@ -15,34 +15,34 @@ function write_and_check(str, data, len)
              "  Expected: " + len  + "\n" +
              "  Actual: " + written + "\n");
   }
 }
 
 function write_datafile(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(0);
   var data = gen_1MiB();
+  var os = entry.openOutputStream(0, data.length);
 
   write_and_check(os, data, data.length);
 
   os.close();
   entry.close();
 
   // try to open the entry for appending
   asyncOpenCacheEntry("http://data/",
                       "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
                       open_for_readwrite);
 }
 
 function open_for_readwrite(status, entry)
 {
   Assert.equal(status, Cr.NS_OK);
-  var os = entry.openOutputStream(entry.dataSize);
+  var os = entry.openOutputStream(entry.dataSize, -1);
 
   // Opening the entry for appending data calls nsDiskCacheStreamIO::Seek()
   // which initializes mFD. If no data is written then mBufDirty is false and
   // mFD won't be closed in nsDiskCacheStreamIO::Flush().
 
   os.close();
   entry.close();
 
--- a/netwerk/test/unit/test_cache-entry-id.js
+++ b/netwerk/test/unit/test_cache-entry-id.js
@@ -76,17 +76,17 @@ function check(response, content, prefer
   Assert.ok(!cacheEntryIdChecker || cacheEntryIdChecker(response.cacheEntryId));
 
   return response;
 }
 
 function writeAltData(request)
 {
   var cc = request.QueryInterface(Ci.nsICacheInfoChannel);
-  var os = cc.openAlternativeOutputStream(altContentType);
+  var os = cc.openAlternativeOutputStream(altContentType, altContent.length);
   os.write(altContent, altContent.length);
   os.close();
   gc(); // We need to do a GC pass to ensure the cache entry has been freed.
 
   return new Promise(resolve => {
     if (isParentProcess()) {
       Services.cache2.QueryInterface(Ci.nsICacheTesting)
               .flush(resolve);
--- a/netwerk/test/unit/test_cache2-25-chunk-memory-limit.js
+++ b/netwerk/test/unit/test_cache2-25-chunk-memory-limit.js
@@ -20,24 +20,24 @@ function run_test()
                      getService(Ci.nsIPrefBranch);
 
   // set max chunks memory so that only one full chunk fits within the limit
   prefBranch.setIntPref("browser.cache.disk.max_chunks_memory_usage", 300);
 
   asyncOpenCacheEntry("http://a/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
     function(status, entry) {
       Assert.equal(status, Cr.NS_OK);
-      oStr = entry.openOutputStream(0);
       var data = gen_200k();
+      oStr = entry.openOutputStream(0, data.length);
       Assert.equal(data.length, oStr.write(data, data.length));
 
       asyncOpenCacheEntry("http://b/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
         function(status, entry) {
           Assert.equal(status, Cr.NS_OK);
-          var oStr2 = entry.openOutputStream(0);
+          var oStr2 = entry.openOutputStream(0, data.length);
           do_check_throws_nsIException(() => oStr2.write(data, data.length), 'NS_ERROR_OUT_OF_MEMORY');
           finish_cache2_test();
         }
       );
     }
   );
 
   do_test_pending();
--- a/netwerk/test/unit/test_compressappend.js
+++ b/netwerk/test/unit/test_compressappend.js
@@ -30,28 +30,28 @@ TestAppend.prototype = {
                         "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
                         this.writeData.bind(this));
   },
 
   writeData: function(status, entry) {
     Assert.equal(status, Cr.NS_OK);
     if (this._compress)
       entry.setMetaDataElement("uncompressed-len", "0");
-    var os = entry.openOutputStream(0);
+    var os = entry.openOutputStream(0, 5);
     write_and_check(os, "12345", 5);
     os.close();
     entry.close();
     asyncOpenCacheEntry("http://data/",
                         "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
                         this.appendData.bind(this));
   },
 
   appendData: function(status, entry) {
     Assert.equal(status, Cr.NS_OK);
-    var os = entry.openOutputStream(entry.storageDataSize);
+    var os = entry.openOutputStream(entry.storageDataSize, 5);
     write_and_check(os, "abcde", 5);
     os.close();
     entry.close();
 
     asyncOpenCacheEntry("http://data/",
                         "disk", Ci.nsICacheStorage.OPEN_READONLY, null,
                         this.checkData.bind(this));
   },
--- a/netwerk/test/unit/test_doomentry.js
+++ b/netwerk/test/unit/test_doomentry.js
@@ -25,17 +25,17 @@ function write_and_check(str, data, len)
              "  Expected: " + len  + "\n" +
              "  Actual: " + written + "\n");
   }
 }
 
 function write_entry()
 {
   asyncOpenCacheEntry("http://testentry/", "disk", Ci.nsICacheStorage.OPEN_TRUNCATE, null, function(status, entry) {
-    write_entry_cont(entry, entry.openOutputStream(0));
+    write_entry_cont(entry, entry.openOutputStream(0, -1));
   });
 }
 
 function write_entry_cont(entry, ostream)
 {
   var data = "testdata";
   write_and_check(ostream, data, data.length);
   ostream.close();
@@ -48,17 +48,17 @@ function check_doom1(status)
   Assert.equal(status, Cr.NS_OK);
   doom("http://nonexistententry/", check_doom2);
 }
 
 function check_doom2(status)
 {
   Assert.equal(status, Cr.NS_ERROR_NOT_AVAILABLE);
   asyncOpenCacheEntry("http://testentry/", "disk", Ci.nsICacheStorage.OPEN_TRUNCATE, null, function(status, entry) {
-    write_entry2(entry, entry.openOutputStream(0));
+    write_entry2(entry, entry.openOutputStream(0, -1));
   });
 }
 
 var gEntry;
 var gOstream;
 function write_entry2(entry, ostream)
 {
   // write some data and doom the entry while it is active
--- a/netwerk/test/unit/test_http2.js
+++ b/netwerk/test/unit/test_http2.js
@@ -1031,17 +1031,17 @@ Http2DiskCachePushListener.onStopRequest
     chan.loadGroup = loadGroup;
     chan.asyncOpen2(listener);
 };
 
 function continue_test_http2_disk_cache_push(status, entry, appCache) {
   // TODO - store stuff in cache entry, then open an h2 channel that will push
   // this, once that completes, open a channel for the cache entry we made and
   // ensure it came from disk cache, not the push cache.
-  var outputStream = entry.openOutputStream(0);
+  var outputStream = entry.openOutputStream(0, -1);
   outputStream.write(DISK_CACHE_DATA, DISK_CACHE_DATA.length);
 
   // Now we open our URL that will push data for the URL above
   var chan = makeChan("https://localhost:" + serverPort + "/pushindisk");
   var listener = new Http2DiskCachePushListener();
   chan.loadGroup = loadGroup;
   chan.asyncOpen2(listener);
 }
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -388,16 +388,17 @@ skip-if = os == "android"
 [test_dns_disable_ipv6.js]
 [test_bug1195415.js]
 [test_cookie_blacklist.js]
 [test_getHost.js]
 [test_bug412457.js]
 [test_bug464591.js]
 [test_alt-data_simple.js]
 [test_alt-data_stream.js]
+[test_alt-data_too_big.js]
 [test_alt-data_overwrite.js]
 [test_cache-control_request.js]
 [test_bug1279246.js]
 [test_throttlequeue.js]
 [test_throttlechannel.js]
 [test_throttling.js]
 [test_separate_connections.js]
 [test_trackingProtection_annotateChannels.js]
--- a/netwerk/test/unit_ipc/test_alt-data_cross_process_wrap.js
+++ b/netwerk/test/unit_ipc/test_alt-data_cross_process_wrap.js
@@ -46,17 +46,17 @@ function readTextData(request, buffer)
   // Since we are in a different process from what that generated the alt-data,
   // we should receive the original data, not processed content.
   Assert.equal(cc.alternativeDataType, "");
   Assert.equal(buffer, "response body");
 
   // Now let's generate some alt-data in the parent, and make sure we can get it
   var altContent = "altContentParentGenerated";
   executeSoon(() => {
-    var os = cc.openAlternativeOutputStream("text/parent-binary");
+    var os = cc.openAlternativeOutputStream("text/parent-binary", altContent.length);
     os.write(altContent, altContent.length);
     os.close();
 
     executeSoon(() => {
       Services.cache2.QueryInterface(Ci.nsICacheTesting).flush(cacheFlushObserver2);
     });
   });
 }
--- a/servo/ports/geckolib/Cargo.toml
+++ b/servo/ports/geckolib/Cargo.toml
@@ -12,17 +12,16 @@ crate-type = ["staticlib", "rlib"]
 [features]
 bindgen = ["style/use_bindgen"]
 gecko_debug = ["style/gecko_debug"]
 
 [dependencies]
 atomic_refcell = "0.1"
 cssparser = "0.23.0"
 cstr = "0.1.2"
-env_logger = {version = "0.5", default-features = false} # disable `regex` to reduce code size
 libc = "0.2"
 log = {version = "0.4", features = ["release_max_level_info"]}
 malloc_size_of = {path = "../../components/malloc_size_of"}
 nsstring = {path = "../../support/gecko/nsstring"}
 parking_lot = "0.5"
 selectors = {path = "../../components/selectors"}
 servo_arc = {path = "../../components/servo_arc"}
 smallvec = "0.6"
--- a/servo/ports/geckolib/glue.rs
+++ b/servo/ports/geckolib/glue.rs
@@ -1,23 +1,21 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use cssparser::{ParseErrorKind, Parser, ParserInput, SourceLocation};
 use cssparser::ToCss as ParserToCss;
-use env_logger::Builder;
 use malloc_size_of::MallocSizeOfOps;
 use nsstring::nsCString;
 use selectors::{NthIndexCache, SelectorList};
 use selectors::matching::{MatchingContext, MatchingMode, matches_selector};
 use servo_arc::{Arc, ArcBorrow, RawOffsetArc};
 use smallvec::SmallVec;
 use std::cell::RefCell;
-use std::env;
 use std::fmt::Write;
 use std::iter;
 use std::mem;
 use std::os::raw::c_void;
 use std::ptr;
 use style::applicable_declarations::ApplicableDeclarationBlock;
 use style::author_styles::AuthorStyles;
 use style::context::{CascadeInputs, QuirksMode, SharedStyleContext, StyleContext};
@@ -179,24 +177,16 @@ use super::stylesheet_loader::{AsyncStyl
 // Dummy url data used where we don't pass url data in.
 // We need to get rid of this sooner rather than later.
 static mut DUMMY_URL_DATA: *mut URLExtraData = 0 as *mut URLExtraData;
 
 #[no_mangle]
 pub extern "C" fn Servo_Initialize(dummy_url_data: *mut URLExtraData) {
     use style::gecko_bindings::sugar::origin_flags;
 
-    // Initialize logging.
-    let mut builder = Builder::new();
-    let default_level = if cfg!(debug_assertions) { "warn" } else { "error" };
-    match env::var("RUST_LOG") {
-      Ok(v) => builder.parse(&v).init(),
-      _ => builder.parse(default_level).init(),
-    };
-
     // Pretend that we're a Servo Layout thread, to make some assertions happy.
     thread_state::initialize(thread_state::ThreadState::LAYOUT);
 
     // Perform some debug-only runtime assertions.
     restyle_hints::assert_restyle_hints_match();
     origin_flags::assert_flags_match();
     parser::assert_parsing_mode_match();
     traversal_flags::assert_traversal_flags_match();
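
The env_logger setup removed from Servo_Initialize above is replaced by the process-wide GeckoLogger that GkRust_Init installs later in this patch (toolkit/library/rust/shared/lib.rs). One reason such initialization is done in exactly one place is that the log 0.4 facade accepts only a single global logger per process; the sketch below (not Gecko code; assumes a plain Cargo project with log = "0.4" and env_logger = "0.5", which pulls in log's "std" feature needed for set_boxed_logger) illustrates that a second installation attempt is rejected.

    extern crate env_logger;
    extern crate log;

    fn try_install() -> Result<(), log::SetLoggerError> {
        // Build an env_logger filtered at "warn", then hand it to the `log` facade.
        let logger = env_logger::Builder::new().parse("warn").build();
        log::set_max_level(logger.filter());
        log::set_boxed_logger(Box::new(logger))
    }

    fn main() {
        assert!(try_install().is_ok());  // the first installation succeeds
        assert!(try_install().is_err()); // the facade rejects a second logger
    }
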
--- a/servo/ports/geckolib/lib.rs
+++ b/servo/ports/geckolib/lib.rs
@@ -1,16 +1,15 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 
 extern crate cssparser;
 #[macro_use] extern crate cstr;
-extern crate env_logger;
 extern crate libc;
 #[macro_use] extern crate log;
 extern crate malloc_size_of;
 extern crate nsstring;
 extern crate selectors;
 extern crate servo_arc;
 extern crate smallvec;
 #[macro_use] extern crate style;
--- a/servo/ports/geckolib/tests/lib.rs
+++ b/servo/ports/geckolib/tests/lib.rs
@@ -9,17 +9,16 @@
 // On Linux and OS X linking succeeds anyway.
 // Presumably these symbol declarations don’t need to be resolved
 // as they’re not used in any code called from this crate.
 #![cfg(any(linking_with_gecko, not(windows)))]
 
 extern crate atomic_refcell;
 extern crate cssparser;
 #[macro_use] extern crate cstr;
-extern crate env_logger;
 extern crate geckoservo;
 #[macro_use] extern crate log;
 extern crate malloc_size_of;
 extern crate selectors;
 extern crate smallvec;
 #[macro_use] extern crate size_of_test;
 #[macro_use] extern crate style;
 extern crate style_traits;
--- a/testing/config/tooltool-manifests/linux32/hostutils.manifest
+++ b/testing/config/tooltool-manifests/linux32/hostutils.manifest
@@ -1,10 +1,10 @@
 [
   {
-    "size": 76254067,
+    "size": 75344735,
     "visibility": "public",
-    "digest": "4371be5328a0bc01ff98aeb753e8d70677712fabd748aa871402152bf8416e703fbaec9bbe1b6e7cb21d30ff5e7071441b5783264df01de77f83a86e95c25348",
+    "digest": "cfbb0445c82b951e84c2999f509fd2574ab8ae66e0ff18025b97bc73c1858fe1d0b7695a862f9c5e065db1434b486cd51615df9ba2560f0b68dba967bc6db557",
     "algorithm": "sha512",
-    "filename": "host-utils-60.0a2.en-US.linux-i686.tar.gz",
+    "filename": "host-utils-61.0a1.en-US.linux-i686.tar.gz",
     "unpack": true
   }
 ]
--- a/testing/config/tooltool-manifests/linux64/hostutils.manifest
+++ b/testing/config/tooltool-manifests/linux64/hostutils.manifest
@@ -1,10 +1,10 @@
 [
   {
-    "size": 75035382,
+    "size": 74035066,
     "visibility": "public",
-    "digest": "3e16ca4a0dea9522a8e917b49ee700a162e83297a5c16485dd418c31b1b8bb977b2ca1ff16ccd05a1b0cf0d83d72bfbc92e12ba9f8e60c368cf1051639546472",
+    "digest": "a8a5e59cbd2c2a8102a5cb195254b1afeef067e17c5e77b30ea65e1a97508101cb17ee02f58b4126512af55f4eed8ea13beb29283538ca9fefc605398ed2d96a",
     "algorithm": "sha512",
-    "filename": "host-utils-60.0a3.en-US.linux-x86_64.tar.gz",
+    "filename": "host-utils-61.0a1.en-US.linux-x86_64.tar.gz",
     "unpack": true
   }
 ]
--- a/testing/web-platform/meta/mixed-content/link-prefetch-tag/no-opt-in/same-host-https/top-level/keep-scheme-redirect/allowed/allowed.https.html.ini
+++ b/testing/web-platform/meta/mixed-content/link-prefetch-tag/no-opt-in/same-host-https/top-level/keep-scheme-redirect/allowed/allowed.https.html.ini
@@ -1,6 +1,7 @@
 [allowed.https.html]
   disabled:
     if os == "win": https://bugzilla.mozilla.org/show_bug.cgi?id=1183627
+    if (os == "linux") and webrender and not debug: https://bugzilla.mozilla.org/show_bug.cgi?id=1451142
   [opt_in_method: no-opt-in\n                                 origin: same-host-https\n                                 source_scheme: https\n                                 context_nesting: top-level\n                                 redirection: keep-scheme-redirect\n                                 subresource: link-prefetch-tag\n                                 expectation: allowed]
     bug: the test case uses "no-cache" HTTP header. send an error until we have conclusion at https://github.com/w3c/resource-hints/issues/62
 
--- a/toolkit/components/moz.build
+++ b/toolkit/components/moz.build
@@ -104,13 +104,10 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'wind
     DIRS += ['gfx']
 
 if CONFIG['MOZ_WIDGET_TOOLKIT'] != 'android':
     EXTRA_COMPONENTS += [
         'nsDefaultCLH.js',
         'nsDefaultCLH.manifest',
     ]
 
-if CONFIG['NIGHTLY_BUILD'] and CONFIG['MOZ_BUILD_APP'] == 'browser':
-    DIRS += ['payments']
-
 if CONFIG['MOZ_BUILD_APP'] == 'browser':
     DIRS += ['normandy']
--- a/toolkit/components/prompts/src/nsPrompter.js
+++ b/toolkit/components/prompts/src/nsPrompter.js
@@ -435,18 +435,23 @@ function openRemotePrompt(domWin, args, 
     let messageManager = docShell.QueryInterface(Ci.nsIInterfaceRequestor)
                                  .getInterface(Ci.nsITabChild)
                                  .messageManager;
 
     let inPermitUnload = docShell.contentViewer && docShell.contentViewer.inPermitUnload;
     let eventDetail = Cu.cloneInto({tabPrompt, inPermitUnload}, domWin);
     PromptUtils.fireDialogEvent(domWin, "DOMWillOpenModalDialog", null, eventDetail);
 
-    let winUtils = domWin.QueryInterface(Ci.nsIInterfaceRequestor)
-                         .getInterface(Ci.nsIDOMWindowUtils);
+    // If domWin is reloaded while we're showing a remote modal
+    // dialog, it is possible to detach domWin from its tree, and make
+    // it impossible to reach its scriptable top,
+    // a.k.a. window.top. To prevent this, make sure to enter/exit
+    // modal state beginning from top.
+    let winUtils = domWin.top.QueryInterface(Ci.nsIInterfaceRequestor)
+                             .getInterface(Ci.nsIDOMWindowUtils);
     winUtils.enterModalState();
     let closed = false;
 
     let frameMM = docShell.getInterface(Ci.nsIContentFrameMessageManager);
 
     // It should be hard or impossible to cause a window to create multiple
     // prompts, but just in case, give our prompt an ID.
     let id = "id" + Cc["@mozilla.org/uuid-generator;1"]
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -5727,19 +5727,20 @@
     "expires_in_version": "50",
     "kind": "exponential",
     "high": 1000,
     "n_buckets": 30,
     "description": "Firefox: Time taken to kick off image compression of the canvas that will be used during swiping through history (ms)."
   },
   "FX_TAB_CLOSE_TIME_ANIM_MS": {
     "record_in_processes": ["main", "content"],
-    "alert_emails": ["mconley@mozilla.com", "hkirschner@mozilla.com"],
+    "alert_emails": ["mconley@mozilla.com", "hkirschner@mozilla.com", "sphilp@mozilla.com"],
     "bug_numbers": [1340842],
     "expires_in_version": "65",
+    "releaseChannelCollection": "opt-out",
     "kind": "exponential",
     "high": 10000,
     "n_buckets": 50,
     "description": "Firefox: Time taken from the point of closing a tab (with animation), to the browser element being removed from the DOM. (ms)."
   },
   "FX_TAB_CLOSE_TIME_NO_ANIM_MS": {
     "record_in_processes": ["main", "content"],
     "alert_emails": ["mconley@mozilla.com", "hkirschner@mozilla.com"],
@@ -5887,16 +5888,18 @@
     "record_in_processes": ["main", "content"],
     "expires_in_version": "46",
     "kind": "count",
     "description": "The number of times that a session enters browser fullscreen (f11-fullscreen)"
   },
   "FX_NEW_WINDOW_MS": {
     "record_in_processes": ["main", "content"],
     "expires_in_version": "default",
+    "alert_emails": ["mconley@mozilla.com", "hkirschner@mozilla.com", "sphilp@mozilla.com"],
+    "releaseChannelCollection": "opt-out",
     "kind": "exponential",
     "high": 10000,
     "n_buckets": 20,
     "description": "Firefox: Time taken to open a new browser window (ms)"
   },
   "FX_PAGE_LOAD_MS": {
     "record_in_processes": ["main", "content"],
     "expires_in_version": "default",
--- a/toolkit/components/telemetry/docs/collection/histograms.rst
+++ b/toolkit/components/telemetry/docs/collection/histograms.rst
@@ -102,27 +102,29 @@ Declaring a Histogram
 
 Histograms should be declared in the `Histograms.json <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Histograms.json>`_ file. These declarations are checked for correctness at `compile time <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/gen_histogram_data.py>`_ and used to generate C++ code.
 
 The following is a sample histogram declaration from ``Histograms.json`` for a histogram named ``MEMORY_RESIDENT`` which tracks the amount of resident memory used by a process:
 
 
 .. code-block:: json
 
+  {
     "MEMORY_RESIDENT": {
       "record_in_processes": ["main", "content"],
       "alert_emails": ["team@mozilla.xyz"],
       "expires_in_version": "never",
       "kind": "exponential",
       "low": 32768,
       "high": 1048576,
       "n_buckets": 50,
       "bug_numbers": [12345],
       "description": "Resident memory size (KB)"
-    },
+    }
+  }
 
 Histograms which track timings in milliseconds or microseconds should suffix their names with ``"_MS"`` and ``"_US"`` respectively. Flag-type histograms should have the suffix ``"_FLAG"`` in their name.
 
 The possible fields in a histogram declaration are listed below.
 
 ``record_in_processes``
 -----------------------
 Required. This field is a list of processes this histogram can be recorded in. Currently-supported values are:
--- a/toolkit/components/telemetry/docs/collection/hybrid-content.rst
+++ b/toolkit/components/telemetry/docs/collection/hybrid-content.rst
@@ -24,17 +24,17 @@ 3. registering the probes after the libr
 4. using the API to send Telemetry.
 
 Granting the privileges
 -----------------------
 For security/privacy reasons `Mozilla.ContentTelemetry` will only work on a list of allowed secure origins. The list of allowed origins can be found in `browser/app/permissions <https://dxr.mozilla.org/mozilla-central/source/browser/app/permissions>`_ . A host needs to be given the ``hc_telemetry`` permission in order to be whitelisted.
 
 Example:
 
-.. code-block:: csv
+::
 
   origin  hc_telemetry  1 https://discovery.addons.mozilla.org
 
 Adding an entry to the ``permissions`` file requires riding the trains. If "go-faster" content requires
 granting permissions to a Mozilla page, it can do so by using the `permission manager <https://developer.mozilla.org/en-US/docs/Mozilla/Tech/XPCOM/Reference/Interface/nsIPermissionManager>`_
 
 .. code-block:: js
 
--- a/toolkit/components/telemetry/docs/data/health-ping.rst
+++ b/toolkit/components/telemetry/docs/data/health-ping.rst
@@ -76,17 +76,17 @@ This field lists the number of failed pi
 
 The recorded failure types are:
 
 * "eOK" - No error.
 * "eRequest" - There was some error in the request before we started to service it.
 * "eUnreachable" - The remote server was unreachable.
 * "eChannelOpen" - The connection failed when we tried to open the channel.
 * "eRedirect" - The connection failed when being redirected.
-* "abort" - What XMLHttpRequest means by "abort" (see `MDN <https://developer.mozilla.org/en-US/docs/Web/Events/abort>`_)
-* "timeout" - What XMLHttpRequest means by "timeout" (see `MDN <https://developer.mozilla.org/en-US/docs/Web/Events/timeout>`_)
+* "abort" - What XMLHttpRequest means by "abort" (see `MDN <https://developer.mozilla.org/en-US/docs/Web/Events/abort>`__)
+* "timeout" - What XMLHttpRequest means by "timeout" (see `MDN <https://developer.mozilla.org/en-US/docs/Web/Events/timeout>`__)
 
 This field is optional.
 
 .. note::
 
     Although both ``pingDiscardedForSize`` and ``sendFailure`` fields are optional, the health ping will only
     be submitted if at least one of these fields is not empty.
--- a/toolkit/components/telemetry/docs/fhr/dataformat.rst
+++ b/toolkit/components/telemetry/docs/fhr/dataformat.rst
@@ -1982,16 +1982,17 @@ numSuccessfulFills
 
 numTotalLoginsEncountered
     Number of times a login form was encountered by the user in the session.
 
 Example
 ^^^^^^^
 
 ::
+
     "org.mozilla.passwordmgr.passwordmgr": {
       "_v": 2,
       "numSavedPasswords": 32,
       "enabled": 1,
       "numNewSavedPasswords": 5,
       "numSuccessfulFills": 11,
       "numTotalLoginsEncountered": 23,
     }
--- a/toolkit/components/telemetry/docs/start/adding-a-new-probe.rst
+++ b/toolkit/components/telemetry/docs/start/adding-a-new-probe.rst
@@ -1,33 +1,33 @@
 ============================
 Adding a new Telemetry probe
 ============================
 
 In Firefox, the Telemetry system collects various measures of Firefox performance, hardware, usage and customizations, and submits them to Mozilla. This article provides an overview of what is needed to add any new Telemetry data collection.
 
 .. important::
 
-    Every new data collection in Firefox needs a `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection#Requesting_Approval>`_ from a data collection peer. Just set the feedback? flag for one of the data peers. They try to reply within a business day.
+    Every new data collection in Firefox needs a `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection#Requesting_Approval>`__ from a data collection peer. Just set the feedback? flag for one of the data peers. They try to reply within a business day.
 
 What is your goal?
 ==================
 
 We have various :doc:`data collection tools <../collection/index>` available, each serving different needs. Before diving right into technical details, it is best to take a step back and consider what you need to achieve.
 
 Your goal could be to answer product questions like “how many people use feature X?” or “what is the error rate of service Y?”.
 You could also be focused more on answering engineering questions, say “which web features are most used?” or “how is the performance of engine Z?”.
 
 From there, questions you should ask are:
 
 - What is the minimum data that can answer your questions?
 - How many people do you need this data from?
 - Is data from the pre-release channels sufficient?
 
-This also informs the `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection>`_, which requires a plan for how to use the data. Data collection review is required for all new data collection.
+This also informs the `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection>`__, which requires a plan for how to use the data. Data collection review is required for all new data collection.
 
 Data collection levels
 ======================
 
 Most of our data collection falls into one of two levels, *release* and *pre-release*.
 
 **Release data** is recorded by default on all channels, users need to explicitly opt out to disable it. This has `stricter constraints <https://wiki.mozilla.org/Firefox/Data_Collection#Requirements>`_ for what data we can collect. "Most" users submit this data.
 
--- a/toolkit/components/telemetry/histogram-whitelists.json
+++ b/toolkit/components/telemetry/histogram-whitelists.json
@@ -172,17 +172,16 @@
     "FONTLIST_INITOTHERFAMILYNAMES",
     "FONTLIST_INITOTHERFAMILYNAMES_NO_DEFERRING",
     "FONT_CACHE_HIT",
     "FORCED_DEVICE_RESET_REASON",
     "FX_BOOKMARKS_TOOLBAR_INIT_MS",
     "FX_BROWSER_FULLSCREEN_USED",
     "FX_GESTURE_COMPRESS_SNAPSHOT_OF_PAGE",
     "FX_GESTURE_INSTALL_SNAPSHOT_OF_PAGE",
-    "FX_NEW_WINDOW_MS",
     "FX_PAGE_LOAD_MS",
     "FX_SESSION_RESTORE_NUMBER_OF_EAGER_TABS_RESTORED",
     "FX_SESSION_RESTORE_NUMBER_OF_TABS_RESTORED",
     "FX_SESSION_RESTORE_NUMBER_OF_WINDOWS_RESTORED",
     "FX_TABLETMODE_PAGE_LOAD",
     "FX_TAB_CLICK_MS",
     "FX_TAB_SWITCH_SPINNER_VISIBLE_MS",
     "FX_THUMBNAILS_BG_CAPTURE_CANVAS_DRAW_TIME_MS",
--- a/toolkit/content/license.html
+++ b/toolkit/content/license.html
@@ -1,9 +1,9 @@
-<!DOCTYPE HTML>
+<!DOCTYPE HTML>
 <!-- This Source Code Form is subject to the terms of the Mozilla Public
    - License, v. 2.0. If a copy of the MPL was not distributed with this file,
    - You can obtain one at http://mozilla.org/MPL/2.0/.  -->
 
 <html lang="en">
   <head>
     <meta http-equiv="Content-Security-Policy" content="default-src 'none'; style-src chrome:; img-src chrome:" />
     <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
@@ -5304,17 +5304,17 @@ SOFTWARE.
 </pre>
 
 
     <hr>
 
     <h1><a id="polymer"></a>Polymer License</h1>
 
     <p>This license applies to the file
-    <code>toolkit/components/payments/res/vendor/custom-elements.min.js</code>.</p>
+    <code>browser/components/payments/res/vendor/custom-elements.min.js</code>.</p>
 
 <pre>
 Copyright (c) 2014 The Polymer Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
 met:
 
--- a/toolkit/library/rust/shared/Cargo.toml
+++ b/toolkit/library/rust/shared/Cargo.toml
@@ -19,17 +19,18 @@ cubeb-pulse = { path = "../../../../medi
 cubeb-sys = { version = "0.5.0", optional = true, features=["gecko-in-tree"] }
 encoding_c = "0.8.0"
 encoding_glue = { path = "../../../../intl/encoding_glue" }
 audioipc-client = { path = "../../../../media/audioipc/client", optional = true }
 audioipc-server = { path = "../../../../media/audioipc/server", optional = true }
 u2fhid = { path = "../../../../dom/webauthn/u2f-hid-rs" }
 rsdparsa_capi = { path = "../../../../media/webrtc/signaling/src/sdp/rsdparsa_capi" }
 # We have these to enforce common feature sets for said crates.
-log = {version = "0.3", features = ["release_max_level_info"]}
+log = {version = "0.4", features = ["release_max_level_info"]}
+env_logger = {version = "0.5", default-features = false} # disable `regex` to reduce code size
 cose-c = { version = "0.1.5" }
 
 [features]
 default = []
 bindgen = ["geckoservo/bindgen"]
 servo = ["geckoservo"]
 quantum_render = ["webrender_bindings"]
 cubeb-remoting = ["cubeb-sys", "audioipc-client", "audioipc-server"]
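
The `release_max_level_info` feature requested here caps the log crate's compile-time maximum level at Info when debug assertions are off, so `debug!`/`trace!` call sites effectively compile away in release builds while remaining available in debug builds. A minimal standalone sketch of the effect (an assumption-laden illustration, not Gecko code; assumes a plain Cargo project with log = { version = "0.4", features = ["release_max_level_info"] }):

    #[macro_use]
    extern crate log;

    fn main() {
        // With the feature enabled and debug_assertions off (a release build),
        // this prints `Info`; in a debug build it prints `Trace`.
        println!("static max level: {:?}", log::STATIC_MAX_LEVEL);

        // No logger is installed in this sketch, so nothing is emitted either
        // way; the point is that the debug! call site below is statically
        // disabled in release builds and its formatting work is skipped.
        info!("enabled at compile time in both build profiles");
        debug!("statically disabled in release builds");
    }
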
--- a/toolkit/library/rust/shared/lib.rs
+++ b/toolkit/library/rust/shared/lib.rs
@@ -17,27 +17,99 @@ extern crate webrender_bindings;
 #[cfg(feature = "cubeb_pulse_rust")]
 extern crate cubeb_pulse;
 extern crate encoding_c;
 extern crate encoding_glue;
 #[cfg(feature = "cubeb-remoting")]
 extern crate audioipc_client;
 #[cfg(feature = "cubeb-remoting")]
 extern crate audioipc_server;
+extern crate env_logger;
 extern crate u2fhid;
 extern crate log;
 extern crate cosec;
 extern crate rsdparsa_capi;
 
 use std::boxed::Box;
-use std::ffi::CStr;
+use std::env;
+use std::ffi::{CStr, CString};
 use std::os::raw::c_char;
 use std::panic;
 
+extern "C" {
+    fn gfx_critical_note(msg: *const c_char);
+}
 
+struct GeckoLogger {
+    logger: env_logger::Logger
+}
+
+impl GeckoLogger {
+    fn new() -> GeckoLogger {
+        let mut builder = env_logger::Builder::new();
+        let default_level = if cfg!(debug_assertions) { "warn" } else { "error" };
+        let logger = match env::var("RUST_LOG") {
+            Ok(v) => builder.parse(&v).build(),
+            _ => builder.parse(default_level).build(),
+        };
+
+        GeckoLogger {
+            logger
+        }
+    }
+
+    fn init() -> Result<(), log::SetLoggerError> {
+        let gecko_logger = Self::new();
+
+        log::set_max_level(gecko_logger.logger.filter());
+        log::set_boxed_logger(Box::new(gecko_logger))
+    }
+
+    fn should_log_to_gfx_critical_note(record: &log::Record) -> bool {
+        record.level() == log::Level::Error &&
+            record.target().contains("webrender")
+    }
+
+    fn maybe_log_to_gfx_critical_note(&self, record: &log::Record) {
+        if Self::should_log_to_gfx_critical_note(record) {
+            let msg = CString::new(format!("{}", record.args())).unwrap();
+            unsafe {
+                gfx_critical_note(msg.as_ptr());
+            }
+        }
+    }
+}
+
+impl log::Log for GeckoLogger {
+    fn enabled(&self, metadata: &log::Metadata) -> bool {
+        self.logger.enabled(metadata)
+    }
+
+    fn log(&self, record: &log::Record) {
+        // Forward the record to gfxCriticalNote if it should appear in the gfx crash log.
+        self.maybe_log_to_gfx_critical_note(record);
+        self.logger.log(record);
+    }
+
+    fn flush(&self) { }
+}
+
+#[no_mangle]
+pub extern "C" fn GkRust_Init() {
+    // Initialize logging.
+    let _ = GeckoLogger::init();
+}
+
+#[no_mangle]
+pub extern "C" fn GkRust_Shutdown() {
+}
 
 /// Used to implement `nsIDebug2::RustPanic` for testing purposes.
 #[no_mangle]
 pub extern "C" fn intentional_panic(message: *const c_char) {
     panic!("{}", unsafe { CStr::from_ptr(message) }.to_string_lossy());
 }
 
 /// Contains the panic message, if set.
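
GeckoLogger above builds its filter from the RUST_LOG environment variable via env_logger's Builder::parse, falling back to "warn" in debug builds and "error" in release builds. The filter string follows the usual env_logger syntax: an optional default level plus comma-separated target=level overrides. A minimal standalone sketch of how such a filter behaves (not Gecko code; assumes a plain Cargo project with the same log 0.4 / env_logger 0.5 crates and without the release_max_level_info cap):

    extern crate env_logger;
    #[macro_use]
    extern crate log;

    use std::env;

    fn main() {
        // e.g. RUST_LOG="error,webrender=debug" keeps everything at error
        // except targets under `webrender`, which get debug logging.
        let filter = env::var("RUST_LOG").unwrap_or_else(|_| "error".to_string());
        env_logger::Builder::new().parse(&filter).init();

        error!("emitted under the default \"error\" filter");
        debug!(target: "webrender", "emitted only when the filter enables it");
    }
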
--- a/tools/profiler/core/PlatformMacros.h
+++ b/tools/profiler/core/PlatformMacros.h
@@ -10,29 +10,29 @@
 // else to this file, so it can remain freestanding. The primary factorisation
 // is on (ARCH,OS) pairs ("PLATforms") but ARCH_ and OS_ macros are defined
 // too, since they are sometimes convenient.
 //
 // Note: "GP" is short for "Gecko Profiler".
 
 #undef GP_PLAT_x86_android
 #undef GP_PLAT_arm_android
-#undef GP_PLAT_aarch64_android
+#undef GP_PLAT_arm64_android
 #undef GP_PLAT_x86_linux
 #undef GP_PLAT_amd64_linux
 #undef GP_PLAT_arm_linux
 #undef GP_PLAT_mips64_linux
 #undef GP_PLAT_amd64_darwin
 #undef GP_PLAT_x86_windows
 #undef GP_PLAT_amd64_windows
 
 #undef GP_ARCH_x86
 #undef GP_ARCH_amd64
 #undef GP_ARCH_arm
-#undef GP_ARCH_aarch64
+#undef GP_ARCH_arm64
 #undef GP_ARCH_mips64
 
 #undef GP_OS_android
 #undef GP_OS_linux
 #undef GP_OS_darwin
 #undef GP_OS_windows
 
 // We test __ANDROID__ before __linux__ because __linux__ is defined on both
@@ -44,18 +44,18 @@
 # define GP_OS_android 1
 
 #elif defined(__ANDROID__) && defined(__arm__)
 # define GP_PLAT_arm_android 1
 # define GP_ARCH_arm 1
 # define GP_OS_android 1
 
 #elif defined(__ANDROID__) && defined(__aarch64__)
-# define GP_PLAT_aarch64_android 1
-# define GP_ARCH_aarch64 1
+# define GP_PLAT_arm64_android 1
+# define GP_ARCH_arm64 1
 # define GP_OS_android 1
 
 #elif defined(__linux__) && defined(__i386__)
 # define GP_PLAT_x86_linux 1
 # define GP_ARCH_x86 1
 # define GP_OS_linux 1
 
 #elif defined(__linux__) && defined(__x86_64__)
@@ -63,16 +63,21 @@
 # define GP_ARCH_amd64 1
 # define GP_OS_linux 1
 
 #elif defined(__linux__) && defined(__arm__)
 # define GP_PLAT_arm_linux 1
 # define GP_ARCH_arm 1
 # define GP_OS_linux 1
 
+#elif defined(__linux__) && defined(__aarch64__)
+# define GP_PLAT_arm64_linux 1
+# define GP_ARCH_arm64 1
+# define GP_OS_linux 1
+
 #elif defined(__linux__) && defined(__mips64)
 # define GP_PLAT_mips64_linux 1
 # define GP_ARCH_mips64 1
 # define GP_OS_linux 1
 
 #elif defined(__APPLE__) && defined(__x86_64__)
 # define GP_PLAT_amd64_darwin 1
 # define GP_ARCH_amd64 1
--- a/tools/profiler/core/platform-linux-android.cpp
+++ b/tools/profiler/core/platform-linux-android.cpp
@@ -98,17 +98,17 @@ PopulateRegsFromContext(Registers& aRegs
   aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
   aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
   aRegs.mLR = 0;
 #elif defined(GP_ARCH_arm)
   aRegs.mPC = reinterpret_cast<Address>(mcontext.arm_pc);
   aRegs.mSP = reinterpret_cast<Address>(mcontext.arm_sp);
   aRegs.mFP = reinterpret_cast<Address>(mcontext.arm_fp);
   aRegs.mLR = reinterpret_cast<Address>(mcontext.arm_lr);
-#elif defined(GP_ARCH_aarch64)
+#elif defined(GP_ARCH_arm64)
   aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
   aRegs.mSP = reinterpret_cast<Address>(mcontext.sp);
   aRegs.mFP = reinterpret_cast<Address>(mcontext.regs[29]);
   aRegs.mLR = reinterpret_cast<Address>(mcontext.regs[30]);
 #elif defined(GP_ARCH_mips64)
   aRegs.mPC = reinterpret_cast<Address>(mcontext.pc);
   aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[29]);
   aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[30]);
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -107,17 +107,18 @@
 #if defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android)
 # define HAVE_NATIVE_UNWIND
 # define USE_EHABI_STACKWALK
 # include "EHABIStackWalk.h"
 #endif
 
 // Linux builds use LUL, which uses DWARF info to unwind stacks.
 #if defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_x86_linux) || \
-    defined(GP_PLAT_mips64_linux)
+    defined(GP_PLAT_mips64_linux) || defined(GP_PLAT_arm64_linux) || \
+    defined(GP_PLAT_arm64_android)
 # define HAVE_NATIVE_UNWIND
 # define USE_LUL_STACKWALK
 # include "lul/LulMain.h"
 # include "lul/platform-linux-lul.h"
 
 // On linux we use LUL for periodic samples and synchronous samples, but we use
 // FramePointerStackWalk for backtrace samples when MOZ_PROFILING is enabled.
 // (See the comment at the top of the file for a definition of
@@ -1207,16 +1208,21 @@ DoLULBacktrace(PSLockRef aLock, const Re
   startRegs.xbp = lul::TaggedUWord(mc->gregs[REG_RBP]);
 #elif defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android)
   startRegs.r15 = lul::TaggedUWord(mc->arm_pc);
   startRegs.r14 = lul::TaggedUWord(mc->arm_lr);
   startRegs.r13 = lul::TaggedUWord(mc->arm_sp);
   startRegs.r12 = lul::TaggedUWord(mc->arm_ip);
   startRegs.r11 = lul::TaggedUWord(mc->arm_fp);
   startRegs.r7  = lul::TaggedUWord(mc->arm_r7);
+#elif defined(GP_PLAT_arm64_linux) || defined(GP_PLAT_arm64_android)
+  startRegs.pc  = lul::TaggedUWord(mc->pc);
+  startRegs.x29 = lul::TaggedUWord(mc->regs[29]);
+  startRegs.x30 = lul::TaggedUWord(mc->regs[30]);
+  startRegs.sp  = lul::TaggedUWord(mc->sp);
 #elif defined(GP_PLAT_x86_linux) || defined(GP_PLAT_x86_android)
   startRegs.xip = lul::TaggedUWord(mc->gregs[REG_EIP]);
   startRegs.xsp = lul::TaggedUWord(mc->gregs[REG_ESP]);
   startRegs.xbp = lul::TaggedUWord(mc->gregs[REG_EBP]);
 #elif defined(GP_PLAT_mips64_linux)
   startRegs.pc = lul::TaggedUWord(mc->pc);
   startRegs.sp = lul::TaggedUWord(mc->gregs[29]);
   startRegs.fp = lul::TaggedUWord(mc->gregs[30]);
@@ -1259,16 +1265,19 @@ DoLULBacktrace(PSLockRef aLock, const Re
 
   {
 #if defined(GP_PLAT_amd64_linux)
     uintptr_t rEDZONE_SIZE = 128;
     uintptr_t start = startRegs.xsp.Value() - rEDZONE_SIZE;
 #elif defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android)
     uintptr_t rEDZONE_SIZE = 0;
     uintptr_t start = startRegs.r13.Value() - rEDZONE_SIZE;
+#elif defined(GP_PLAT_arm64_linux) || defined(GP_PLAT_arm64_android)
+    uintptr_t rEDZONE_SIZE = 0;
+    uintptr_t start = startRegs.sp.Value() - rEDZONE_SIZE;
 #elif defined(GP_PLAT_x86_linux) || defined(GP_PLAT_x86_android)
     uintptr_t rEDZONE_SIZE = 0;
     uintptr_t start = startRegs.xsp.Value() - rEDZONE_SIZE;
 #elif defined(GP_PLAT_mips64_linux)
     uintptr_t rEDZONE_SIZE = 0;
     uintptr_t start = startRegs.sp.Value() - rEDZONE_SIZE;
 #else
 #   error "Unknown plat"
--- a/tools/profiler/core/shared-libraries-linux.cc
+++ b/tools/profiler/core/shared-libraries-linux.cc
@@ -15,44 +15,73 @@
 #include <unistd.h>
 #include <fstream>
 #include "platform.h"
 #include "shared-libraries.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/Unused.h"
 #include "nsDebug.h"
 #include "nsNativeCharsetUtils.h"
+#include <nsTArray.h>
 
 #include "common/linux/file_id.h"
 #include <algorithm>
 #include <dlfcn.h>
 #include <features.h>
 #include <sys/types.h>
 
 #if defined(GP_OS_linux)
 # include <link.h>      // dl_phdr_info
 #elif defined(GP_OS_android)
+# include "AutoObjectMapper.h"
 # include "ElfLoader.h" // dl_phdr_info
 extern "C" MOZ_EXPORT __attribute__((weak))
 int dl_iterate_phdr(
           int (*callback)(struct dl_phdr_info *info, size_t size, void *data),
           void *data);
 #else
 # error "Unexpected configuration"
 #endif
 
+struct LoadedLibraryInfo
+{
+  LoadedLibraryInfo(const char* aName, unsigned long aStart, unsigned long aEnd)
+    : mName(aName)
+    , mStart(aStart)
+    , mEnd(aEnd)
+  {
+  }
+
+  nsCString mName;
+  unsigned long mStart;
+  unsigned long mEnd;
+};
+
 // Get the breakpad Id for the binary file pointed to by bin_name
 static std::string getId(const char *bin_name)
 {
   using namespace google_breakpad;
   using namespace std;
 
   PageAllocator allocator;
   auto_wasteful_vector<uint8_t, sizeof(MDGUID)> identifier(&allocator);
 
+#if defined(GP_OS_android)
+  if (nsCString(bin_name).Find("!/") != kNotFound) {
+    AutoObjectMapperFaultyLib mapper(nullptr);
+    void* image = nullptr;
+    size_t size = 0;
+    if (mapper.Map(&image, &size, bin_name) && image && size) {
+      if (FileID::ElfFileIdentifierFromMappedFile(image, identifier)) {
+        return FileID::ConvertIdentifierToUUIDString(identifier) + "0";
+      }
+    }
+  }
+#endif
+
   FileID file_id(bin_name);
   if (file_id.ElfFileIdentifier(identifier)) {
     return FileID::ConvertIdentifierToUUIDString(identifier) + "0";
   }
 
   return "";
 }
 
@@ -74,17 +103,17 @@ SharedLibraryAtPath(const char* path, un
   return SharedLibrary(libStart, libEnd, offset, getId(path),
                        nameStr, pathStr, nameStr, pathStr,
                        "", "");
 }
 
 static int
 dl_iterate_callback(struct dl_phdr_info *dl_info, size_t size, void *data)
 {
-  SharedLibraryInfo& info = *reinterpret_cast<SharedLibraryInfo*>(data);
+  auto libInfoList = reinterpret_cast<nsTArray<LoadedLibraryInfo>*>(data);
 
   if (dl_info->dlpi_phnum <= 0)
     return 0;
 
   unsigned long libStart = -1;
   unsigned long libEnd = 0;
 
   for (size_t i = 0; i < dl_info->dlpi_phnum; i++) {
@@ -94,18 +123,18 @@ dl_iterate_callback(struct dl_phdr_info 
     unsigned long start = dl_info->dlpi_addr + dl_info->dlpi_phdr[i].p_vaddr;
     unsigned long end = start + dl_info->dlpi_phdr[i].p_memsz;
     if (start < libStart)
       libStart = start;
     if (end > libEnd)
       libEnd = end;
   }
 
-  info.AddSharedLibrary(
-    SharedLibraryAtPath(dl_info->dlpi_name, libStart, libEnd));
+  libInfoList->AppendElement(LoadedLibraryInfo(dl_info->dlpi_name,
+                                               libStart, libEnd));
 
   return 0;
 }
 
 SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf()
 {
   SharedLibraryInfo info;
 
@@ -182,18 +211,25 @@ SharedLibraryInfo SharedLibraryInfo::Get
         LOG("SharedLibraryInfo::GetInfoForSelf(): "
             "implausibly large number of mappings acquired");
         break;
       }
     }
 #endif
   }
 
+  nsTArray<LoadedLibraryInfo> libInfoList;
+
   // We collect the bulk of the library info using dl_iterate_phdr.
-  dl_iterate_phdr(dl_iterate_callback, &info);
+  dl_iterate_phdr(dl_iterate_callback, &libInfoList);
+
+  for (const auto& libInfo : libInfoList) {
+    info.AddSharedLibrary(
+      SharedLibraryAtPath(libInfo.mName.get(), libInfo.mStart, libInfo.mEnd));
+  }
 
 #if defined(GP_OS_linux)
   // Make another pass over the information we just harvested from
   // dl_iterate_phdr.  If we see a nameless object mapped at what we earlier
   // established to be the main executable's load address, attach the
   // executable's name to that entry.
   for (size_t i = 0; i < info.GetSize(); i++) {
     SharedLibrary& lib = info.GetMutableEntry(i);
--- a/tools/profiler/lul/LulDwarf.cpp
+++ b/tools/profiler/lul/LulDwarf.cpp
@@ -1897,16 +1897,35 @@ unsigned int DwarfCFIToModule::RegisterN
    8 "s8",  "s9",  "s10", "s11", "s12", "s13", "s14", "s15",
    8 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
    8 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
    8 "f0",  "f1",  "f2",  "f3",  "f4",  "f5",  "f6",  "f7"
   */
   return 13 * 8;
 }
 
+// Per ARM IHI 0057A, section 3.1
+unsigned int DwarfCFIToModule::RegisterNames::ARM64() {
+  /*
+   8 "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",
+   8 "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",
+   8 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+   8 "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",
+   8 "",    "",    "",    "",    "",    "",    "",    "",
+   8 "",    "",    "",    "",    "",    "",    "",    "",
+   8 "",    "",    "",    "",    "",    "",    "",    "",
+   8 "",    "",    "",    "",    "",    "",    "",    "",
+   8 "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
+   8 "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
+   8 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+   8 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+  */
+  return 12 * 8;
+}
+
 unsigned int DwarfCFIToModule::RegisterNames::MIPS() {
   /*
    8 "$zero", "$at",  "$v0",  "$v1",  "$a0",   "$a1",  "$a2",  "$a3",
    8 "$t0",   "$t1",  "$t2",  "$t3",  "$t4",   "$t5",  "$t6",  "$t7",
    8 "$s0",   "$s1",  "$s2",  "$s3",  "$s4",   "$s5",  "$s6",  "$s7",
    8 "$t8",   "$t9",  "$k0",  "$k1",  "$gp",   "$sp",  "$fp",  "$ra",
    9 "$lo",   "$hi",  "$pc",  "$f0",  "$f1",   "$f2",  "$f3",  "$f4",  "$f5",
    8 "$f6",   "$f7",  "$f8",  "$f9",  "$f10",  "$f11", "$f12", "$f13",
--- a/tools/profiler/lul/LulDwarfExt.h
+++ b/tools/profiler/lul/LulDwarfExt.h
@@ -1212,16 +1212,19 @@ class DwarfCFIToModule: public CallFrame
     static unsigned int I386();
 
     // AMD x86_64, AMD64, Intel EM64T, or Intel 64
     static unsigned int X86_64();
 
     // ARM.
     static unsigned int ARM();
 
+    // AARCH64.
+    static unsigned int ARM64();
+
     // MIPS.
     static unsigned int MIPS();
   };
 
   // Create a handler for the dwarf2reader::CallFrameInfo parser that
   // records the stack unwinding information it receives in SUMM.
   //
   // Use REGISTER_NAMES[I] as the name of register number I; *this
--- a/tools/profiler/lul/LulDwarfSummariser.cpp
+++ b/tools/profiler/lul/LulDwarfSummariser.cpp
@@ -232,16 +232,90 @@ Summariser::Rule(uintptr_t aAddress, int
   mCurrRules.mR13expr = LExpr(NODEREF, DW_REG_CFA, 0);
 
   // If there's no information about R15 (the return address), say
   // it's a copy of R14 (the link register).
   if (mCurrRules.mR15expr.mHow == UNKNOWN) {
     mCurrRules.mR15expr = LExpr(NODEREF, DW_REG_ARM_R14, 0);
   }
 
+#elif defined(GP_ARCH_arm64)
+
+  // ----------------- arm64 ----------------- //
+
+  switch (aNewReg) {
+    case DW_REG_CFA:
+      if (how != NODEREF) {
+        reason1 = "rule for DW_REG_CFA: invalid |how|";
+        goto cant_summarise;
+      }
+      switch (oldReg) {
+        case DW_REG_AARCH64_X29:
+        case DW_REG_AARCH64_SP:
+          break;
+        default:
+          reason1 = "rule for DW_REG_CFA: invalid |oldReg|";
+          goto cant_summarise;
+      }
+      mCurrRules.mCfaExpr = LExpr(how, oldReg, offset);
+      break;
+
+    case DW_REG_AARCH64_X29:
+    case DW_REG_AARCH64_X30:
+    case DW_REG_AARCH64_SP: {
+      switch (how) {
+        case NODEREF:
+        case DEREF:
+          // Check the old register is one we're tracking.
+          if (!registerIsTracked((DW_REG_NUMBER)oldReg) &&
+              oldReg != DW_REG_CFA) {
+            reason1 = "rule for X29/X30/SP: uses untracked reg";
+            goto cant_summarise;
+          }
+          break;
+        case PFXEXPR: {
+          // Check that the prefix expression only mentions tracked registers.
+          const vector<PfxInstr>* pfxInstrs = mSecMap->GetPfxInstrs();
+          reason2 = checkPfxExpr(pfxInstrs, offset);
+          if (reason2) {
+            reason1 = "rule for X29/X30/SP: ";
+            goto cant_summarise;
+          }
+          break;
+        }
+        default:
+          goto cant_summarise;
+      }
+      LExpr expr = LExpr(how, oldReg, offset);
+      switch (aNewReg) {
+        case DW_REG_AARCH64_X29: mCurrRules.mX29expr = expr; break;
+        case DW_REG_AARCH64_X30: mCurrRules.mX30expr = expr; break;
+        case DW_REG_AARCH64_SP:  mCurrRules.mSPexpr  = expr; break;
+        default: MOZ_ASSERT(0);
+      }
+      break;
+    }
+    default:
+     // Leave |reason1| and |reason2| unset here, for the reasons explained
+     // at the analogous point in the x64/x86 case below.
+     goto cant_summarise;
+  }
+
+  if (mCurrRules.mX29expr.mHow == UNKNOWN) {
+    mCurrRules.mX29expr = LExpr(NODEREF, DW_REG_AARCH64_X29, 0);
+  }
+  if (mCurrRules.mX30expr.mHow == UNKNOWN) {
+    mCurrRules.mX30expr = LExpr(NODEREF, DW_REG_AARCH64_X30, 0);
+  }
+  // On aarch64, it seems the old SP value before the call is always the
+  // same as the CFA.  Therefore, in the absence of any other way to
+  // recover the SP, specify that the CFA should be copied.
+  if (mCurrRules.mSPexpr.mHow == UNKNOWN) {
+    mCurrRules.mSPexpr = LExpr(NODEREF, DW_REG_CFA, 0);
+  }
 #elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
 
   // ---------------- x64/x86 ---------------- //
 
   // Now, can we add the rule to our summary?  This depends on whether
   // the registers and the overall expression are representable.  This
   // is the heart of the summarisation process.
   switch (aNewReg) {
--- a/tools/profiler/lul/LulElf.cpp
+++ b/tools/profiler/lul/LulElf.cpp
@@ -68,16 +68,20 @@
 #include "LulMainInt.h"
 
 
 #if defined(GP_PLAT_arm_android) && !defined(SHT_ARM_EXIDX)
 // bionic and older glibc don't define it
 # define SHT_ARM_EXIDX (SHT_LOPROC + 1)
 #endif
 
+// Older Linux headers don't define EM_AARCH64
+#ifndef EM_AARCH64
+#define EM_AARCH64 183
+#endif
 
 // This namespace contains helper functions.
 namespace {
 
 using lul::DwarfCFIToModule;
 using lul::FindElfSectionByName;
 using lul::GetOffset;
 using lul::IsValidElf;
@@ -162,16 +166,19 @@ bool DwarfCFIRegisterNames(const typenam
       *num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM();
       return true;
     case EM_X86_64:
       *num_dw_regnames = DwarfCFIToModule::RegisterNames::X86_64();
       return true;
     case EM_MIPS:
       *num_dw_regnames = DwarfCFIToModule::RegisterNames::MIPS();
       return true;
+    case EM_AARCH64:
+      *num_dw_regnames = DwarfCFIToModule::RegisterNames::ARM64();
+      return true;
     default:
       MOZ_ASSERT(0);
       return false;
   }
 }
 
 template<typename ElfClass>
 bool LoadDwarfCFI(const string& dwarf_filename,
@@ -449,16 +456,17 @@ bool LoadSymbols(const string& obj_file,
 // ELF_HEADER.
 template<typename ElfClass>
 const char* ElfArchitecture(const typename ElfClass::Ehdr* elf_header) {
   typedef typename ElfClass::Half Half;
   Half arch = elf_header->e_machine;
   switch (arch) {
     case EM_386:        return "x86";
     case EM_ARM:        return "arm";
+    case EM_AARCH64:    return "arm64";
     case EM_MIPS:       return "mips";
     case EM_PPC64:      return "ppc64";
     case EM_PPC:        return "ppc";
     case EM_S390:       return "s390";
     case EM_SPARC:      return "sparc";
     case EM_SPARCV9:    return "sparcv9";
     case EM_X86_64:     return "x86_64";
     default: return NULL;
--- a/tools/profiler/lul/LulMain.cpp
+++ b/tools/profiler/lul/LulMain.cpp
@@ -71,16 +71,20 @@ NameOf_DW_REG(int16_t aReg)
     case DW_REG_INTEL_XIP: return "xip";
 #elif defined(GP_ARCH_arm)
     case DW_REG_ARM_R7:    return "r7";
     case DW_REG_ARM_R11:   return "r11";
     case DW_REG_ARM_R12:   return "r12";
     case DW_REG_ARM_R13:   return "r13";
     case DW_REG_ARM_R14:   return "r14";
     case DW_REG_ARM_R15:   return "r15";
+#elif defined(GP_ARCH_arm64)
+    case DW_REG_AARCH64_X29: return "x29";
+    case DW_REG_AARCH64_X30: return "x30";
+    case DW_REG_AARCH64_SP:  return "sp";
 #elif defined(GP_ARCH_mips64)
     case DW_REG_MIPS_SP:   return "sp";
     case DW_REG_MIPS_FP:   return "fp";
     case DW_REG_MIPS_PC:   return "pc";
 #else
 # error "Unsupported arch"
 #endif
     default: return "???";
@@ -134,16 +138,20 @@ RuleSet::Print(void(*aLog)(const char*))
   res += mXbpExpr.ShowRule(" BP");
 #elif defined(GP_ARCH_arm)
   res += mR15expr.ShowRule(" R15");
   res += mR7expr .ShowRule(" R7" );
   res += mR11expr.ShowRule(" R11");
   res += mR12expr.ShowRule(" R12");
   res += mR13expr.ShowRule(" R13");
   res += mR14expr.ShowRule(" R14");
+#elif defined(GP_ARCH_arm64)
+  res += mX29expr.ShowRule(" X29");
+  res += mX30expr.ShowRule(" X30");
+  res += mSPexpr .ShowRule(" SP");
 #elif defined(GP_ARCH_mips64)
   res += mPCexpr.ShowRule(" PC");
   res += mSPexpr.ShowRule(" SP");
   res += mFPexpr.ShowRule(" FP");
 #else
 # error "Unsupported arch"
 #endif
   aLog(res.c_str());
@@ -159,16 +167,20 @@ RuleSet::ExprForRegno(DW_REG_NUMBER aReg
     case DW_REG_INTEL_XBP: return &mXbpExpr;
 #   elif defined(GP_ARCH_arm)
     case DW_REG_ARM_R15:   return &mR15expr;
     case DW_REG_ARM_R14:   return &mR14expr;
     case DW_REG_ARM_R13:   return &mR13expr;
     case DW_REG_ARM_R12:   return &mR12expr;
     case DW_REG_ARM_R11:   return &mR11expr;
     case DW_REG_ARM_R7:    return &mR7expr;
+#   elif defined(GP_ARCH_arm64)
+    case DW_REG_AARCH64_X29: return &mX29expr;
+    case DW_REG_AARCH64_X30: return &mX30expr;
+    case DW_REG_AARCH64_SP:  return &mSPexpr;
 #elif defined(GP_ARCH_mips64)
     case DW_REG_MIPS_SP:    return &mSPexpr;
     case DW_REG_MIPS_FP:    return &mFPexpr;
     case DW_REG_MIPS_PC:    return &mPCexpr;
 #   else
 #     error "Unknown arch"
 #   endif
     default: return nullptr;
@@ -912,16 +924,20 @@ TaggedUWord EvaluateReg(int16_t aReg, co
     case DW_REG_INTEL_XIP: return aOldRegs->xip;
 #elif defined(GP_ARCH_arm)
     case DW_REG_ARM_R7:    return aOldRegs->r7;
     case DW_REG_ARM_R11:   return aOldRegs->r11;
     case DW_REG_ARM_R12:   return aOldRegs->r12;
     case DW_REG_ARM_R13:   return aOldRegs->r13;
     case DW_REG_ARM_R14:   return aOldRegs->r14;
     case DW_REG_ARM_R15:   return aOldRegs->r15;
+#elif defined(GP_ARCH_arm64)
+    case DW_REG_AARCH64_X29: return aOldRegs->x29;
+    case DW_REG_AARCH64_X30: return aOldRegs->x30;
+    case DW_REG_AARCH64_SP:  return aOldRegs->sp;
 #elif defined(GP_ARCH_mips64)
     case DW_REG_MIPS_SP:   return aOldRegs->sp;
     case DW_REG_MIPS_FP:   return aOldRegs->fp;
     case DW_REG_MIPS_PC:   return aOldRegs->pc;
 #else
 # error "Unsupported arch"
 #endif
     default: MOZ_ASSERT(0); return TaggedUWord();
@@ -1108,16 +1124,21 @@ void UseRuleSet(/*MOD*/UnwindRegs* aRegs
   aRegs->xip = TaggedUWord();
 #elif defined(GP_ARCH_arm)
   aRegs->r7  = TaggedUWord();
   aRegs->r11 = TaggedUWord();
   aRegs->r12 = TaggedUWord();
   aRegs->r13 = TaggedUWord();
   aRegs->r14 = TaggedUWord();
   aRegs->r15 = TaggedUWord();
+#elif defined(GP_ARCH_arm64)
+  aRegs->x29 = TaggedUWord();
+  aRegs->x30 = TaggedUWord();
+  aRegs->sp  = TaggedUWord();
+  aRegs->pc  = TaggedUWord();
 #elif defined(GP_ARCH_mips64)
   aRegs->sp  = TaggedUWord();
   aRegs->fp  = TaggedUWord();
   aRegs->pc  = TaggedUWord();
 #else
 #  error "Unsupported arch"
 #endif
 
@@ -1149,16 +1170,23 @@ void UseRuleSet(/*MOD*/UnwindRegs* aRegs
   aRegs->r12
     = aRS->mR12expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
   aRegs->r13
     = aRS->mR13expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
   aRegs->r14
     = aRS->mR14expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
   aRegs->r15
     = aRS->mR15expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
+#elif defined(GP_ARCH_arm64)
+  aRegs->x29
+    = aRS->mX29expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
+  aRegs->x30
+    = aRS->mX30expr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
+  aRegs->sp
+    = aRS->mSPexpr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
 #elif defined(GP_ARCH_mips64)
   aRegs->sp
     = aRS->mSPexpr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
   aRegs->fp
     = aRS->mFPexpr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
   aRegs->pc
     = aRS->mPCexpr.EvaluateExpr(&old_regs, cfa, aStackImg, aPfxInstrs);
 #else
@@ -1208,16 +1236,26 @@ LUL::Unwind(/*OUT*/uintptr_t* aFramePCs,
                      (int)regs.r15.Valid(), (unsigned long long int)regs.r15.Value(),
                      (int)regs.r7.Valid(),  (unsigned long long int)regs.r7.Value(),
                      (int)regs.r11.Valid(), (unsigned long long int)regs.r11.Value(),
                      (int)regs.r12.Valid(), (unsigned long long int)regs.r12.Value(),
                      (int)regs.r13.Valid(), (unsigned long long int)regs.r13.Value(),
                      (int)regs.r14.Valid(), (unsigned long long int)regs.r14.Value());
       buf[sizeof(buf)-1] = 0;
       mLog(buf);
+#elif defined(GP_ARCH_arm64)
+      SprintfLiteral(buf,
+                     "LoopTop: pc %d/%llx  x29 %d/%llx  x30 %d/%llx"
+                     "  sp %d/%llx\n",
+                     (int)regs.pc.Valid(), (unsigned long long int)regs.pc.Value(),
+                     (int)regs.x29.Valid(), (unsigned long long int)regs.x29.Value(),
+                     (int)regs.x30.Valid(), (unsigned long long int)regs.x30.Value(),
+                     (int)regs.sp.Valid(), (unsigned long long int)regs.sp.Value());
+      buf[sizeof(buf)-1] = 0;
+      mLog(buf);
 #elif defined(GP_ARCH_mips64)
       SprintfLiteral(buf,
                      "LoopTop: pc %d/%llx  sp %d/%llx  fp %d/%llx\n",
                      (int)regs.pc.Valid(), (unsigned long long int)regs.pc.Value(),
                      (int)regs.sp.Valid(), (unsigned long long int)regs.sp.Value(),
                      (int)regs.fp.Valid(), (unsigned long long int)regs.fp.Value());
       buf[sizeof(buf)-1] = 0;
       mLog(buf);
@@ -1227,16 +1265,19 @@ LUL::Unwind(/*OUT*/uintptr_t* aFramePCs,
     }
 
 #if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
     TaggedUWord ia = regs.xip;
     TaggedUWord sp = regs.xsp;
 #elif defined(GP_ARCH_arm)
     TaggedUWord ia = (*aFramesUsed == 0 ? regs.r15 : regs.r14);
     TaggedUWord sp = regs.r13;
+#elif defined(GP_ARCH_arm64)
+    TaggedUWord ia = (*aFramesUsed == 0 ? regs.pc : regs.x30);
+    TaggedUWord sp = regs.sp;
 #elif defined(GP_ARCH_mips64)
     TaggedUWord ia = regs.pc;
     TaggedUWord sp = regs.sp;
 #else
 # error "Unsupported arch"
 #endif
 
     if (*aFramesUsed >= aFramesAvail) {
@@ -1525,16 +1566,36 @@ bool GetAndCheckStackTrace(LUL* aLUL, co
   startRegs.r15 = TaggedUWord(block[0]);
   startRegs.r14 = TaggedUWord(block[1]);
   startRegs.r13 = TaggedUWord(block[2]);
   startRegs.r12 = TaggedUWord(block[3]);
   startRegs.r11 = TaggedUWord(block[4]);
   startRegs.r7  = TaggedUWord(block[5]);
   const uintptr_t REDZONE_SIZE = 0;
   uintptr_t start = block[1] - REDZONE_SIZE;
+#elif defined(GP_ARCH_arm64)
+  volatile uintptr_t block[4];
+  MOZ_ASSERT(sizeof(block) == 32);
+  __asm__ __volatile__(
+    "adr x0, . \n\t"
+    "str x0, [%0, #0] \n\t"
+    "str x29, [%0, #8] \n\t"
+    "str x30, [%0, #16] \n\t"
+    "mov x0, sp \n\t"
+    "str x0, [%0, #24] \n\t"
+    :
+    : "r"(&block[0])
+    : "memory", "x0"
+  );
+  startRegs.pc = TaggedUWord(block[0]);
+  startRegs.x29 = TaggedUWord(block[1]);
+  startRegs.x30 = TaggedUWord(block[2]);
+  startRegs.sp = TaggedUWord(block[3]);
+  const uintptr_t REDZONE_SIZE = 0;
+  uintptr_t start = block[1] - REDZONE_SIZE;
 #elif defined(GP_ARCH_mips64)
   volatile uintptr_t block[3];
   MOZ_ASSERT(sizeof(block) == 24);
   __asm__ __volatile__(
     "sd $29, 8(%0)     \n"
     "sd $30, 16(%0)    \n"
     :
     :"r"(block)
--- a/tools/profiler/lul/LulMain.h
+++ b/tools/profiler/lul/LulMain.h
@@ -143,16 +143,21 @@ private:
 struct UnwindRegs {
 #if defined(GP_ARCH_arm)
   TaggedUWord r7;
   TaggedUWord r11;
   TaggedUWord r12;
   TaggedUWord r13;
   TaggedUWord r14;
   TaggedUWord r15;
+#elif defined(GP_ARCH_arm64)
+  TaggedUWord x29;
+  TaggedUWord x30;
+  TaggedUWord sp;
+  TaggedUWord pc;
 #elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
   TaggedUWord xbp;
   TaggedUWord xsp;
   TaggedUWord xip;
 #elif defined(GP_ARCH_mips64)
   TaggedUWord sp;
   TaggedUWord fp;
   TaggedUWord pc;
--- a/tools/profiler/lul/LulMainInt.h
+++ b/tools/profiler/lul/LulMainInt.h
@@ -38,16 +38,21 @@ enum DW_REG_NUMBER {
 #if defined(GP_ARCH_arm)
   // ARM registers
   DW_REG_ARM_R7  = 7,
   DW_REG_ARM_R11 = 11,
   DW_REG_ARM_R12 = 12,
   DW_REG_ARM_R13 = 13,
   DW_REG_ARM_R14 = 14,
   DW_REG_ARM_R15 = 15,
+#elif defined(GP_ARCH_arm64)
+  // aarch64 registers
+  DW_REG_AARCH64_X29 = 29,
+  DW_REG_AARCH64_X30 = 30,
+  DW_REG_AARCH64_SP  = 31,
 #elif defined(GP_ARCH_amd64)
   // Because the X86 (32 bit) and AMD64 (64 bit) summarisers are
   // combined, a merged set of register constants is needed.
   DW_REG_INTEL_XBP = 6,
   DW_REG_INTEL_XSP = 7,
   DW_REG_INTEL_XIP = 16,
 #elif defined(GP_ARCH_x86)
   DW_REG_INTEL_XBP = 5,
@@ -274,16 +279,20 @@ public:
   LExpr  mXbpExpr;
 #elif defined(GP_ARCH_arm)
   LExpr  mR15expr; // return address
   LExpr  mR14expr;
   LExpr  mR13expr;
   LExpr  mR12expr;
   LExpr  mR11expr;
   LExpr  mR7expr;
+#elif defined(GP_ARCH_arm64)
+  LExpr  mX29expr; // frame pointer register
+  LExpr  mX30expr; // link register
+  LExpr  mSPexpr;
 #elif defined(GP_ARCH_mips64)
   LExpr  mPCexpr;
   LExpr  mFPexpr;
   LExpr  mSPexpr;
 #else
 #   error "Unknown arch"
 #endif
 };
@@ -294,16 +303,19 @@ static inline bool registerIsTracked(DW_
   switch (reg) {
 #   if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
     case DW_REG_INTEL_XBP: case DW_REG_INTEL_XSP: case DW_REG_INTEL_XIP:
       return true;
 #   elif defined(GP_ARCH_arm)
     case DW_REG_ARM_R7:  case DW_REG_ARM_R11: case DW_REG_ARM_R12:
     case DW_REG_ARM_R13: case DW_REG_ARM_R14: case DW_REG_ARM_R15:
       return true;
+#   elif defined(GP_ARCH_arm64)
+    case DW_REG_AARCH64_X29:  case DW_REG_AARCH64_X30: case DW_REG_AARCH64_SP:
+      return true;
 #elif defined(GP_ARCH_mips64)
     case DW_REG_MIPS_FP:  case DW_REG_MIPS_SP: case DW_REG_MIPS_PC:
       return true;
 #   else
 #     error "Unknown arch"
 #   endif
     default:
       return false;
--- a/tools/profiler/moz.build
+++ b/tools/profiler/moz.build
@@ -43,17 +43,17 @@ if CONFIG['MOZ_GECKO_PROFILER']:
             'gecko/nsProfiler.cpp',
         ]
     else:
         UNIFIED_SOURCES += [
             'gecko/nsProfiler.cpp',
         ]
 
     if CONFIG['OS_TARGET'] in ('Android', 'Linux'):
-        if CONFIG['CPU_ARCH'] in ('arm', 'x86', 'x86_64', 'mips64'):
+        if CONFIG['CPU_ARCH'] in ('arm', 'aarch64', 'x86', 'x86_64', 'mips64'):
             UNIFIED_SOURCES += [
                 'lul/AutoObjectMapper.cpp',
                 'lul/LulCommon.cpp',
                 'lul/LulDwarf.cpp',
                 'lul/LulDwarfSummariser.cpp',
                 'lul/LulElf.cpp',
                 'lul/LulMain.cpp',
                 'lul/platform-linux-lul.cpp',
@@ -94,16 +94,17 @@ if CONFIG['MOZ_GECKO_PROFILER']:
     ]
 
     if CONFIG['OS_TARGET'] == 'Android':
         DEFINES['ANDROID_NDK_MAJOR_VERSION'] = CONFIG['ANDROID_NDK_MAJOR_VERSION']
         DEFINES['ANDROID_NDK_MINOR_VERSION'] = CONFIG['ANDROID_NDK_MINOR_VERSION']
         LOCAL_INCLUDES += [
             # We need access to Breakpad's getcontext(3) which is suitable for Android
             '/toolkit/crashreporter/google-breakpad/src/common/android/include',
+            'lul',
         ]
 
     if not CONFIG['MOZ_CRASHREPORTER'] and CONFIG['OS_TARGET'] == 'Android':
         SOURCES += ['/toolkit/crashreporter/google-breakpad/src/common/android/breakpad_getcontext.S']
 
     if CONFIG['ENABLE_TESTS']:
         DIRS += ['tests/gtest']
 
--- a/tools/profiler/tests/gtest/LulTestDwarf.cpp
+++ b/tools/profiler/tests/gtest/LulTestDwarf.cpp
@@ -2401,16 +2401,19 @@ TEST_F(LulDwarfExpr, ExpressionOverrun) 
   EXPECT_TRUE(ix == -1);
 }
 
 // We'll need to mention specific Dwarf registers in the EvaluatePfxExpr tests,
 // and those names are arch-specific, so a bit of macro magic is helpful.
 #if defined(GP_ARCH_arm)
 # define TESTED_REG_STRUCT_NAME  r11
 # define TESTED_REG_DWARF_NAME   DW_REG_ARM_R11
+#elif defined(GP_ARCH_arm64)
+# define TESTED_REG_STRUCT_NAME  x29
+# define TESTED_REG_DWARF_NAME   DW_REG_AARCH64_X29
 #elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
 # define TESTED_REG_STRUCT_NAME  xbp
 # define TESTED_REG_DWARF_NAME   DW_REG_INTEL_XBP
 #else
 # error "Unknown plat"
 #endif
 
 struct EvaluatePfxExprFixture {
--- a/tools/profiler/tests/gtest/moz.build
+++ b/tools/profiler/tests/gtest/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
 
 if (CONFIG['OS_TARGET'] in ('Android', 'Linux') and
-    CONFIG['CPU_ARCH'] in ('arm', 'x86', 'x86_64')):
+    CONFIG['CPU_ARCH'] in ('arm', 'aarch64', 'x86', 'x86_64')):
     UNIFIED_SOURCES += [
         'LulTest.cpp',
         'LulTestDwarf.cpp',
         'LulTestInfrastructure.cpp',
     ]
 
 LOCAL_INCLUDES += [
     '/tools/profiler/core',
--- a/xpcom/build/XPCOMInit.cpp
+++ b/xpcom/build/XPCOMInit.cpp
@@ -151,16 +151,20 @@ extern nsresult nsStringInputStreamConst
 #include "js/Initialization.h"
 
 #include "gfxPlatform.h"
 
 using namespace mozilla;
 using base::AtExitManager;
 using mozilla::ipc::BrowserProcessSubThread;
 
+// From toolkit/library/rust/lib.rs
+extern "C" void GkRust_Init();
+extern "C" void GkRust_Shutdown();
+
 namespace {
 
 static AtExitManager* sExitManager;
 static MessageLoop* sMessageLoop;
 static bool sCommandLineWasInitialized;
 static BrowserProcessSubThread* sIOThread;
 static BackgroundHangMonitor* sMainHangMonitor;
 
@@ -447,16 +451,18 @@ NS_InitXPCOM2(nsIServiceManager** aResul
 
   NS_InitAtomTable();
 
   // We don't have the arguments by hand here.  If logging has already been
   // initialized by a previous call to LogModule::Init with the arguments
   // passed, passing (0, nullptr) is alright here.
   mozilla::LogModule::Init(0, nullptr);
 
+  GkRust_Init();
+
   nsresult rv = NS_OK;
 
   // We are not shutting down
   gXPCOMShuttingDown = false;
 
   // Initialize the available memory tracker before other threads have had a
   // chance to start up, because the initialization is not thread-safe.
   mozilla::AvailableMemoryTracker::Init();
@@ -994,16 +1000,18 @@ ShutdownXPCOM(nsIServiceManager* aServMg
     NS_ASSERTION(cnt == 0, "Component Manager being held past XPCOM shutdown.");
   }
   nsComponentManagerImpl::gComponentManager = nullptr;
   nsCategoryManager::Destroy();
 
   // Shut down SystemGroup for main thread dispatching.
   SystemGroup::Shutdown();
 
+  GkRust_Shutdown();
+
   NS_ShutdownAtomTable();
 
   NS_IF_RELEASE(gDebug);
 
   delete sIOThread;
   sIOThread = nullptr;
 
   delete sMessageLoop;