Merge autoland to mozilla-central a=merge
author: Razvan Maries <rmaries@mozilla.com>
Thu, 14 Nov 2019 23:45:40 +0200
changeset 502037 0fb79a3edf1bddd8532e6d98e7b0531b5155a6e4
parent 502036 173de3ea2a270d6dd2c117a291460093d4c168ec (current diff)
parent 501989 ee728f3ea085900757752d009ea271d21087096a (diff)
child 502038 88db9bea4580df16dc444668f8c2cddbb3414318
push id: 114172
push user: dluca@mozilla.com
push date: Tue, 19 Nov 2019 11:31:10 +0000
treeherder: mozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge autoland to mozilla-central a=merge
devtools/client/netmonitor/src/components/RequestList.js
devtools/client/netmonitor/src/components/RequestListColumnCause.js
devtools/client/netmonitor/src/components/RequestListColumnContentSize.js
devtools/client/netmonitor/src/components/RequestListColumnCookies.js
devtools/client/netmonitor/src/components/RequestListColumnDomain.js
devtools/client/netmonitor/src/components/RequestListColumnFile.js
devtools/client/netmonitor/src/components/RequestListColumnMethod.js
devtools/client/netmonitor/src/components/RequestListColumnProtocol.js
devtools/client/netmonitor/src/components/RequestListColumnRemoteIP.js
devtools/client/netmonitor/src/components/RequestListColumnResponseHeader.js
devtools/client/netmonitor/src/components/RequestListColumnScheme.js
devtools/client/netmonitor/src/components/RequestListColumnSetCookies.js
devtools/client/netmonitor/src/components/RequestListColumnStatus.js
devtools/client/netmonitor/src/components/RequestListColumnTime.js
devtools/client/netmonitor/src/components/RequestListColumnTransferredSize.js
devtools/client/netmonitor/src/components/RequestListColumnType.js
devtools/client/netmonitor/src/components/RequestListColumnUrl.js
devtools/client/netmonitor/src/components/RequestListColumnWaterfall.js
devtools/client/netmonitor/src/components/RequestListContent.js
devtools/client/netmonitor/src/components/RequestListEmptyNotice.js
devtools/client/netmonitor/src/components/RequestListHeader.js
devtools/client/netmonitor/src/components/RequestListItem.js
dom/media/mediasink/OutputStreamManager.cpp
dom/media/mediasink/OutputStreamManager.h
intl/hyphenation/glue/hnjalloc.h
intl/hyphenation/glue/hnjstdio.cpp
intl/hyphenation/hyphen/AUTHORS
intl/hyphenation/hyphen/COPYING
intl/hyphenation/hyphen/COPYING.LGPL
intl/hyphenation/hyphen/COPYING.MPL
intl/hyphenation/hyphen/NEWS
intl/hyphenation/hyphen/README
intl/hyphenation/hyphen/README.compound
intl/hyphenation/hyphen/README.hyphen
intl/hyphenation/hyphen/README.nonstandard
intl/hyphenation/hyphen/hyphen.c
intl/hyphenation/hyphen/hyphen.h
intl/hyphenation/hyphen/moz.build
mobile/android/docs/geckoview/Gemfile
mobile/android/docs/geckoview/_config.yml
mobile/android/geckoview/src/main/java/org/mozilla/geckoview/GeckoResponse.java
testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html.ini
toolkit/components/telemetry/docs/data/optout-ping.rst
toolkit/components/telemetry/tests/marionette/tests/client/test_optout_ping.py
--- a/.cargo/config.in
+++ b/.cargo/config.in
@@ -22,16 +22,21 @@ branch = "wgpu"
 git = "https://github.com/kvark/spirv_cross"
 replace-with = "vendored-sources"
 
 [source."https://github.com/kvark/rust-objc-exception"]
 branch = "cc"
 git = "https://github.com/kvark/rust-objc-exception"
 replace-with = "vendored-sources"
 
+[source."https://github.com/jfkthame/mapped_hyph.git"]
+git = "https://github.com/jfkthame/mapped_hyph.git"
+replace-with = "vendored-sources"
+tag = "v0.3.0"
+
 [source."https://github.com/hsivonen/packed_simd"]
 branch = "rust_1_32"
 git = "https://github.com/hsivonen/packed_simd"
 replace-with = "vendored-sources"
 
 [source."https://github.com/alexcrichton/mio-named-pipes"]
 branch = "master"
 git = "https://github.com/alexcrichton/mio-named-pipes"
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -3,18 +3,22 @@
     // for the documentation about the tasks.json format
     "version": "2.0.0",
     "type": "shell",
     "command": "${workspaceFolder}/mach",
     "args": ["--log-no-times"],
     "windows": {
       "command": "/mozilla-build/start-shell.bat",
       "args": [
+        "cd",
         // Use PowerShell to mangle path for mozilla-build environment
-        {"value": "$('${workspaceFolder}\\mach' -replace '\\\\','/')", "quoting": "weak"},
+        {"value": "$('${workspaceFolder}' -replace '\\\\','/')", "quoting": "weak"},
+        "';'",
+
+        "mach",
         "--log-no-times"
       ]
     },
     "tasks": [
       {
         "label": "clobber",
         "args": ["clobber"],
         "problemMatcher": []
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1424,16 +1424,17 @@ dependencies = [
  "encoding_glue 0.1.0",
  "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "geckoservo 0.0.1",
  "gkrust_utils 0.1.0",
  "jsrust_shared 0.1.0",
  "kvstore 0.1.0",
  "lmdb-rkv-sys 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mapped_hyph 0.3.0 (git+https://github.com/jfkthame/mapped_hyph.git?tag=v0.3.0)",
  "mdns_service 0.1.0",
  "mozurl 0.0.1",
  "mp4parse_capi 0.11.2",
  "neqo_glue 0.1.0",
  "netwerk_helper 0.0.1",
  "nserror 0.1.0",
  "nsstring 0.1.0",
  "prefs_parser 0.0.1",
@@ -1981,16 +1982,25 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
  "syn 0.15.30 (registry+https://github.com/rust-lang/crates.io-index)",
  "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "mapped_hyph"
+version = "0.3.0"
+source = "git+https://github.com/jfkthame/mapped_hyph.git?tag=v0.3.0#3b5fffbe17e8cdcc6814886a9b9170fde3db13bd"
+dependencies = [
+ "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "marionette"
 version = "0.1.0"
 dependencies = [
  "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_repr 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -4634,16 +4644,17 @@ dependencies = [
 "checksum lucet-module 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)" = "<none>"
 "checksum lucet-runtime 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)" = "<none>"
 "checksum lucet-runtime-internals 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)" = "<none>"
 "checksum lucet-wasi 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)" = "<none>"
 "checksum lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084"
 "checksum mach 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
 "checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
 "checksum malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "35adee9ed962cf7d07d62cb58bc45029f3227f5b5b86246caa8632f06c187bc3"
+"checksum mapped_hyph 0.3.0 (git+https://github.com/jfkthame/mapped_hyph.git?tag=v0.3.0)" = "<none>"
 "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
 "checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39"
 "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
 "checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
 "checksum metal 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf8052f20601c7af6293d3f7bf7b9159aee5974804fe65d871d437f933ec1eb"
 "checksum mime 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "3e27ca21f40a310bd06d9031785f4801710d566c184a6e15bad4f1d9b65f9425"
 "checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599"
 "checksum miniz-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9e3ae51cea1576ceba0dde3d484d30e6e5b86dee0b2d412fe3a16a15c98202"
--- a/accessible/moz.build
+++ b/accessible/moz.build
@@ -31,16 +31,17 @@ if CONFIG['MOZ_XUL']:
 
 TEST_DIRS += ['tests/mochitest']
 
 BROWSER_CHROME_MANIFESTS += [
   'tests/browser/bounds/browser.ini',
   'tests/browser/browser.ini',
   'tests/browser/e10s/browser.ini',
   'tests/browser/events/browser.ini',
+  'tests/browser/fission/browser.ini',
   'tests/browser/general/browser.ini',
   'tests/browser/scroll/browser.ini',
   'tests/browser/states/browser.ini',
   'tests/browser/tree/browser.ini'
 ]
 
 with Files("**"):
     BUG_COMPONENT = ("Core", "Disability Access APIs")
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser.ini
@@ -0,0 +1,16 @@
+[DEFAULT]
+support-files =
+  head.js
+  !/accessible/tests/browser/shared-head.js
+  !/accessible/tests/browser/fission_document_builder.sjs
+  !/accessible/tests/browser/*.jsm
+  !/accessible/tests/mochitest/*.js
+
+[browser_content_tree.js]
+[browser_hidden_iframe.js]
+[browser_nested_iframe.js]
+[browser_reframe_root.js]
+[browser_reframe_visibility.js]
+[browser_src_change.js]
+[browser_take_focus.js]
+skip-if = fission || (os == 'win') # Bug 1556627 and 1594300 xpcAccessible::TakeFocus() is not implemented on Windows with e10s.
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_content_tree.js
@@ -0,0 +1,75 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/role.js */
+loadScripts({ name: "role.js", dir: MOCHITESTS_DIR });
+
+addAccessibleTask(
+  `<table id="table">
+    <tr>
+      <td>cell1</td>
+      <td>cell2</td>
+    </tr>
+  </table>
+  <ul id="ul">
+    <li id="li">item1</li>
+  </ul>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    ok(fissionDocAcc, "Fission document accessible is present");
+    (gFissionBrowser ? isnot : is)(
+      browser.browsingContext.currentWindowGlobal.osPid,
+      browser.browsingContext.getChildren()[0].currentWindowGlobal.osPid,
+      `Content and fission documents are in ${
+        gFissionBrowser ? "separate processes" : "same process"
+      }.`
+    );
+
+    const tree = {
+      DOCUMENT: [
+        {
+          INTERNAL_FRAME: [
+            {
+              DOCUMENT: [
+                {
+                  TABLE: [
+                    {
+                      ROW: [
+                        { CELL: [{ TEXT_LEAF: [] }] },
+                        { CELL: [{ TEXT_LEAF: [] }] },
+                      ],
+                    },
+                  ],
+                },
+                {
+                  LIST: [
+                    {
+                      LISTITEM: [{ STATICTEXT: [] }, { TEXT_LEAF: [] }],
+                    },
+                  ],
+                },
+              ],
+            },
+          ],
+        },
+      ],
+    };
+    testAccessibleTree(contentDocAcc, tree);
+
+    const iframeAcc = contentDocAcc.getChildAt(0);
+    is(
+      iframeAcc.getChildAt(0),
+      fissionDocAcc,
+      "Fission document for the IFRAME matches."
+    );
+
+    is(
+      fissionDocAcc.parent,
+      iframeAcc,
+      "Fission document's parent matches the IFRAME."
+    );
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_hidden_iframe.js
@@ -0,0 +1,81 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/states.js */
+/* import-globals-from ../../mochitest/role.js */
+loadScripts({ name: "states.js", dir: MOCHITESTS_DIR });
+loadScripts({ name: "role.js", dir: MOCHITESTS_DIR });
+
+addAccessibleTask(
+  `<input id="textbox" value="hello"/>`,
+  async function(browser, contentDocAcc) {
+    info(
+      "Check that the IFRAME and the fission document are not accessible initially."
+    );
+    let iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    let fissionDocAcc = findAccessibleChildByID(
+      contentDocAcc,
+      DEFAULT_FISSION_DOC_BODY_ID
+    );
+    ok(!iframeAcc, "IFRAME is hidden and should not be accessible");
+    ok(!fissionDocAcc, "fission document is hidden and should be accessible");
+
+    info(
+      "Show the IFRAME and check that it's now available in the accessibility tree."
+    );
+
+    const events = [[EVENT_REORDER, contentDocAcc]];
+    if (!gFissionBrowser) {
+      // Until this event is fired, IFRAME accessible has no children attached.
+      events.push([
+        EVENT_STATE_CHANGE,
+        event => {
+          const scEvent = event.QueryInterface(nsIAccessibleStateChangeEvent);
+          const id = getAccessibleDOMNodeID(event.accessible);
+          return (
+            id === DEFAULT_FISSION_DOC_BODY_ID &&
+            scEvent.state === STATE_BUSY &&
+            scEvent.isEnabled === false
+          );
+        },
+      ]);
+    }
+    const onEvents = waitForEvents(events);
+    await SpecialPowers.spawn(browser, [FISSION_IFRAME_ID], contentId => {
+      content.document.getElementById(contentId).style.display = "";
+    });
+    await onEvents;
+
+    iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    fissionDocAcc = findAccessibleChildByID(
+      contentDocAcc,
+      DEFAULT_FISSION_DOC_BODY_ID
+    );
+
+    ok(!isDefunct(iframeAcc), "IFRAME should be accessible");
+    is(iframeAcc.childCount, 1, "IFRAME accessible should have a single child");
+    ok(fissionDocAcc, "fission document exists");
+    ok(!isDefunct(fissionDocAcc), "fission document should be accessible");
+    is(
+      iframeAcc.firstChild,
+      fissionDocAcc,
+      "An accessible for a fission document is the child of the IFRAME accessible"
+    );
+    is(
+      fissionDocAcc.parent,
+      iframeAcc,
+      "Fission document's parent matches the IFRAME."
+    );
+  },
+  {
+    topLevel: false,
+    iframe: true,
+    fissionIFrameAttrs: {
+      style: "display: none;",
+    },
+    skipFissionDocLoad: true,
+  }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_nested_iframe.js
@@ -0,0 +1,142 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/role.js */
+loadScripts({ name: "role.js", dir: MOCHITESTS_DIR });
+
+const NESTED_FISSION_DOC_BODY_ID = "nested-fission-body";
+const NESTED_FISSION_IFRAME_ID = "nested-fission-iframe";
+const nestedURL = new URL(
+  `http://example.net${CURRENT_FILE_DIR}fission_document_builder.sjs`
+);
+nestedURL.searchParams.append(
+  "html",
+  `<html>
+      <head>
+        <meta charset="utf-8"/>
+        <title>Accessibility Nested Fission Frame Test</title>
+      </head>
+      <body id="${NESTED_FISSION_DOC_BODY_ID}">
+        <table id="table">
+          <tr>
+            <td>cell1</td>
+            <td>cell2</td>
+          </tr>
+        </table>
+        <ul id="ul">
+          <li id="li">item1</li>
+        </ul>
+      </body>
+    </html>`
+);
+
+function getOsPid(browsingContext) {
+  return browsingContext.currentWindowGlobal.osPid;
+}
+
+addAccessibleTask(
+  `<iframe id="${NESTED_FISSION_IFRAME_ID}" src="${nestedURL.href}"/>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    let nestedDocAcc = findAccessibleChildByID(
+      fissionDocAcc,
+      NESTED_FISSION_DOC_BODY_ID
+    );
+
+    ok(fissionDocAcc, "Fission document accessible is present");
+    ok(nestedDocAcc, "Nested fission document accessible is present");
+
+    const state = {};
+    nestedDocAcc.getState(state, {});
+    if (state.value & STATE_BUSY) {
+      nestedDocAcc = (await waitForEvent(
+        EVENT_DOCUMENT_LOAD_COMPLETE,
+        NESTED_FISSION_DOC_BODY_ID
+      )).accessible;
+    }
+
+    if (gFissionBrowser) {
+      isnot(
+        getOsPid(browser.browsingContext),
+        getOsPid(browser.browsingContext.getChildren()[0]),
+        `Content and fission documents are in separate processes.`
+      );
+      isnot(
+        getOsPid(browser.browsingContext),
+        getOsPid(browser.browsingContext.getChildren()[0].getChildren()[0]),
+        `Content and nested fission documents are in separate processes.`
+      );
+      isnot(
+        getOsPid(browser.browsingContext.getChildren()[0]),
+        getOsPid(browser.browsingContext.getChildren()[0].getChildren()[0]),
+        `Fission and nested fission documents are in separate processes.`
+      );
+    } else {
+      is(
+        getOsPid(browser.browsingContext),
+        getOsPid(browser.browsingContext.getChildren()[0]),
+        `Content and fission documents are in same processes.`
+      );
+      is(
+        getOsPid(browser.browsingContext),
+        getOsPid(browser.browsingContext.getChildren()[0].getChildren()[0]),
+        `Content and nested fission documents are in same processes.`
+      );
+    }
+
+    const tree = {
+      DOCUMENT: [
+        {
+          INTERNAL_FRAME: [
+            {
+              DOCUMENT: [
+                {
+                  INTERNAL_FRAME: [
+                    {
+                      DOCUMENT: [
+                        {
+                          TABLE: [
+                            {
+                              ROW: [
+                                { CELL: [{ TEXT_LEAF: [] }] },
+                                { CELL: [{ TEXT_LEAF: [] }] },
+                              ],
+                            },
+                          ],
+                        },
+                        {
+                          LIST: [
+                            {
+                              LISTITEM: [{ STATICTEXT: [] }, { TEXT_LEAF: [] }],
+                            },
+                          ],
+                        },
+                      ],
+                    },
+                  ],
+                },
+              ],
+            },
+          ],
+        },
+      ],
+    };
+    testAccessibleTree(contentDocAcc, tree);
+
+    const nestedIframeAcc = fissionDocAcc.getChildAt(0);
+    is(
+      nestedIframeAcc.getChildAt(0),
+      nestedDocAcc,
+      "Nested fission document for nested IFRAME matches."
+    );
+
+    is(
+      nestedDocAcc.parent,
+      nestedIframeAcc,
+      "Nested fission document's parent matches the nested IFRAME."
+    );
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_reframe_root.js
@@ -0,0 +1,95 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/states.js */
+/* import-globals-from ../../mochitest/role.js */
+loadScripts(
+  { name: "role.js", dir: MOCHITESTS_DIR },
+  { name: "states.js", dir: MOCHITESTS_DIR }
+);
+
+addAccessibleTask(
+  `<input id="textbox" value="hello"/>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    info(
+      "Check that the IFRAME and the fission document are accessible initially."
+    );
+    let iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    ok(!isDefunct(iframeAcc), "IFRAME should be accessible");
+    ok(!isDefunct(fissionDocAcc), "fission document should be accessible");
+
+    info("Move the IFRAME under a new hidden root.");
+    let onEvents = waitForEvent(EVENT_REORDER, contentDocAcc);
+    await SpecialPowers.spawn(browser, [FISSION_IFRAME_ID], id => {
+      const doc = content.document;
+      const root = doc.createElement("div");
+      root.style.display = "none";
+      doc.body.appendChild(root);
+      root.appendChild(doc.getElementById(id));
+    });
+    await onEvents;
+
+    ok(
+      isDefunct(iframeAcc),
+      "IFRAME accessible should be defunct when hidden."
+    );
+    ok(
+      isDefunct(fissionDocAcc),
+      "IFRAME document's accessible should be defunct when the IFRAME is hidden."
+    );
+    ok(
+      !findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID),
+      "No accessible for an IFRAME present."
+    );
+    ok(
+      !findAccessibleChildByID(contentDocAcc, DEFAULT_FISSION_DOC_BODY_ID),
+      "No accessible for the fission document present."
+    );
+
+    info("Move the IFRAME back under the content document's body.");
+    onEvents = waitForEvents([
+      [EVENT_REORDER, contentDocAcc],
+      [
+        EVENT_STATE_CHANGE,
+        event => {
+          const scEvent = event.QueryInterface(nsIAccessibleStateChangeEvent);
+          const id = getAccessibleDOMNodeID(event.accessible);
+          return (
+            id === DEFAULT_FISSION_DOC_BODY_ID &&
+            scEvent.state === STATE_BUSY &&
+            scEvent.isEnabled === false
+          );
+        },
+      ],
+    ]);
+    await SpecialPowers.spawn(browser, [FISSION_IFRAME_ID], id => {
+      content.document.body.appendChild(content.document.getElementById(id));
+    });
+    await onEvents;
+
+    iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    const newFissionDocAcc = iframeAcc.firstChild;
+
+    ok(!isDefunct(iframeAcc), "IFRAME should be accessible");
+    is(iframeAcc.childCount, 1, "IFRAME accessible should have a single child");
+    ok(!isDefunct(newFissionDocAcc), "fission document should be accessible");
+    ok(
+      isDefunct(fissionDocAcc),
+      "Original IFRAME document accessible should be defunct."
+    );
+    isnot(
+      iframeAcc.firstChild,
+      fissionDocAcc,
+      "A new accessible is created for a fission document."
+    );
+    is(
+      iframeAcc.firstChild,
+      newFissionDocAcc,
+      "A new accessible for a fission document is the child of the IFRAME accessible"
+    );
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_reframe_visibility.js
@@ -0,0 +1,116 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/states.js */
+/* import-globals-from ../../mochitest/role.js */
+loadScripts({ name: "states.js", dir: MOCHITESTS_DIR });
+loadScripts({ name: "role.js", dir: MOCHITESTS_DIR });
+
+addAccessibleTask(
+  `<input id="textbox" value="hello"/>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    info(
+      "Check that the IFRAME and the fission document are accessible initially."
+    );
+    let iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    ok(!isDefunct(iframeAcc), "IFRAME should be accessible");
+    ok(!isDefunct(fissionDocAcc), "fission document should be accessible");
+
+    info(
+      "Hide the IFRAME and check that it's gone along with the fission document."
+    );
+    let onEvents = waitForEvent(EVENT_REORDER, contentDocAcc);
+    await SpecialPowers.spawn(browser, [FISSION_IFRAME_ID], contentId => {
+      content.document.getElementById(contentId).style.display = "none";
+    });
+    await onEvents;
+
+    ok(
+      isDefunct(iframeAcc),
+      "IFRAME accessible should be defunct when hidden."
+    );
+    if (gFissionBrowser) {
+      ok(
+        !isDefunct(fissionDocAcc),
+        "IFRAME document's accessible is not defunct when the IFRAME is hidden and fission is enabled."
+      );
+    } else {
+      ok(
+        isDefunct(fissionDocAcc),
+        "IFRAME document's accessible is defunct when the IFRAME is hidden and fission is not enabled."
+      );
+    }
+    ok(
+      !findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID),
+      "No accessible for an IFRAME present."
+    );
+    ok(
+      !findAccessibleChildByID(contentDocAcc, DEFAULT_FISSION_DOC_BODY_ID),
+      "No accessible for the fission document present."
+    );
+
+    info(
+      "Show the IFRAME and check that a new accessible is created for it as " +
+        "well as the fission document."
+    );
+
+    const events = [[EVENT_REORDER, contentDocAcc]];
+    if (!gFissionBrowser) {
+      events.push([
+        EVENT_STATE_CHANGE,
+        event => {
+          const scEvent = event.QueryInterface(nsIAccessibleStateChangeEvent);
+          const id = getAccessibleDOMNodeID(event.accessible);
+          return (
+            id === DEFAULT_FISSION_DOC_BODY_ID &&
+            scEvent.state === STATE_BUSY &&
+            scEvent.isEnabled === false
+          );
+        },
+      ]);
+    }
+    onEvents = waitForEvents(events);
+    await SpecialPowers.spawn(browser, [FISSION_IFRAME_ID], contentId => {
+      content.document.getElementById(contentId).style.display = "block";
+    });
+    await onEvents;
+
+    iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    const newFissionDocAcc = iframeAcc.firstChild;
+
+    ok(!isDefunct(iframeAcc), "IFRAME should be accessible");
+    is(iframeAcc.childCount, 1, "IFRAME accessible should have a single child");
+    ok(newFissionDocAcc, "fission document exists");
+    ok(!isDefunct(newFissionDocAcc), "fission document should be accessible");
+    if (gFissionBrowser) {
+      ok(
+        !isDefunct(fissionDocAcc),
+        "Original IFRAME document accessible should not be defunct when fission is enabled."
+      );
+      is(
+        iframeAcc.firstChild,
+        fissionDocAcc,
+        "Existing accessible is used for a fission document."
+      );
+    } else {
+      ok(
+        isDefunct(fissionDocAcc),
+        "Original IFRAME document accessible should be defunct when fission is not enabled."
+      );
+      isnot(
+        iframeAcc.firstChild,
+        fissionDocAcc,
+        "A new accessible is created for a fission document."
+      );
+    }
+    is(
+      iframeAcc.firstChild,
+      newFissionDocAcc,
+      "A new accessible for a fission document is the child of the IFRAME accessible"
+    );
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_src_change.js
@@ -0,0 +1,61 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/role.js */
+loadScripts({ name: "role.js", dir: MOCHITESTS_DIR });
+
+addAccessibleTask(
+  `<input id="textbox" value="hello"/>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    info(
+      "Check that the IFRAME and the fission document are accessible initially."
+    );
+    let iframeAcc = findAccessibleChildByID(contentDocAcc, FISSION_IFRAME_ID);
+    ok(isAccessible(iframeAcc), "IFRAME should be accessible");
+    ok(isAccessible(fissionDocAcc), "fission document should be accessible");
+
+    info("Replace src URL for the IFRAME with one with different origin.");
+    const onDocLoad = waitForEvent(
+      EVENT_DOCUMENT_LOAD_COMPLETE,
+      DEFAULT_FISSION_DOC_BODY_ID
+    );
+
+    await SpecialPowers.spawn(
+      browser,
+      [FISSION_IFRAME_ID, CURRENT_CONTENT_DIR],
+      (id, olddir) => {
+        const { src } = content.document.getElementById(id);
+        content.document.getElementById(id).src = src.replace(
+          olddir,
+          "http://example.net/browser/accessible/tests/browser/"
+        );
+      }
+    );
+    const newFissionDocAcc = (await onDocLoad).accessible;
+
+    ok(isAccessible(iframeAcc), "IFRAME should be accessible");
+    ok(
+      isAccessible(newFissionDocAcc),
+      "new fission document should be accessible"
+    );
+    isnot(
+      fissionDocAcc,
+      newFissionDocAcc,
+      "A new accessible is created for a fission document."
+    );
+    is(
+      iframeAcc.firstChild,
+      newFissionDocAcc,
+      "An IFRAME has a new accessible for a fission document as a child."
+    );
+    is(
+      newFissionDocAcc.parent,
+      iframeAcc,
+      "A new accessible for a fission document has an IFRAME as a parent."
+    );
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/browser_take_focus.js
@@ -0,0 +1,26 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* import-globals-from ../../mochitest/states.js */
+loadScripts(
+  { name: "role.js", dir: MOCHITESTS_DIR },
+  { name: "states.js", dir: MOCHITESTS_DIR }
+);
+
+addAccessibleTask(
+  `<input id="textbox" value="hello"/>`,
+  async function(browser, fissionDocAcc, contentDocAcc) {
+    const textbox = findAccessibleChildByID(fissionDocAcc, "textbox");
+    testStates(textbox, STATE_FOCUSABLE, 0, STATE_FOCUSED);
+
+    let onFocus = waitForEvent(EVENT_FOCUS, textbox);
+    textbox.takeFocus();
+    await onFocus;
+
+    testStates(textbox, STATE_FOCUSABLE | STATE_FOCUSED, 0);
+  },
+  { topLevel: false, iframe: true }
+);
new file mode 100644
--- /dev/null
+++ b/accessible/tests/browser/fission/head.js
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+// Load the shared-head file first.
+/* import-globals-from ../shared-head.js */
+Services.scriptloader.loadSubScript(
+  "chrome://mochitests/content/browser/accessible/tests/browser/shared-head.js",
+  this
+);
+
+// Loading and common.js from accessible/tests/mochitest/ for all tests, as
+// well as promisified-events.js.
+loadScripts(
+  { name: "common.js", dir: MOCHITESTS_DIR },
+  { name: "promisified-events.js", dir: MOCHITESTS_DIR }
+);
--- a/accessible/tests/browser/shared-head.js
+++ b/accessible/tests/browser/shared-head.js
@@ -272,16 +272,17 @@ async function loadContentScripts(target
 function attrsToString(attrs) {
   return Object.entries(attrs)
     .map(([attr, value]) => `${attr}=${JSON.stringify(value)}`)
     .join(" ");
 }
 
 function wrapWithFissionIFrame(doc, options = {}) {
   const srcURL = new URL(`${CURRENT_CONTENT_DIR}fission_document_builder.sjs`);
+  const { fissionIFrameAttrs = {} } = options;
   if (doc.endsWith("html")) {
     srcURL.searchParams.append("file", `${CURRENT_FILE_DIR}e10s/${doc}`);
   } else {
     const { fissionDocBodyAttrs = {} } = options;
     const attrs = {
       id: DEFAULT_FISSION_DOC_BODY_ID,
       ...fissionDocBodyAttrs,
     };
@@ -293,17 +294,23 @@ function wrapWithFissionIFrame(doc, opti
           <meta charset="utf-8"/>
           <title>Accessibility Fission Test</title>
         </head>
         <body ${attrsToString(attrs)}>${doc}</body>
       </html>`
     );
   }
 
-  return `<iframe id="${FISSION_IFRAME_ID}" src="${srcURL.href}"/>`;
+  const iframeAttrs = {
+    id: FISSION_IFRAME_ID,
+    src: srcURL.href,
+    ...fissionIFrameAttrs,
+  };
+
+  return `<iframe ${attrsToString(iframeAttrs)}/>`;
 }
 
 /**
  * Takes an HTML snippet or HTML doc url and returns an encoded URI for a full
  * document with the snippet or the URL as a source for the IFRAME.
  * @param {String} doc
  *        a markup snippet or url.
  * @param {Object} options
@@ -363,17 +370,17 @@ function accessibleTask(doc, task, optio
     const onContentDocLoad = waitForEvent(
       EVENT_DOCUMENT_LOAD_COMPLETE,
       DEFAULT_CONTENT_DOC_BODY_ID
     );
 
     let onFissionDocLoad;
     if (options.fission) {
       gIsFission = true;
-      if (gFissionBrowser) {
+      if (gFissionBrowser && !options.skipFissionDocLoad) {
         onFissionDocLoad = waitForEvent(
           EVENT_DOCUMENT_LOAD_COMPLETE,
           DEFAULT_FISSION_DOC_BODY_ID
         );
       }
     }
 
     await BrowserTestUtils.withNewTab(
@@ -396,17 +403,17 @@ function accessibleTask(doc, task, optio
 
         Logger.log(
           `e10s enabled: ${Services.appinfo.browserTabsRemoteAutostart}`
         );
         Logger.log(`Actually remote browser: ${browser.isRemoteBrowser}`);
 
         const { accessible: docAccessible } = await onContentDocLoad;
         let fissionDocAccessible;
-        if (options.fission) {
+        if (options.fission && !options.skipFissionDocLoad) {
           fissionDocAccessible = gFissionBrowser
             ? (await onFissionDocLoad).accessible
             : findAccessibleChildByID(docAccessible, FISSION_IFRAME_ID)
                 .firstChild;
         }
 
         await task(
           browser,
@@ -429,28 +436,31 @@ function accessibleTask(doc, task, optio
  * @param  {null|Object} options
  *         Options for running accessibility test tasks:
  *         - {Boolean} topLevel
  *           Flag to run the test with content in the top level content process.
  *           Default is true.
  *         - {Boolean} iframe
  *           Flag to run the test with content wrapped in an iframe. Default is
  *           false.
+ *         - {Object} fissionIFrameAttrs
+ *           A map of attribute/value pairs to be applied to the fission
+ *           IFRAME element.
+ *         - {Boolean} skipFissionDocLoad
+ *           If true, the test will not wait for the fission document
+ *           loaded event (useful when the fission IFRAME is initially hidden).
  */
-function addAccessibleTask(
-  doc,
-  task,
-  { topLevel = true, iframe = false } = {}
-) {
+function addAccessibleTask(doc, task, options = {}) {
+  const { topLevel = true, iframe = false } = options;
   if (topLevel) {
-    add_task(accessibleTask(doc, task));
+    add_task(accessibleTask(doc, task, options));
   }
 
   if (iframe) {
-    add_task(accessibleTask(doc, task, { fission: true }));
+    add_task(accessibleTask(doc, task, { ...options, fission: true }));
   }
 }
 
 /**
  * Check if an accessible object has a defunct test.
  * @param  {nsIAccessible}  accessible object to test defunct state for
  * @return {Boolean}        flag indicating defunct state
  */
--- a/browser/components/preferences/siteDataSettings.js
+++ b/browser/components/preferences/siteDataSettings.js
@@ -277,16 +277,18 @@ let gSiteDataSettings = {
         if (allowed) {
           try {
             await SiteDataManager.remove(removals);
           } catch (e) {
             Cu.reportError(e);
           }
         }
       }
+    } else {
+      allowed = true;
     }
 
     // If the user cancelled the confirm dialog keep the site data window open,
     // they can still press cancel again to exit.
     if (allowed) {
       this.close();
     }
   },
--- a/build/moz.configure/toolchain.configure
+++ b/build/moz.configure/toolchain.configure
@@ -857,22 +857,22 @@ def default_cxx_compilers(c_compiler, ot
             return (os.path.join(dir, file.replace('clang', 'clang++')),)
 
         return (c_compiler.compiler,)
 
     return default_cxx_compilers
 
 
 @template
-def provided_program(env_var):
+def provided_program(env_var, when=None):
     '''Template handling cases where a program can be specified either as a
     path or as a path with applicable arguments.
     '''
 
-    @depends_if(env_var)
+    @depends_if(env_var, when=when)
     @imports(_from='itertools', _import='takewhile')
     @imports(_from='mozbuild.shellutil', _import='split', _as='shell_split')
     def provided(cmd):
         # Assume the first dash-prefixed item (and any subsequent items) are
         # command-line options, the item before the dash-prefixed item is
         # the program we're looking for, and anything before that is a wrapper
         # of some kind (e.g. sccache).
         cmd = shell_split(cmd[0])
--- a/devtools/client/netmonitor/src/components/MonitorPanel.js
+++ b/devtools/client/netmonitor/src/components/MonitorPanel.js
@@ -22,17 +22,17 @@ const {
   getSelectedRequest,
   isSelectedRequestVisible,
 } = require("../selectors/index");
 
 // Components
 const SplitBox = createFactory(
   require("devtools/client/shared/components/splitter/SplitBox")
 );
-const RequestList = createFactory(require("./RequestList"));
+const RequestList = createFactory(require("./request-list/RequestList"));
 const Toolbar = createFactory(require("./Toolbar"));
 
 loader.lazyGetter(this, "NetworkDetailsPanel", function() {
   return createFactory(require("./NetworkDetailsPanel"));
 });
 
 loader.lazyGetter(this, "NetworkActionBar", function() {
   return createFactory(require("./NetworkActionBar"));
--- a/devtools/client/netmonitor/src/components/moz.build
+++ b/devtools/client/netmonitor/src/components/moz.build
@@ -1,14 +1,15 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 DIRS += [
     'request-blocking',
+    'request-list',
     'search',
     'websockets',
 ]
 
 DevToolsModules(
     'App.js',
     'CachePanel.js',
     'CookiesPanel.js',
@@ -17,38 +18,16 @@ DevToolsModules(
     'HeadersPanel.js',
     'HtmlPreview.js',
     'JSONPreview.js',
     'MonitorPanel.js',
     'NetworkActionBar.js',
     'NetworkDetailsPanel.js',
     'ParamsPanel.js',
     'PropertiesView.js',
-    'RequestList.js',
-    'RequestListColumnCause.js',
-    'RequestListColumnContentSize.js',
-    'RequestListColumnCookies.js',
-    'RequestListColumnDomain.js',
-    'RequestListColumnFile.js',
-    'RequestListColumnMethod.js',
-    'RequestListColumnProtocol.js',
-    'RequestListColumnRemoteIP.js',
-    'RequestListColumnResponseHeader.js',
-    'RequestListColumnScheme.js',
-    'RequestListColumnSetCookies.js',
-    'RequestListColumnStatus.js',
-    'RequestListColumnTime.js',
-    'RequestListColumnTransferredSize.js',
-    'RequestListColumnType.js',
-    'RequestListColumnUrl.js',
-    'RequestListColumnWaterfall.js',
-    'RequestListContent.js',
-    'RequestListEmptyNotice.js',
-    'RequestListHeader.js',
-    'RequestListItem.js',
     'ResponsePanel.js',
     'SecurityPanel.js',
     'SecurityState.js',
     'SourceEditor.js',
     'StackTracePanel.js',
     'StatisticsPanel.js',
     'StatusBar.js',
     'StatusCode.js',
rename from devtools/client/netmonitor/src/components/RequestList.js
rename to devtools/client/netmonitor/src/components/request-list/RequestList.js
--- a/devtools/client/netmonitor/src/components/RequestList.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestList.js
@@ -5,17 +5,17 @@
 "use strict";
 
 const { createFactory } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const { div } = dom;
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 
 // Components
-const StatusBar = createFactory(require("./StatusBar"));
+const StatusBar = createFactory(require("../StatusBar"));
 
 loader.lazyGetter(this, "RequestListContent", function() {
   return createFactory(require("./RequestListContent"));
 });
 loader.lazyGetter(this, "RequestListEmptyNotice", function() {
   return createFactory(require("./RequestListEmptyNotice"));
 });
 
rename from devtools/client/netmonitor/src/components/RequestListColumnCause.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnCause.js
rename from devtools/client/netmonitor/src/components/RequestListColumnContentSize.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnContentSize.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnContentSize.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnContentSize.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedSize } = require("../utils/format-utils");
+const { getFormattedSize } = require("../../utils/format-utils");
 
 class RequestListColumnContentSize extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
   }
 
rename from devtools/client/netmonitor/src/components/RequestListColumnCookies.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnCookies.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnCookies.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnCookies.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { fetchNetworkUpdatePacket } = require("../utils/request-utils");
+const { fetchNetworkUpdatePacket } = require("../../utils/request-utils");
 
 class RequestListColumnCookies extends Component {
   static get propTypes() {
     return {
       connector: PropTypes.object.isRequired,
       item: PropTypes.object.isRequired,
     };
   }
rename from devtools/client/netmonitor/src/components/RequestListColumnDomain.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnDomain.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnDomain.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnDomain.js
@@ -5,19 +5,19 @@
 "use strict";
 
 const {
   Component,
   createFactory,
 } = require("devtools/client/shared/vendor/react");
 const { td } = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedIPAndPort } = require("../utils/format-utils");
-const { propertiesEqual } = require("../utils/request-utils");
-const SecurityState = createFactory(require("./SecurityState"));
+const { getFormattedIPAndPort } = require("../../utils/format-utils");
+const { propertiesEqual } = require("../../utils/request-utils");
+const SecurityState = createFactory(require("../SecurityState"));
 
 const UPDATED_DOMAIN_PROPS = ["remoteAddress", "securityState", "urlDetails"];
 
 class RequestListColumnDomain extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
       onSecurityIconMouseDown: PropTypes.func.isRequired,
rename from devtools/client/netmonitor/src/components/RequestListColumnFile.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnFile.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnFile.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnFile.js
@@ -1,19 +1,19 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
-const { L10N } = require("../utils/l10n");
+const { L10N } = require("../../utils/l10n");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { propertiesEqual } = require("../utils/request-utils");
+const { propertiesEqual } = require("../../utils/request-utils");
 
 const UPDATED_FILE_PROPS = ["urlDetails"];
 
 class RequestListColumnFile extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
rename from devtools/client/netmonitor/src/components/RequestListColumnMethod.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnMethod.js
rename from devtools/client/netmonitor/src/components/RequestListColumnProtocol.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnProtocol.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnProtocol.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnProtocol.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedProtocol } = require("../utils/request-utils");
+const { getFormattedProtocol } = require("../../utils/request-utils");
 
 class RequestListColumnProtocol extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
   }
 
rename from devtools/client/netmonitor/src/components/RequestListColumnRemoteIP.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnRemoteIP.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnRemoteIP.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnRemoteIP.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedIPAndPort } = require("../utils/format-utils");
+const { getFormattedIPAndPort } = require("../../utils/format-utils");
 
 class RequestListColumnRemoteIP extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
   }
 
rename from devtools/client/netmonitor/src/components/RequestListColumnResponseHeader.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnResponseHeader.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnResponseHeader.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnResponseHeader.js
@@ -5,17 +5,17 @@
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 const {
   getResponseHeader,
   fetchNetworkUpdatePacket,
-} = require("../utils/request-utils");
+} = require("../../utils/request-utils");
 
 /**
  * Renders a response header column in the requests list.  The actual
  * header to show is passed as a prop.
  */
 class RequestListColumnResponseHeader extends Component {
   static get propTypes() {
     return {
rename from devtools/client/netmonitor/src/components/RequestListColumnScheme.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnScheme.js
rename from devtools/client/netmonitor/src/components/RequestListColumnSetCookies.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnSetCookies.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnSetCookies.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnSetCookies.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { fetchNetworkUpdatePacket } = require("../utils/request-utils");
+const { fetchNetworkUpdatePacket } = require("../../utils/request-utils");
 
 class RequestListColumnSetCookies extends Component {
   static get propTypes() {
     return {
       connector: PropTypes.object.isRequired,
       item: PropTypes.object.isRequired,
     };
   }
rename from devtools/client/netmonitor/src/components/RequestListColumnStatus.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnStatus.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnStatus.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnStatus.js
@@ -8,17 +8,17 @@ const {
   Component,
   createFactory,
 } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 
 // Components
 
-const StatusCode = createFactory(require("./StatusCode"));
+const StatusCode = createFactory(require("../StatusCode"));
 
 class RequestListColumnStatus extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
   }
 
rename from devtools/client/netmonitor/src/components/RequestListColumnTime.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnTime.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnTime.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnTime.js
@@ -2,23 +2,23 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedTime } = require("../utils/format-utils");
+const { getFormattedTime } = require("../../utils/format-utils");
 const {
   fetchNetworkUpdatePacket,
   getResponseTime,
   getStartTime,
   getEndTime,
-} = require("../utils/request-utils");
+} = require("../../utils/request-utils");
 
 /**
  * This component represents a column displaying selected
  * timing value. There are following possible values this
  * column can render:
  * - Start Time
  * - End Time
  * - Response Time
rename from devtools/client/netmonitor/src/components/RequestListColumnTransferredSize.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnTransferredSize.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnTransferredSize.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnTransferredSize.js
@@ -2,20 +2,20 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedSize } = require("../utils/format-utils");
-const { L10N } = require("../utils/l10n");
-const { propertiesEqual } = require("../utils/request-utils");
-const { BLOCKED_REASON_MESSAGES } = require("../constants");
+const { getFormattedSize } = require("../../utils/format-utils");
+const { L10N } = require("../../utils/l10n");
+const { propertiesEqual } = require("../../utils/request-utils");
+const { BLOCKED_REASON_MESSAGES } = require("../../constants");
 
 const SIZE_CACHED = L10N.getStr("networkMenu.sizeCached");
 const SIZE_SERVICE_WORKER = L10N.getStr("networkMenu.sizeServiceWorker");
 const SIZE_UNAVAILABLE = L10N.getStr("networkMenu.sizeUnavailable");
 const SIZE_UNAVAILABLE_TITLE = L10N.getStr("networkMenu.sizeUnavailable.title");
 const UPDATED_TRANSFERRED_PROPS = [
   "transferredSize",
   "fromCache",
rename from devtools/client/netmonitor/src/components/RequestListColumnType.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnType.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnType.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnType.js
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getAbbreviatedMimeType } = require("../utils/request-utils");
+const { getAbbreviatedMimeType } = require("../../utils/request-utils");
 
 class RequestListColumnType extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
     };
   }
 
rename from devtools/client/netmonitor/src/components/RequestListColumnUrl.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnUrl.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnUrl.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnUrl.js
@@ -4,21 +4,21 @@
 
 "use strict";
 
 const {
   Component,
   createFactory,
 } = require("devtools/client/shared/vendor/react");
 const { td } = require("devtools/client/shared/vendor/react-dom-factories");
-const { L10N } = require("../utils/l10n");
+const { L10N } = require("../../utils/l10n");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
-const { getFormattedIPAndPort } = require("../utils/format-utils");
-const { propertiesEqual } = require("../utils/request-utils");
-const SecurityState = createFactory(require("./SecurityState"));
+const { getFormattedIPAndPort } = require("../../utils/format-utils");
+const { propertiesEqual } = require("../../utils/request-utils");
+const SecurityState = createFactory(require("../SecurityState"));
 const UPDATED_FILE_PROPS = ["remoteAddress", "securityState", "urlDetails"];
 
 class RequestListColumnUrl extends Component {
   static get propTypes() {
     return {
       item: PropTypes.object.isRequired,
       onSecurityIconMouseDown: PropTypes.func.isRequired,
     };
rename from devtools/client/netmonitor/src/components/RequestListColumnWaterfall.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListColumnWaterfall.js
--- a/devtools/client/netmonitor/src/components/RequestListColumnWaterfall.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListColumnWaterfall.js
@@ -3,24 +3,24 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const { Component } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 
-const { L10N } = require("../utils/l10n");
+const { L10N } = require("../../utils/l10n");
 const {
   fetchNetworkUpdatePacket,
   propertiesEqual,
-} = require("../utils/request-utils");
+} = require("../../utils/request-utils");
 
 // List of properties of the timing info we want to create boxes for
-const { TIMING_KEYS } = require("../constants");
+const { TIMING_KEYS } = require("../../constants");
 
 const { div } = dom;
 
 const UPDATED_WATERFALL_PROPS = [
   "eventTimings",
   "fromCache",
   "fromServiceWorker",
   "totalTime",
rename from devtools/client/netmonitor/src/components/RequestListContent.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListContent.js
--- a/devtools/client/netmonitor/src/components/RequestListContent.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListContent.js
@@ -12,24 +12,24 @@ const dom = require("devtools/client/sha
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 const {
   connect,
 } = require("devtools/client/shared/redux/visibility-handler-connect");
 const {
   HTMLTooltip,
 } = require("devtools/client/shared/widgets/tooltip/HTMLTooltip");
 
-const Actions = require("../actions/index");
-const { formDataURI } = require("../utils/request-utils");
+const Actions = require("../../actions/index");
+const { formDataURI } = require("../../utils/request-utils");
 const {
   getDisplayedRequests,
   getColumns,
   getSelectedRequest,
   getWaterfallScale,
-} = require("../selectors/index");
+} = require("../../selectors/index");
 
 loader.lazyRequireGetter(
   this,
   "openRequestInTab",
   "devtools/client/netmonitor/src/utils/firefox/open-request-in-tab",
   true
 );
 loader.lazyGetter(this, "setImageTooltip", function() {
@@ -39,17 +39,17 @@ loader.lazyGetter(this, "setImageTooltip
 loader.lazyGetter(this, "getImageDimensions", function() {
   return require("devtools/client/shared/widgets/tooltip/ImageTooltipHelper")
     .getImageDimensions;
 });
 
 // Components
 const RequestListHeader = createFactory(require("./RequestListHeader"));
 const RequestListItem = createFactory(require("./RequestListItem"));
-const RequestListContextMenu = require("../widgets/RequestListContextMenu");
+const RequestListContextMenu = require("../../widgets/RequestListContextMenu");
 
 const { div } = dom;
 
 // Tooltip show / hide delay in ms
 const REQUESTS_TOOLTIP_TOGGLE_DELAY = 500;
 // Tooltip image maximum dimension in px
 const REQUESTS_TOOLTIP_IMAGE_MAX_DIM = 400;
 // Gecko's scrollTop is int32_t, so the maximum value is 2^31 - 1 = 2147483647
rename from devtools/client/netmonitor/src/components/RequestListEmptyNotice.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListEmptyNotice.js
--- a/devtools/client/netmonitor/src/components/RequestListEmptyNotice.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListEmptyNotice.js
@@ -8,20 +8,20 @@ const {
   Component,
   createFactory,
 } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 const {
   connect,
 } = require("devtools/client/shared/redux/visibility-handler-connect");
-const Actions = require("../actions/index");
-const { ACTIVITY_TYPE } = require("../constants");
-const { L10N } = require("../utils/l10n");
-const { getPerformanceAnalysisURL } = require("../utils/mdn-utils");
+const Actions = require("../../actions/index");
+const { ACTIVITY_TYPE } = require("../../constants");
+const { L10N } = require("../../utils/l10n");
+const { getPerformanceAnalysisURL } = require("../../utils/mdn-utils");
 
 // Components
 const MDNLink = createFactory(
   require("devtools/client/shared/components/MdnLink")
 );
 
 const { button, div, span } = dom;
 
rename from devtools/client/netmonitor/src/components/RequestListHeader.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListHeader.js
--- a/devtools/client/netmonitor/src/components/RequestListHeader.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListHeader.js
@@ -14,28 +14,28 @@ const PropTypes = require("devtools/clie
 const {
   connect,
 } = require("devtools/client/shared/redux/visibility-handler-connect");
 const {
   getTheme,
   addThemeObserver,
   removeThemeObserver,
 } = require("devtools/client/shared/theme");
-const Actions = require("../actions/index");
+const Actions = require("../../actions/index");
 const {
   HEADERS,
   REQUESTS_WATERFALL,
   MIN_COLUMN_WIDTH,
   DEFAULT_COLUMN_WIDTH,
-} = require("../constants");
-const { getColumns, getWaterfallScale } = require("../selectors/index");
-const { getFormattedTime } = require("../utils/format-utils");
-const { L10N } = require("../utils/l10n");
-const RequestListHeaderContextMenu = require("../widgets/RequestListHeaderContextMenu");
-const WaterfallBackground = require("../widgets/WaterfallBackground");
+} = require("../../constants");
+const { getColumns, getWaterfallScale } = require("../../selectors/index");
+const { getFormattedTime } = require("../../utils/format-utils");
+const { L10N } = require("../../utils/l10n");
+const RequestListHeaderContextMenu = require("../../widgets/RequestListHeaderContextMenu");
+const WaterfallBackground = require("../../widgets/WaterfallBackground");
 const Draggable = createFactory(
   require("devtools/client/shared/components/splitter/Draggable")
 );
 
 const { div, button } = dom;
 
 /**
  * Render the request list header with sorting arrows for columns.
rename from devtools/client/netmonitor/src/components/RequestListItem.js
rename to devtools/client/netmonitor/src/components/request-list/RequestListItem.js
--- a/devtools/client/netmonitor/src/components/RequestListItem.js
+++ b/devtools/client/netmonitor/src/components/request-list/RequestListItem.js
@@ -8,18 +8,18 @@ const {
   Component,
   createFactory,
 } = require("devtools/client/shared/vendor/react");
 const dom = require("devtools/client/shared/vendor/react-dom-factories");
 const PropTypes = require("devtools/client/shared/vendor/react-prop-types");
 const {
   fetchNetworkUpdatePacket,
   propertiesEqual,
-} = require("../utils/request-utils");
-const { RESPONSE_HEADERS } = require("../constants");
+} = require("../../utils/request-utils");
+const { RESPONSE_HEADERS } = require("../../constants");
 
 // Components
 /* global
   RequestListColumnCause,
   RequestListColumnContentSize,
   RequestListColumnCookies,
   RequestListColumnDomain,
   RequestListColumnFile,
new file mode 100644
--- /dev/null
+++ b/devtools/client/netmonitor/src/components/request-list/moz.build
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DevToolsModules(
+    'RequestList.js',
+    'RequestListColumnCause.js',
+    'RequestListColumnContentSize.js',
+    'RequestListColumnCookies.js',
+    'RequestListColumnDomain.js',
+    'RequestListColumnFile.js',
+    'RequestListColumnMethod.js',
+    'RequestListColumnProtocol.js',
+    'RequestListColumnRemoteIP.js',
+    'RequestListColumnResponseHeader.js',
+    'RequestListColumnScheme.js',
+    'RequestListColumnSetCookies.js',
+    'RequestListColumnStatus.js',
+    'RequestListColumnTime.js',
+    'RequestListColumnTransferredSize.js',
+    'RequestListColumnType.js',
+    'RequestListColumnUrl.js',
+    'RequestListColumnWaterfall.js',
+    'RequestListContent.js',
+    'RequestListEmptyNotice.js',
+    'RequestListHeader.js',
+    'RequestListItem.js',
+)
--- a/devtools/docs/contributing/code-reviews-setup.md
+++ b/devtools/docs/contributing/code-reviews-setup.md
@@ -19,16 +19,9 @@ If you added an `:ircnickname` in your B
 ---
 
 Once you understand the above, please [create a Phabricator account](https://moz-conduit.readthedocs.io/en/latest/phabricator-user.html#creating-an-account). 
 
 
 
 ## Set up to send code for review
 
-There are two ways of doing this (sorry, let us explain!):
-
-* you can use [Arcanist](https://moz-conduit.readthedocs.io/en/latest/arcanist-user.html), which is the official command line tool that accompanies Phabricator, and should be enough for most of the cases.
-* or you could use [moz-phab](https://moz-conduit.readthedocs.io/en/latest/phabricator-user.html#using-moz-phab), which is a Mozilla-developed wrapper for Arcanist that makes it work better with the "Mozilla workflow".
-
-**We recommend you use Arcanist** for now, unless you are more experienced and know what you're doing, or want to take advantage of `moz-phab`'s features. You need to install Arcanist for `moz-phab` to work anyway.
-
-If you decide to use `moz-phab`, please be aware that we started using this new tool quite recently, and you might find bugs (or things that don't feel quite right), in which case you could either [have a look at the existing bugs](https://bugzilla.mozilla.org/buglist.cgi?product=Conduit&component=Review%20Wrapper&resolution=---) to see if someone else has encountered this again, or simply [file a bug](https://bugzilla.mozilla.org/enter_bug.cgi?product=Conduit&component=Review%20Wrapper) using your fancy new Bugzilla account 😀
+In order to push your commit to Phabricator, you need to install [moz-phab](https://moz-conduit.readthedocs.io/en/latest/phabricator-user.html#using-moz-phab).
--- a/devtools/docs/contributing/making-prs.md
+++ b/devtools/docs/contributing/making-prs.md
@@ -1,95 +1,66 @@
 # Sending your code for review (also known as "sending patches")
 
-There are two ways of doing this. We'll first explain the recommended version, and then the alternative, which is older, and we don't recommend, but might be handy in some circumstances.
-
-## Using Phabricator + Differential (RECOMMENDED)
-
 First, commit your changes. For example:
 
 ```bash
 hg add /path/to/file/changed
 hg commit -m "Bug 1234567 - Implement feature XYZ. r=name,name2!"
 ```
 
  The commit message explained in detail:
  - `Bug 1234567` - The number of the bug in bugzilla.
  - `- Implement feature XYZ.` - The commit message.
- - `r=name` - The short form to request a review.
- - `,name2!` - You can have more than one reviewer. The `!` makes the review a *blocking* review (Patch can not land without accepted review). You will also need to add it to the name in the popup after you ran `arc diff`.
+ - `r=name` - The short form to request a review. Enter the name you found using the 
+ instructions in the [previous step](./code-reviews-find-reviewer.md).
+ - `,name2!` - You can have more than one reviewer. The `!` makes the review a *blocking* review (Patch can not land without accepted review).
 
- Please note your first commit message will be also the title of your patch in Phabricator. (Don't worry, if something goes wrong. You can still change things in the UI later).
-
-Then create a revision in Differential, using Arcanist (or `moz-phab`):
+Then create a revision in Phabricator using `moz-phab`:
 
 ```bash
-arc diff
+moz-phab submit
 ```
 
-You'll be taken to an editor to add extra details, although some of them might be prepopulated using the data in the commit message. Make sure the bug number is filled with the right value. You'll also be able to set reviewers here: enter the names you found using the instructions in the [previous step](./code-reviews-find-reviewer.md).
-
-Save the changes and quit the editor. A revision will be created including those data and also the difference in code between your changes and the point in the repository where you based your work (this difference is sometimes called "a patch", as it's what you'd need to apply on the repository to get to the final state).
+A revision will be created including that information and the difference in code between your changes and the point in the repository where you based your work (this difference is sometimes called "a patch", as it's what you'd need to apply on the repository to get to the final state).
 
 If you click on the provided URL for the revision, it'll bring you to Phabricator's interface, which the reviewer will visit as well in order to review the code. They will look at your changes and decide if they need any additional work, based on whether the changes do fix what the bug describes or not. To get a better idea of the types of things they will look at and verify, read the [code reviews checklist](./code-reviews-checklist.md).
 
+For more information on using moz-phab, you can run:
+
+```bash
+moz-phab -h
+```
+
+or to get information on a specific command (here `submit`):
+
+```bash
+moz-phab submit -h
+```
+
 The reviewer might suggest you do additional changes. For example, they might recommend using a helper that already exists (but you were not aware of), or they might recommend renaming things to make things clearer. Or they might recommend you do *less* things (e.g. if you changed other things that are out of scope for the bug). Or they might simply ask questions if things aren't clear. You can also ask questions if the comments are unclear or if you're unsure about parts of the code you're interacting with. Something that looks very obvious to one person might confuse others.
 
-Hence, you might need to go back to the code and do some edits to address the issues and recommendations. Once you have done this, we want to amend the existing commit:
+Hence, you might need to go back to the code and do some edits to address the issues and recommendations. Once you have done this, you must update the existing commit:
 
 ```bash
 hg commit --amend -m 'Address revision issues: rewrite to use helper helpfulHelper()'
 ```
 
 And submit the change again:
 
 ```bash
-arc diff
-```
-
-This time, the editor that opens should have filled most of the information already. Add any missing information, save and quit the editor.
-
-You might have to go through this cycle of uploading a diff and getting it reviewed several times, depending on the complexity of the bug.
-
-**NOTE**: by default, Arcanist opens nano, which can be a bit confusing if you're not a nano user. You can override this by setting the `EDITOR` env variable. For instance, you could add this to your `.bash_profile` to set Vim as your preferred editor:
-
-```bash
-export EDITOR="/usr/local/bin/vim"
+moz-phab submit
 ```
 
-Once your code fixes the bug, and there are no more blocking issues, the reviewer will approve the review, and the code can be landed in the repository now.
-
-For more information on using Arcanist, please refer to [their user guide](https://moz-conduit.readthedocs.io/en/latest/arcanist-user.html#submitting-and-updating-patches).
-
-## Using Mercurial (not recommended)
-
-We don't recommend to use this method as it's more manual and makes reviewing and landing code a bit more tedious.
-
-### Creating a patch
-
-To create a patch you need to first commit your changes and then export them to a patch file.
+You might have to go through this cycle of submitting changes and getting it reviewed several times, depending on the complexity of the bug.
 
-```
-hg commit -m 'your commit message'
-hg export > /path/to/your/patch
-```
-
-### Commit messages
-
-Commit messages should follow the pattern `Bug 1234567 - change description. r=reviewer`.
+Once your code fixes the bug, and there are no more blocking issues, the reviewer will approve the changes, and the code can be landed in the repository now.
 
-First is the bug number related to the patch. Then the description should explain what the patch changes. The last part is used to keep track of the reviewer for the patch.
 
-### Submitting a patch
-
-Once you have a patch file, add it as an attachment to the Bugzilla ticket you are working on and add the `feedback?` or `review?` flag depending on if you just want feedback and confirmation you're doing the right thing or if you think the patch is ready to land respectively. Read more about [how to submit a patch and the Bugzilla review cycle here](https://developer.mozilla.org/en-US/docs/Developer_Guide/How_to_Submit_a_Patch).
-
-You can also take a look at the [Code Review Checklist](./code-reviews-checklist.md) as it contains a list of checks that your reviewer is likely to go over when reviewing your code.
-
-### Squashing commits
+## Squashing commits
 
 Sometimes you may be asked to squash your commits. Squashing means merging multiple commits into one in case you created multiple commits while working on a bug. Squashing bugs is easy!
 
 We will use the histedit extension for squashing commits in Mercurial. You can check if this extension is enabled in your Mercurial installation following these steps:
 
 * Open `.hgrc` (Linux/OSX) or `Mercurial.ini` (Windows) –this is the default configuration file of Mercurial– located in your home directory, using your favourite editor.
 * Then add `histedit= ` under the `[extensions]` list present in file, if not present already.
 
--- a/devtools/server/actors/accessibility/accessible.js
+++ b/devtools/server/actors/accessibility/accessible.js
@@ -521,22 +521,47 @@ const AccessibleActor = ActorClassWithSp
     }
 
     const { types } = options;
     let auditTypes = Object.values(AUDIT_TYPE);
     if (types && types.length > 0) {
       auditTypes = auditTypes.filter(auditType => types.includes(auditType));
     }
 
-    // More audit steps will be added here in the near future. In addition to
-    // colour contrast ratio we will add autits for to the missing names,
-    // invalid states, etc. (For example see bug 1518808).
-    this._auditing = Promise.all(
-      auditTypes.map(auditType => this._getAuditByType(auditType))
-    )
+    // For some reason keyboard checks for focus styling affect values (that are
+    // used by other types of checks (text names and values)) returned by
+    // accessible objects. This happens only when multiple checks are run at the
+    // same time (asynchronously) and the audit might return unexpected
+    // failures. We thus split the execution of the checks into two parts, first
+    // performing keyboard checks and only after the rest of the checks. See bug
+    // 1594743 for more detail.
+    let keyboardAuditResult;
+    const keyboardAuditIndex = auditTypes.indexOf(AUDIT_TYPE.KEYBOARD);
+    if (keyboardAuditIndex > -1) {
+      // If we are performing a keyboard audit, remove its value from the
+      // complete list and run it.
+      auditTypes.splice(keyboardAuditIndex, 1);
+      keyboardAuditResult = this._getAuditByType(AUDIT_TYPE.KEYBOARD);
+    }
+
+    this._auditing = Promise.resolve(keyboardAuditResult)
+      .then(keyboardResult => {
+        const audits = auditTypes.map(auditType =>
+          this._getAuditByType(auditType)
+        );
+
+        // If we are also performing a keyboard audit, add its type and its
+        // result back to the complete list of audits.
+        if (keyboardAuditIndex > -1) {
+          auditTypes.splice(keyboardAuditIndex, 0, AUDIT_TYPE.KEYBOARD);
+          audits.splice(keyboardAuditIndex, 0, keyboardResult);
+        }
+
+        return Promise.all(audits);
+      })
       .then(results => {
         if (this.isDefunct || this.isDestroyed) {
           return null;
         }
 
         const audit = results.reduce((auditResults, result, index) => {
           auditResults[auditTypes[index]] = result;
           return auditResults;
--- a/dom/base/ChildIterator.cpp
+++ b/dom/base/ChildIterator.cpp
@@ -44,17 +44,16 @@ nsIContent* ExplicitChildIterator::GetNe
       }
       return mChild;
     }
 
     MOZ_ASSERT_UNREACHABLE("This needs to be revisited");
   } else if (mDefaultChild) {
     // If we're already in default content, check if there are more nodes there
     MOZ_ASSERT(mChild);
-    MOZ_ASSERT(mChild->IsActiveChildrenElement());
 
     mDefaultChild = mDefaultChild->GetNextSibling();
     if (mDefaultChild) {
       return mDefaultChild;
     }
 
     mChild = mChild->GetNextSibling();
   } else if (mIsFirst) {  // at the beginning of the child list
@@ -72,83 +71,48 @@ nsIContent* ExplicitChildIterator::GetNe
     }
 
     mChild = mParent->GetFirstChild();
     mIsFirst = false;
   } else if (mChild) {  // in the middle of the child list
     mChild = mChild->GetNextSibling();
   }
 
-  // Iterate until we find a non-insertion point, or an insertion point with
-  // content.
-  while (mChild) {
-    if (mChild->IsActiveChildrenElement()) {
-      MOZ_ASSERT_UNREACHABLE("This needs to be revisited");
-    } else {
-      // mChild is not an insertion point, thus it is the next node to
-      // return from this iterator.
-      break;
-    }
-  }
-
   return mChild;
 }
 
 void FlattenedChildIterator::Init(bool aIgnoreXBL) {
   if (aIgnoreXBL) {
-    mXBLInvolved = Some(false);
     return;
   }
 
   // TODO(emilio): I think it probably makes sense to only allow constructing
   // FlattenedChildIterators with Element.
   if (mParent->IsElement()) {
     if (ShadowRoot* shadow = mParent->AsElement()->GetShadowRoot()) {
       mParent = shadow;
-      mXBLInvolved = Some(true);
+      mXBLInvolved = true;
+      return;
+    }
+    if (mParentAsSlot) {
+      mXBLInvolved = true;
       return;
     }
   }
 }
 
-bool FlattenedChildIterator::ComputeWhetherXBLIsInvolved() const {
-  MOZ_ASSERT(mXBLInvolved.isNothing());
-  // We set mXBLInvolved to true if either the node we're iterating has a
-  // binding with content attached to it (in which case it is handled in Init),
-  // the node is generated XBL content and has an <xbl:children> child, or the
-  // node is a <slot> element.
-  if (!mParent->GetBindingParent()) {
-    return false;
-  }
-
-  if (mParentAsSlot) {
-    return true;
-  }
-
-  for (nsIContent* child = mParent->GetFirstChild(); child;
-       child = child->GetNextSibling()) {
-    if (child->NodeInfo()->Equals(nsGkAtoms::children, kNameSpaceID_XBL)) {
-      MOZ_ASSERT(child->GetBindingParent());
-      return true;
-    }
-  }
-
-  return false;
-}
-
 bool ExplicitChildIterator::Seek(const nsIContent* aChildToFind) {
   if (aChildToFind->GetParent() == mParent &&
       !aChildToFind->IsRootOfAnonymousSubtree()) {
     // Fast path: just point ourselves to aChildToFind, which is a
     // normal DOM child of ours.
     mChild = const_cast<nsIContent*>(aChildToFind);
     mIndexInInserted = 0;
     mDefaultChild = nullptr;
     mIsFirst = false;
-    MOZ_ASSERT(!mChild->IsActiveChildrenElement());
     return true;
   }
 
   // Can we add more fast paths here based on whether the parent of aChildToFind
   // is a shadow insertion point or content insertion point?
 
   // Slow path: just walk all our kids.
   return Seek(aChildToFind, nullptr);
@@ -211,28 +175,16 @@ nsIContent* ExplicitChildIterator::GetPr
         mChild = assignedNodes[mIndexInInserted - 1]->AsContent();
         return mChild;
       }
     }
 
     mChild = mParent->GetLastChild();
   }
 
-  // Iterate until we find a non-insertion point, or an insertion point with
-  // content.
-  while (mChild) {
-    if (mChild->IsActiveChildrenElement()) {
-      MOZ_ASSERT_UNREACHABLE("This needs to be revisited");
-    } else {
-      // mChild is not an insertion point, thus it is the next node to
-      // return from this iterator.
-      break;
-    }
-  }
-
   if (!mChild) {
     mIsFirst = true;
   }
 
   return mChild;
 }
 
 nsIContent* AllChildrenIterator::Get() const {
--- a/dom/base/ChildIterator.h
+++ b/dom/base/ChildIterator.h
@@ -139,27 +139,24 @@ class FlattenedChildIterator : public Ex
         mOriginalContent(aOther.mOriginalContent),
         mXBLInvolved(aOther.mXBLInvolved) {}
 
   FlattenedChildIterator(const FlattenedChildIterator& aOther)
       : ExplicitChildIterator(aOther),
         mOriginalContent(aOther.mOriginalContent),
         mXBLInvolved(aOther.mXBLInvolved) {}
 
+  // TODO(emilio): Rename to use shadow dom terminology.
   bool XBLInvolved() {
-    if (mXBLInvolved.isNothing()) {
-      mXBLInvolved = Some(ComputeWhetherXBLIsInvolved());
-    }
-    return *mXBLInvolved;
+    return mXBLInvolved;
   }
 
   const nsIContent* Parent() const { return mOriginalContent; }
 
  private:
-  bool ComputeWhetherXBLIsInvolved() const;
 
   void Init(bool aIgnoreXBL);
 
  protected:
   /**
    * This constructor is a hack to help AllChildrenIterator which sometimes
    * doesn't want to consider XBL.
    */
@@ -171,19 +168,17 @@ class FlattenedChildIterator : public Ex
     Init(ignoreXBL);
   }
 
   const nsIContent* mOriginalContent;
 
  private:
   // For certain optimizations, nsCSSFrameConstructor needs to know if the child
   // list of the element that we're iterating matches its .childNodes.
-  //
-  // This is lazily computed when asked for it.
-  Maybe<bool> mXBLInvolved;
+  bool mXBLInvolved = false;
 };
 
 /**
  * AllChildrenIterator traverses the children of an element including before /
  * after content and optionally XBL children.  The iterator can be initialized
  * to start at the end by providing false for aStartAtBeginning in order to
  * start iterating in reverse from the last child.
  *
--- a/dom/base/FragmentOrElement.cpp
+++ b/dom/base/FragmentOrElement.cpp
@@ -527,34 +527,30 @@ static const size_t MaxDOMSlotSizeAllowe
 
 static_assert(sizeof(nsINode::nsSlots) <= MaxDOMSlotSizeAllowed,
               "DOM slots cannot be grown without consideration");
 static_assert(sizeof(FragmentOrElement::nsDOMSlots) <= MaxDOMSlotSizeAllowed,
               "DOM slots cannot be grown without consideration");
 
 void nsIContent::nsExtendedContentSlots::UnlinkExtendedSlots() {
   mBindingParent = nullptr;
-  mXBLInsertionPoint = nullptr;
   mContainingShadow = nullptr;
   mAssignedSlot = nullptr;
 }
 
 void nsIContent::nsExtendedContentSlots::TraverseExtendedSlots(
     nsCycleCollectionTraversalCallback& aCb) {
   NS_CYCLE_COLLECTION_NOTE_EDGE_NAME(aCb, "mExtendedSlots->mBindingParent");
   aCb.NoteXPCOMChild(NS_ISUPPORTS_CAST(nsIContent*, mBindingParent));
 
   NS_CYCLE_COLLECTION_NOTE_EDGE_NAME(aCb, "mExtendedSlots->mContainingShadow");
   aCb.NoteXPCOMChild(NS_ISUPPORTS_CAST(nsIContent*, mContainingShadow));
 
   NS_CYCLE_COLLECTION_NOTE_EDGE_NAME(aCb, "mExtendedSlots->mAssignedSlot");
   aCb.NoteXPCOMChild(NS_ISUPPORTS_CAST(nsIContent*, mAssignedSlot.get()));
-
-  NS_CYCLE_COLLECTION_NOTE_EDGE_NAME(aCb, "mExtendedSlots->mXBLInsertionPoint");
-  aCb.NoteXPCOMChild(mXBLInsertionPoint.get());
 }
 
 nsIContent::nsExtendedContentSlots::nsExtendedContentSlots() {}
 
 nsIContent::nsExtendedContentSlots::~nsExtendedContentSlots() = default;
 
 size_t nsIContent::nsExtendedContentSlots::SizeOfExcludingThis(
     MallocSizeOf aMallocSizeOf) const {
@@ -877,28 +873,16 @@ void nsIContent::GetEventTargetParent(Ev
     aVisitor.mEventTargetAtParent = parent;
   } else if (parent && aVisitor.mOriginalTargetIsInAnon) {
     nsCOMPtr<nsIContent> content(do_QueryInterface(aVisitor.mEvent->mTarget));
     if (content && content->GetBindingParent() == parent) {
       aVisitor.mEventTargetAtParent = parent;
     }
   }
 
-  // check for an anonymous parent
-  // XXX XBL2/sXBL issue
-  if (HasFlag(NODE_MAY_BE_IN_BINDING_MNGR)) {
-    nsIContent* insertionParent = GetXBLInsertionParent();
-    NS_ASSERTION(!(aVisitor.mEventTargetAtParent && insertionParent &&
-                   aVisitor.mEventTargetAtParent != insertionParent),
-                 "Retargeting and having insertion parent!");
-    if (insertionParent) {
-      parent = insertionParent;
-    }
-  }
-
   if (!aVisitor.mEvent->mFlags.mComposedInNativeAnonymousContent &&
       IsRootOfNativeAnonymousSubtree() && OwnerDoc()->GetWindow()) {
     aVisitor.SetParentTarget(OwnerDoc()->GetWindow()->GetParentTarget(), true);
   } else if (parent) {
     aVisitor.SetParentTarget(parent, false);
     if (slot) {
       ShadowRoot* root = slot->GetContainingShadow();
       if (root && root->IsClosed()) {
@@ -1079,28 +1063,16 @@ nsIContent* nsIContent::GetContainingSha
   return nullptr;
 }
 
 void nsIContent::SetAssignedSlot(HTMLSlotElement* aSlot) {
   MOZ_ASSERT(aSlot || GetExistingExtendedContentSlots());
   ExtendedContentSlots()->mAssignedSlot = aSlot;
 }
 
-void nsIContent::SetXBLInsertionPoint(nsIContent* aContent) {
-  if (aContent) {
-    nsExtendedContentSlots* slots = ExtendedContentSlots();
-    SetFlags(NODE_MAY_BE_IN_BINDING_MNGR);
-    slots->mXBLInsertionPoint = aContent;
-  } else {
-    if (nsExtendedContentSlots* slots = GetExistingExtendedContentSlots()) {
-      slots->mXBLInsertionPoint = nullptr;
-    }
-  }
-}
-
 #ifdef DEBUG
 void nsIContent::AssertAnonymousSubtreeRelatedInvariants() const {
   MOZ_ASSERT(!IsRootOfNativeAnonymousSubtree() ||
                  (GetParent() && GetBindingParent() == GetParent()),
              "root of native anonymous subtree must have parent equal "
              "to binding parent");
   MOZ_ASSERT(!GetParent() || !IsInComposedDoc() ||
                  ((GetBindingParent() == GetParent()) ==
--- a/dom/base/nsContentUtils.cpp
+++ b/dom/base/nsContentUtils.cpp
@@ -4375,24 +4375,16 @@ bool nsContentUtils::HasMutationListener
   // might not be in our chain.  If we don't have a window, we might have a
   // mutation listener.  Check quickly to see.
   while (aNode) {
     EventListenerManager* manager = aNode->GetExistingListenerManager();
     if (manager && manager->HasMutationListeners()) {
       return true;
     }
 
-    if (aNode->IsContent()) {
-      nsIContent* insertionPoint = aNode->AsContent()->GetXBLInsertionPoint();
-      if (insertionPoint) {
-        aNode = insertionPoint->GetParent();
-        MOZ_ASSERT(aNode);
-        continue;
-      }
-    }
     aNode = aNode->GetParentNode();
   }
 
   return false;
 }
 
 /* static */
 bool nsContentUtils::HasMutationListeners(Document* aDocument, uint32_t aType) {
--- a/dom/base/nsIContent.h
+++ b/dom/base/nsIContent.h
@@ -266,18 +266,16 @@ class nsIContent : public nsINode {
     return mNodeInfo->Equals(aTag, kNameSpaceID_MathML);
   }
 
   template <typename First, typename... Args>
   inline bool IsAnyOfMathMLElements(First aFirst, Args... aArgs) const {
     return IsMathMLElement() && IsNodeInternal(aFirst, aArgs...);
   }
 
-  inline bool IsActiveChildrenElement() const;
-
   bool IsGeneratedContentContainerForBefore() const {
     return IsRootOfNativeAnonymousSubtree() &&
            mNodeInfo->NameAtom() == nsGkAtoms::mozgeneratedcontentbefore;
   }
 
   bool IsGeneratedContentContainerForAfter() const {
     return IsRootOfNativeAnonymousSubtree() &&
            mNodeInfo->NameAtom() == nsGkAtoms::mozgeneratedcontentafter;
@@ -442,39 +440,16 @@ class nsIContent : public nsINode {
    * Gets the assigned slot associated with this content based on parent's
    * shadow root mode. Returns null if parent's shadow root is "closed".
    * https://dom.spec.whatwg.org/#dom-slotable-assignedslot
    *
    * @return The assigned slot element or null.
    */
   mozilla::dom::HTMLSlotElement* GetAssignedSlotByMode() const;
 
-  nsIContent* GetXBLInsertionParent() const {
-    nsIContent* ip = GetXBLInsertionPoint();
-    return ip ? ip->GetParent() : nullptr;
-  }
-
-  /**
-   * Gets the insertion parent element of the XBL binding.
-   * The insertion parent is our one true parent in the transformed DOM.
-   *
-   * @return the insertion parent element.
-   */
-  nsIContent* GetXBLInsertionPoint() const {
-    const nsExtendedContentSlots* slots = GetExistingExtendedContentSlots();
-    return slots ? slots->mXBLInsertionPoint.get() : nullptr;
-  }
-
-  /**
-   * Sets the insertion parent element of the XBL binding.
-   *
-   * @param aContent The insertion parent element.
-   */
-  void SetXBLInsertionPoint(nsIContent* aContent);
-
   /**
    * Same as GetFlattenedTreeParentNode, but returns null if the parent is
    * non-nsIContent.
    */
   inline nsIContent* GetFlattenedTreeParent() const;
 
  protected:
   // Handles getting inserted or removed directly under a <slot> element.
@@ -762,21 +737,16 @@ class nsIContent : public nsINode {
     /**
      * The nearest enclosing content node with a binding that created us.
      *
      * @see nsIContent::GetBindingParent
      */
     RefPtr<mozilla::dom::Element> mBindingParent;
 
     /**
-     * @see nsIContent::GetXBLInsertionPoint
-     */
-    nsCOMPtr<nsIContent> mXBLInsertionPoint;
-
-    /**
      * @see nsIContent::GetContainingShadow
      */
     RefPtr<mozilla::dom::ShadowRoot> mContainingShadow;
 
     /**
      * @see nsIContent::GetAssignedSlot
      */
     RefPtr<mozilla::dom::HTMLSlotElement> mAssignedSlot;
--- a/dom/base/nsIContentInlines.h
+++ b/dom/base/nsIContentInlines.h
@@ -112,19 +112,16 @@ static inline nsINode* GetFlattenedTreeP
     }
 
     if (auto* shadowRoot =
             mozilla::dom::ShadowRoot::FromNode(parentAsContent)) {
       return shadowRoot->GetHost();
     }
   }
 
-  MOZ_ASSERT(!parentAsContent->IsActiveChildrenElement(),
-             "<xbl:children> isn't in the flattened tree");
-
   // Common case.
   return parent;
 }
 
 inline nsINode* nsINode::GetFlattenedTreeParentNode() const {
   return ::GetFlattenedTreeParentNode<nsINode::eNotForStyle>(this);
 }
 
@@ -162,31 +159,16 @@ inline bool nsINode::IsEditable() const 
     return false;
   }
 
   // Check if the node is in a document and the document is in designMode.
   Document* doc = GetUncomposedDoc();
   return doc && doc->HasFlag(NODE_IS_EDITABLE);
 }
 
-inline bool nsIContent::IsActiveChildrenElement() const {
-  if (!mNodeInfo->Equals(nsGkAtoms::children, kNameSpaceID_XBL)) {
-    return false;
-  }
-
-  nsIContent* bindingParent = GetBindingParent();
-  if (!bindingParent) {
-    return false;
-  }
-
-  // We reuse the binding parent machinery for Shadow DOM too, so prevent that
-  // from getting us confused in this case.
-  return !bindingParent->GetShadowRoot();
-}
-
 inline bool nsIContent::IsInAnonymousSubtree() const {
   NS_ASSERTION(
       !IsInNativeAnonymousSubtree() || GetBindingParent() ||
           (!IsInUncomposedDoc() && static_cast<nsIContent*>(SubtreeRoot())
                                        ->IsInNativeAnonymousSubtree()),
       "Must have binding parent when in native anonymous subtree which is in "
       "document.\n"
       "Native anonymous subtree which is not in document must have native "
--- a/dom/base/nsINode.cpp
+++ b/dom/base/nsINode.cpp
@@ -1168,17 +1168,17 @@ nsPIDOMWindowOuter* nsINode::GetOwnerGlo
 nsIGlobalObject* nsINode::GetOwnerGlobal() const {
   bool dummy;
   return OwnerDoc()->GetScriptHandlingObject(dummy);
 }
 
 bool nsINode::UnoptimizableCCNode() const {
   const uintptr_t problematicFlags =
       (NODE_IS_ANONYMOUS_ROOT | NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE |
-       NODE_IS_NATIVE_ANONYMOUS_ROOT | NODE_MAY_BE_IN_BINDING_MNGR);
+       NODE_IS_NATIVE_ANONYMOUS_ROOT);
   return HasFlag(problematicFlags) || NodeType() == ATTRIBUTE_NODE ||
          // For strange cases like xbl:content/xbl:children
          (IsElement() && AsElement()->IsInNamespace(kNameSpaceID_XBL));
 }
 
 /* static */
 bool nsINode::Traverse(nsINode* tmp, nsCycleCollectionTraversalCallback& cb) {
   if (MOZ_LIKELY(!cb.WantAllTraces())) {
--- a/dom/base/nsINode.h
+++ b/dom/base/nsINode.h
@@ -126,74 +126,69 @@ enum {
   NODE_IS_IN_NATIVE_ANONYMOUS_SUBTREE = NODE_FLAG_BIT(3),
 
   // Whether this node is the root of a native anonymous (from the perspective
   // of its parent) subtree.  This flag is set-once: once a node has it, it
   // must not be removed.
   // NOTE: Should only be used on nsIContent nodes
   NODE_IS_NATIVE_ANONYMOUS_ROOT = NODE_FLAG_BIT(4),
 
-  // Whether a binding manager may have a pointer to this
-  NODE_MAY_BE_IN_BINDING_MNGR = NODE_FLAG_BIT(5),
-
-  NODE_IS_EDITABLE = NODE_FLAG_BIT(6),
-
-  // Free bit here.
+  NODE_IS_EDITABLE = NODE_FLAG_BIT(5),
 
   // Whether the node participates in a shadow tree.
-  NODE_IS_IN_SHADOW_TREE = NODE_FLAG_BIT(8),
+  NODE_IS_IN_SHADOW_TREE = NODE_FLAG_BIT(6),
 
   // Node has an :empty or :-moz-only-whitespace selector
-  NODE_HAS_EMPTY_SELECTOR = NODE_FLAG_BIT(9),
+  NODE_HAS_EMPTY_SELECTOR = NODE_FLAG_BIT(7),
 
   // A child of the node has a selector such that any insertion,
   // removal, or appending of children requires restyling the parent.
-  NODE_HAS_SLOW_SELECTOR = NODE_FLAG_BIT(10),
+  NODE_HAS_SLOW_SELECTOR = NODE_FLAG_BIT(8),
 
   // A child of the node has a :first-child, :-moz-first-node,
   // :only-child, :last-child or :-moz-last-node selector.
-  NODE_HAS_EDGE_CHILD_SELECTOR = NODE_FLAG_BIT(11),
+  NODE_HAS_EDGE_CHILD_SELECTOR = NODE_FLAG_BIT(9),
 
   // A child of the node has a selector such that any insertion or
   // removal of children requires restyling later siblings of that
   // element.  Additionally (in this manner it is stronger than
   // NODE_HAS_SLOW_SELECTOR), if a child's style changes due to any
   // other content tree changes (e.g., the child changes to or from
   // matching :empty due to a grandchild insertion or removal), the
   // child's later siblings must also be restyled.
-  NODE_HAS_SLOW_SELECTOR_LATER_SIBLINGS = NODE_FLAG_BIT(12),
+  NODE_HAS_SLOW_SELECTOR_LATER_SIBLINGS = NODE_FLAG_BIT(10),
 
   NODE_ALL_SELECTOR_FLAGS = NODE_HAS_EMPTY_SELECTOR | NODE_HAS_SLOW_SELECTOR |
                             NODE_HAS_EDGE_CHILD_SELECTOR |
                             NODE_HAS_SLOW_SELECTOR_LATER_SIBLINGS,
 
   // This node needs to go through frame construction to get a frame (or
   // undisplayed entry).
-  NODE_NEEDS_FRAME = NODE_FLAG_BIT(13),
+  NODE_NEEDS_FRAME = NODE_FLAG_BIT(11),
 
   // At least one descendant in the flattened tree has NODE_NEEDS_FRAME set.
   // This should be set on every node on the flattened tree path between the
   // node(s) with NODE_NEEDS_FRAME and the root content.
-  NODE_DESCENDANTS_NEED_FRAMES = NODE_FLAG_BIT(14),
+  NODE_DESCENDANTS_NEED_FRAMES = NODE_FLAG_BIT(12),
 
   // Set if the node has the accesskey attribute set.
-  NODE_HAS_ACCESSKEY = NODE_FLAG_BIT(15),
+  NODE_HAS_ACCESSKEY = NODE_FLAG_BIT(13),
 
   // Set if the node has right-to-left directionality
-  NODE_HAS_DIRECTION_RTL = NODE_FLAG_BIT(16),
+  NODE_HAS_DIRECTION_RTL = NODE_FLAG_BIT(14),
 
   // Set if the node has left-to-right directionality
-  NODE_HAS_DIRECTION_LTR = NODE_FLAG_BIT(17),
+  NODE_HAS_DIRECTION_LTR = NODE_FLAG_BIT(15),
 
   NODE_ALL_DIRECTION_FLAGS = NODE_HAS_DIRECTION_LTR | NODE_HAS_DIRECTION_RTL,
 
-  NODE_HAS_BEEN_IN_UA_WIDGET = NODE_FLAG_BIT(18),
+  NODE_HAS_BEEN_IN_UA_WIDGET = NODE_FLAG_BIT(16),
 
   // Remaining bits are node type specific.
-  NODE_TYPE_SPECIFIC_BITS_OFFSET = 19
+  NODE_TYPE_SPECIFIC_BITS_OFFSET = 17
 };
 
 // Make sure we have space for our bits
 #define ASSERT_NODE_FLAGS_SPACE(n)                         \
   static_assert(WRAPPER_CACHE_FLAGS_BITS_USED + (n) <=     \
                     sizeof(nsWrapperCache::FlagsType) * 8, \
                 "Not enough space for our bits")
 ASSERT_NODE_FLAGS_SPACE(NODE_TYPE_SPECIFIC_BITS_OFFSET);
--- a/dom/bindings/moz.build
+++ b/dom/bindings/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: Bindings (WebIDL)")
 
 TEST_DIRS += ['test']
 
 XPIDL_SOURCES += [
     'nsIScriptError.idl'
 ]
 
 XPIDL_MODULE = 'dom_bindings'
--- a/dom/broadcastchannel/moz.build
+++ b/dom/broadcastchannel/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: postMessage")
 
 EXPORTS.mozilla.dom += [
     'BroadcastChannel.h',
 ]
 
 UNIFIED_SOURCES += [
     'BroadcastChannel.cpp',
     'BroadcastChannelChild.cpp',
--- a/dom/events/moz.build
+++ b/dom/events/moz.build
@@ -1,15 +1,18 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
+
+with Files("Event*"):
     BUG_COMPONENT = ("Core", "DOM: Events")
 
 if CONFIG['OS_ARCH'] == 'WINNT':
     DIRS += ['win']
 elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'cocoa':
     DIRS += ['mac']
 elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
     DIRS += ['android']
--- a/dom/filehandle/moz.build
+++ b/dom/filehandle/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: File")
 
 EXPORTS.mozilla.dom.filehandle += [
     'ActorsParent.h',
     'SerializationHelpers.h',
 ]
 
 EXPORTS.mozilla.dom += [
     'FileHandleStorage.h',
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -653,66 +653,81 @@ class HTMLMediaElement::MediaStreamRende
 
   // Currently enabled (and rendered) audio tracks.
   nsTArray<WeakPtr<MediaStreamTrack>> mAudioTracks;
 
   // Currently selected (and rendered) video track.
   WeakPtr<MediaStreamTrack> mVideoTrack;
 };
 
-class HTMLMediaElement::StreamCaptureTrackSource
+class HTMLMediaElement::MediaElementTrackSource
     : public MediaStreamTrackSource,
       public MediaStreamTrackSource::Sink {
  public:
   NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(StreamCaptureTrackSource,
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaElementTrackSource,
                                            MediaStreamTrackSource)
 
-  StreamCaptureTrackSource(MediaStreamTrackSource* aCapturedTrackSource,
-                           ProcessedMediaTrack* aStream, MediaInputPort* aPort)
+  /* MediaDecoder track source */
+  MediaElementTrackSource(ProcessedMediaTrack* aTrack, nsIPrincipal* aPrincipal)
+      : MediaStreamTrackSource(aPrincipal, nsString()), mTrack(aTrack) {
+    MOZ_ASSERT(mTrack);
+  }
+
+  /* MediaStream track source */
+  MediaElementTrackSource(MediaStreamTrackSource* aCapturedTrackSource,
+                          ProcessedMediaTrack* aTrack, MediaInputPort* aPort)
       : MediaStreamTrackSource(aCapturedTrackSource->GetPrincipal(),
                                nsString()),
         mCapturedTrackSource(aCapturedTrackSource),
-        mTrack(aStream),
+        mTrack(aTrack),
         mPort(aPort) {
+    MOZ_ASSERT(mTrack);
     MOZ_ASSERT(mCapturedTrackSource);
-    MOZ_ASSERT(mTrack);
     MOZ_ASSERT(mPort);
 
     mCapturedTrackSource->RegisterSink(this);
   }
 
   void SetEnabled(bool aEnabled) {
     if (!mTrack) {
       return;
     }
     mTrack->SetEnabled(aEnabled ? DisabledTrackMode::ENABLED
                                 : DisabledTrackMode::SILENCE_FREEZE);
   }
 
+  void SetPrincipal(RefPtr<nsIPrincipal> aPrincipal) {
+    mPrincipal = std::move(aPrincipal);
+    MediaStreamTrackSource::PrincipalChanged();
+  }
+
   void Destroy() override {
     if (mCapturedTrackSource) {
       mCapturedTrackSource->UnregisterSink(this);
       mCapturedTrackSource = nullptr;
     }
-    if (mTrack) {
+    if (mTrack && !mTrack->IsDestroyed()) {
       mTrack->Destroy();
-      mTrack = nullptr;
     }
     if (mPort) {
       mPort->Destroy();
       mPort = nullptr;
     }
   }
 
   MediaSourceEnum GetMediaSource() const override {
     return MediaSourceEnum::Other;
   }
 
-  void Stop() override { Destroy(); }
+  void Stop() override {
+    // Do nothing. There may appear new output streams
+    // that need tracks sourced from this source, so we
+    // cannot destroy things yet.
+  }
 
   /**
    * Do not keep the track source alive. The source lifetime is controlled by
    * its associated tracks.
    */
   bool KeepsSourceAlive() const override { return false; }
 
   /**
@@ -725,60 +740,79 @@ class HTMLMediaElement::StreamCaptureTra
   void Enable() override {}
 
   void PrincipalChanged() override {
     if (!mCapturedTrackSource) {
       // This could happen during shutdown.
       return;
     }
 
-    mPrincipal = mCapturedTrackSource->GetPrincipal();
-    MediaStreamTrackSource::PrincipalChanged();
+    SetPrincipal(mCapturedTrackSource->GetPrincipal());
   }
 
   void MutedChanged(bool aNewState) override {
-    if (!mCapturedTrackSource) {
-      // This could happen during shutdown.
-      return;
-    }
-
     MediaStreamTrackSource::MutedChanged(aNewState);
   }
 
   void OverrideEnded() override {
-    if (!mCapturedTrackSource) {
-      // This could happen during shutdown.
-      return;
-    }
-
     Destroy();
     MediaStreamTrackSource::OverrideEnded();
   }
 
+  ProcessedMediaTrack* Track() const { return mTrack; }
+
  private:
-  virtual ~StreamCaptureTrackSource() {
-    MOZ_ASSERT(!mCapturedTrackSource);
-    MOZ_ASSERT(!mTrack);
-    MOZ_ASSERT(!mPort);
-  };
+  virtual ~MediaElementTrackSource() { Destroy(); };
 
   RefPtr<MediaStreamTrackSource> mCapturedTrackSource;
-  RefPtr<ProcessedMediaTrack> mTrack;
+  const RefPtr<ProcessedMediaTrack> mTrack;
   RefPtr<MediaInputPort> mPort;
 };
 
-NS_IMPL_ADDREF_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
+HTMLMediaElement::OutputMediaStream::OutputMediaStream(
+    RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
+    bool aFinishWhenEnded)
+    : mStream(std::move(aStream)),
+      mCapturingAudioOnly(aCapturingAudioOnly),
+      mFinishWhenEnded(aFinishWhenEnded) {}
+HTMLMediaElement::OutputMediaStream::~OutputMediaStream() = default;
+
+void ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& aCallback,
+                                 HTMLMediaElement::OutputMediaStream& aField,
+                                 const char* aName, uint32_t aFlags) {
+  ImplCycleCollectionTraverse(aCallback, aField.mStream, "mStream", aFlags);
+  ImplCycleCollectionTraverse(aCallback, aField.mFinishWhenEndedLoadingSrc,
+                              "mFinishWhenEndedLoadingSrc", aFlags);
+  ImplCycleCollectionTraverse(aCallback, aField.mFinishWhenEndedAttrStream,
+                              "mFinishWhenEndedAttrStream", aFlags);
+}
+
+void ImplCycleCollectionUnlink(HTMLMediaElement::OutputMediaStream& aField) {
+  ImplCycleCollectionUnlink(aField.mStream);
+  ImplCycleCollectionUnlink(aField.mFinishWhenEndedLoadingSrc);
+  ImplCycleCollectionUnlink(aField.mFinishWhenEndedAttrStream);
+}
+
+NS_IMPL_ADDREF_INHERITED(HTMLMediaElement::MediaElementTrackSource,
                          MediaStreamTrackSource)
-NS_IMPL_RELEASE_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
+NS_IMPL_RELEASE_INHERITED(HTMLMediaElement::MediaElementTrackSource,
                           MediaStreamTrackSource)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
-    HTMLMediaElement::StreamCaptureTrackSource)
+    HTMLMediaElement::MediaElementTrackSource)
 NS_INTERFACE_MAP_END_INHERITING(MediaStreamTrackSource)
-NS_IMPL_CYCLE_COLLECTION_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
-                                   MediaStreamTrackSource, mCapturedTrackSource)
+NS_IMPL_CYCLE_COLLECTION_CLASS(HTMLMediaElement::MediaElementTrackSource)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(
+    HTMLMediaElement::MediaElementTrackSource, MediaStreamTrackSource)
+  tmp->Destroy();
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mCapturedTrackSource)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(
+    HTMLMediaElement::MediaElementTrackSource, MediaStreamTrackSource)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mCapturedTrackSource)
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 /**
  * There is a reference cycle involving this class: MediaLoadListener
  * holds a reference to the HTMLMediaElement, which holds a reference
  * to an nsIChannel, which holds a reference to this listener.
  * We break the reference cycle in OnStartRequest by clearing mElement.
  */
 class HTMLMediaElement::MediaLoadListener final
@@ -1637,19 +1671,18 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcMediaSource)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcStream)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcAttrStream)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSourcePointer)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mLoadBlockedDoc)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSourceLoadCandidate)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioChannelWrapper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mErrorSink->mError)
-  for (uint32_t i = 0; i < tmp->mOutputStreams.Length(); ++i) {
-    NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputStreams[i].mStream)
-  }
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputStreams)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputTrackSources);
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlayed);
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTextTrackManager)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioTrackList)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mVideoTrackList)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mIncomingMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSelectedVideoStreamTrack)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingPlayPromises)
@@ -1671,20 +1704,18 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_IN
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSourcePointer)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mLoadBlockedDoc)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSourceLoadCandidate)
   if (tmp->mAudioChannelWrapper) {
     tmp->mAudioChannelWrapper->Shutdown();
   }
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioChannelWrapper)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mErrorSink->mError)
-  for (OutputMediaStream& s : tmp->mOutputStreams) {
-    s.mStream->SetFinishedOnInactive(true);
-  }
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputStreams)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputTrackSources)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlayed)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mTextTrackManager)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioTrackList)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mVideoTrackList)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mIncomingMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSelectedVideoStreamTrack)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingPlayPromises)
@@ -1829,17 +1860,16 @@ nsresult HTMLMediaElement::OnChannelRedi
 void HTMLMediaElement::ShutdownDecoder() {
   RemoveMediaElementFromURITable();
   NS_ASSERTION(mDecoder, "Must have decoder to shut down");
 
   mWaitingForKeyListener.DisconnectIfExists();
   if (mMediaSource) {
     mMediaSource->CompletePendingTransactions();
   }
-  DiscardFinishWhenEndedOutputStreams();
   mDecoder->Shutdown();
   DDUNLINKCHILD(mDecoder.get());
   mDecoder = nullptr;
   ReportAudioTrackSilenceProportionTelemetry();
 }
 
 void HTMLMediaElement::ReportPlayedTimeAfterBlockedTelemetry() {
   if (!mHasPlayEverBeenBlocked) {
@@ -1912,37 +1942,24 @@ void HTMLMediaElement::AbortExistingLoad
   bool fireTimeUpdate = false;
 
   // We need to remove FirstFrameListener before VideoTracks get emptied.
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
     mFirstFrameListener = nullptr;
   }
 
-  // When aborting the existing loads, empty the objects in audio track list and
-  // video track list, no events (in particular, no removetrack events) are
-  // fired as part of this. Ending MediaTrack sends track ended notifications,
-  // so we empty the track lists prior.
-  if (AudioTracks()) {
-    AudioTracks()->EmptyTracks();
-  }
-  if (VideoTracks()) {
-    VideoTracks()->EmptyTracks();
-  }
-
   if (mDecoder) {
     fireTimeUpdate = mDecoder->GetCurrentTime() != 0.0;
     ShutdownDecoder();
   }
   if (mSrcStream) {
     EndSrcMediaStreamPlayback();
   }
 
-  DiscardFinishWhenEndedOutputStreams();
-
   RemoveMediaElementFromURITable();
   mLoadingSrc = nullptr;
   mLoadingSrcTriggeringPrincipal = nullptr;
   DDLOG(DDLogCategory::Property, "loading_src", "");
   DDUNLINKCHILD(mMediaSource.get());
   mMediaSource = nullptr;
 
   if (mNetworkState == NETWORK_LOADING || mNetworkState == NETWORK_IDLE) {
@@ -1974,16 +1991,17 @@ void HTMLMediaElement::AbortExistingLoad
                  "How did someone setup a new stream/decoder already?");
     // ChangeNetworkState() will call UpdateAudioChannelPlayingState()
     // indirectly which depends on mPaused. So we need to update mPaused first.
     if (!mPaused) {
       mPaused = true;
       RejectPromises(TakePendingPlayPromises(), NS_ERROR_DOM_MEDIA_ABORT_ERR);
     }
     ChangeNetworkState(NETWORK_EMPTY);
+    RemoveMediaTracks();
     ChangeReadyState(HAVE_NOTHING);
 
     // TODO: Apply the rules for text track cue rendering Bug 865407
     if (mTextTrackManager) {
       mTextTrackManager->GetTextTracks()->SetCuesInactive();
     }
 
     if (fireTimeUpdate) {
@@ -2021,16 +2039,17 @@ void HTMLMediaElement::AbortExistingLoad
 }
 
 void HTMLMediaElement::NoSupportedMediaSourceError(
     const nsACString& aErrorDetails) {
   if (mDecoder) {
     ShutdownDecoder();
   }
   mErrorSink->SetError(MEDIA_ERR_SRC_NOT_SUPPORTED, aErrorDetails);
+  RemoveMediaTracks();
   ChangeDelayLoadStatus(false);
   UpdateAudioChannelPlayingState();
   RejectPromises(TakePendingPlayPromises(),
                  NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR);
 }
 
 typedef void (HTMLMediaElement::*SyncSectionFn)();
 
@@ -2337,44 +2356,34 @@ void HTMLMediaElement::NotifyMediaTrackE
           if (!mFirstFrameListener) {
             mFirstFrameListener =
                 new FirstFrameListener(container, mAbstractMainThread);
           }
           mSelectedVideoStreamTrack->AddVideoOutput(mFirstFrameListener);
         }
       }
     }
-
-    if (mReadyState == HAVE_NOTHING) {
-      // No MediaStreamTracks are captured until we have metadata.
-      return;
-    }
-    for (OutputMediaStream& ms : mOutputStreams) {
-      if (aTrack->AsVideoTrack() && ms.mCapturingAudioOnly) {
-        // If the output stream is for audio only we ignore video tracks.
-        continue;
-      }
-      AddCaptureMediaTrackToOutputStream(aTrack, ms);
-    }
-  }
+  }
+
+  // The set of enabled/selected tracks changed.
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
 }
 
 void HTMLMediaElement::NotifyMediaTrackDisabled(dom::MediaTrack* aTrack) {
   MOZ_ASSERT(aTrack);
   if (!aTrack) {
     return;
   }
-#ifdef DEBUG
+
   nsString id;
   aTrack->GetId(id);
 
   LOG(LogLevel::Debug, ("MediaElement %p %sTrack with id %s disabled", this,
                         aTrack->AsAudioTrack() ? "Audio" : "Video",
                         NS_ConvertUTF16toUTF8(id).get()));
-#endif
 
   MOZ_ASSERT((!aTrack->AsAudioTrack() || !aTrack->AsAudioTrack()->Enabled()) &&
              (!aTrack->AsVideoTrack() || !aTrack->AsVideoTrack()->Selected()));
 
   if (AudioTrack* t = aTrack->AsAudioTrack()) {
     if (mSrcStream) {
       if (mMediaStreamRenderer) {
         mMediaStreamRenderer->RemoveTrack(t->GetAudioStreamTrack());
@@ -2406,55 +2415,18 @@ void HTMLMediaElement::NotifyMediaTrackD
       if (mMediaStreamRenderer) {
         mMediaStreamRenderer->RemoveTrack(mSelectedVideoStreamTrack);
       }
       mSelectedVideoStreamTrack->RemovePrincipalChangeObserver(this);
       mSelectedVideoStreamTrack = nullptr;
     }
   }
 
-  if (mReadyState == HAVE_NOTHING) {
-    // No MediaStreamTracks are captured until we have metadata, and code
-    // below doesn't do anything for captured decoders.
-    return;
-  }
-
-  for (OutputMediaStream& ms : mOutputStreams) {
-    if (ms.mCapturingDecoder) {
-      MOZ_ASSERT(!ms.mCapturingMediaStream);
-      continue;
-    }
-    if (ms.mCapturingAudioOnly && aTrack->AsVideoTrack()) {
-      continue;
-    }
-    MOZ_ASSERT(ms.mCapturingMediaStream);
-    for (int32_t i = ms.mTracks.Length() - 1; i >= 0; --i) {
-      if (ms.mTracks[i].first() != aTrack->GetId()) {
-        continue;
-      }
-      // The source of this track just ended. Force-notify that it ended.
-      // If we bounce it to the MediaTrackGraph it might not be picked up,
-      // for instance if the MediaInputPort was destroyed in the same
-      // iteration as it was added.
-      mMainThreadEventTarget->Dispatch(NewRunnableMethod(
-          "StreamCaptureTrackSource::OverrideEnded",
-          static_cast<StreamCaptureTrackSource*>(ms.mTracks[i].second().get()),
-          &StreamCaptureTrackSource::OverrideEnded));
-
-      ms.mTracks.RemoveElementAt(i);
-      break;
-    }
-#ifdef DEBUG
-    for (auto pair : ms.mTracks) {
-      MOZ_ASSERT(pair.first() != aTrack->GetId(),
-                 "The same MediaTrack was forwarded to the output stream more "
-                 "than once. This shouldn't happen.");
-    }
-#endif
-  }
+  // The set of enabled/selected tracks changed.
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
 }
 
 void HTMLMediaElement::DealWithFailedElement(nsIContent* aSourceElement) {
   if (mShuttingDown) {
     return;
   }
 
   DispatchAsyncSourceError(aSourceElement);
@@ -2466,16 +2438,18 @@ void HTMLMediaElement::DealWithFailedEle
 void HTMLMediaElement::LoadFromSourceChildren() {
   NS_ASSERTION(mDelayingLoadEvent,
                "Should delay load event (if in document) during load");
   NS_ASSERTION(mIsLoadingFromSourceChildren,
                "Must remember we're loading from source children");
 
   AddMutationObserverUnlessExists(this);
 
+  RemoveMediaTracks();
+
   while (true) {
     Element* child = GetNextSource();
     if (!child) {
       // Exhausted candidates, wait for more candidates to be appended to
       // the media element.
       mLoadWaitStatus = WAITING_FOR_SOURCE;
       ChangeNetworkState(NETWORK_NO_SOURCE);
       ChangeDelayLoadStatus(false);
@@ -3162,119 +3136,267 @@ void HTMLMediaElement::SetMuted(bool aMu
 
   DispatchAsyncEvent(NS_LITERAL_STRING("volumechange"));
 
   // We allow inaudible autoplay. But changing our mute status may make this
   // media audible. So pause if we are no longer supposed to be autoplaying.
   PauseIfShouldNotBePlaying();
 }
 
+void HTMLMediaElement::GetAllEnabledMediaTracks(
+    nsTArray<RefPtr<MediaTrack>>& aTracks) {
+  if (AudioTrackList* tracks = AudioTracks()) {
+    for (size_t i = 0; i < tracks->Length(); ++i) {
+      AudioTrack* track = (*tracks)[i];
+      if (track->Enabled()) {
+        aTracks.AppendElement(track);
+      }
+    }
+  }
+  if (IsVideo()) {
+    if (VideoTrackList* tracks = VideoTracks()) {
+      for (size_t i = 0; i < tracks->Length(); ++i) {
+        VideoTrack* track = (*tracks)[i];
+        if (track->Selected()) {
+          aTracks.AppendElement(track);
+        }
+      }
+    }
+  }
+}
+
 void HTMLMediaElement::SetCapturedOutputStreamsEnabled(bool aEnabled) {
-  for (OutputMediaStream& ms : mOutputStreams) {
-    if (ms.mCapturingDecoder) {
-      MOZ_ASSERT(!ms.mCapturingMediaStream);
-      continue;
-    }
-    for (auto pair : ms.mTracks) {
-      static_cast<StreamCaptureTrackSource*>(pair.second().get())
-          ->SetEnabled(aEnabled);
-
-      LOG(LogLevel::Debug, ("%s track %p for captured MediaStream %p",
-                            aEnabled ? "Enabled" : "Disabled",
-                            pair.second().get(), ms.mStream.get()));
-    }
-  }
-}
-
-void HTMLMediaElement::AddCaptureMediaTrackToOutputStream(
-    dom::MediaTrack* aTrack, OutputMediaStream& aOutputStream,
-    bool aAsyncAddtrack) {
-  if (aOutputStream.mCapturingDecoder) {
-    MOZ_ASSERT(!aOutputStream.mCapturingMediaStream);
-    return;
-  }
-  aOutputStream.mCapturingMediaStream = true;
-
+  for (auto& entry : mOutputTrackSources) {
+    entry.GetData()->SetEnabled(aEnabled);
+  }
+}
+
+void HTMLMediaElement::AddOutputTrackSourceToOutputStream(
+    MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
+    AddTrackMode aMode) {
   if (aOutputStream.mStream == mSrcStream) {
     // Cycle detected. This can happen since tracks are added async.
     // We avoid forwarding it to the output here or we'd get into an infloop.
-    return;
-  }
-
-  if (!aTrack) {
-    MOZ_ASSERT(false, "Bad MediaTrack");
+    LOG(LogLevel::Warning,
+        ("NOT adding output track source %p to output stream "
+         "%p -- cycle detected",
+         aSource, aOutputStream.mStream.get()));
     return;
   }
 
-  MediaStreamTrack* inputTrack = mSrcStream->GetTrackById(aTrack->GetId());
-  MOZ_ASSERT(inputTrack);
-  if (!inputTrack) {
-    NS_ERROR("Input track not found in source stream");
-    return;
-  }
-  MOZ_DIAGNOSTIC_ASSERT(!inputTrack->Ended());
-
+  LOG(LogLevel::Debug, ("Adding output track source %p to output stream %p",
+                        aSource, aOutputStream.mStream.get()));
+
+  RefPtr<MediaStreamTrack> domTrack;
+  if (aSource->Track()->mType == MediaSegment::AUDIO) {
+    domTrack = new AudioStreamTrack(aOutputStream.mStream->GetParentObject(),
+                                    aSource->Track(), aSource);
+  } else {
+    domTrack = new VideoStreamTrack(aOutputStream.mStream->GetParentObject(),
+                                    aSource->Track(), aSource);
+  }
+
+  switch (aMode) {
+    case AddTrackMode::ASYNC:
+      mMainThreadEventTarget->Dispatch(
+          NewRunnableMethod<StoreRefPtrPassByPtr<MediaStreamTrack>>(
+              "DOMMediaStream::AddTrackInternal", aOutputStream.mStream,
+              &DOMMediaStream::AddTrackInternal, domTrack));
+      break;
+    case AddTrackMode::SYNC:
+      aOutputStream.mStream->AddTrackInternal(domTrack);
+      break;
+    default:
+      MOZ_CRASH("Unexpected mode");
+  }
+
+  LOG(LogLevel::Debug,
+      ("Created capture %s track %p",
+       domTrack->AsAudioStreamTrack() ? "audio" : "video", domTrack.get()));
+}
+
+void HTMLMediaElement::UpdateOutputTrackSources() {
+  // This updates the track sources in mOutputTrackSources so they're in sync
+  // with the tracks being currently played, and state saying whether we should
+  // be capturing tracks. This method is long so here is a breakdown:
+  // - Figure out the tracks that should be captured
+  // - Diff those against currently captured tracks (mOutputTrackSources), into
+  //   tracks-to-add, and tracks-to-remove
+  // - Remove the tracks in tracks-to-remove and dispatch "removetrack" and
+  //   "ended" events for them
+  // - If playback has ended, or there is no longer a media provider object,
+  //   remove any OutputMediaStreams that have the finish-when-ended flag set
+  // - Create track sources for, and add to OutputMediaStreams, the tracks in
+  //   tracks-to-add
+
+  const bool shouldHaveTrackSources = mTracksCaptured.Ref() &&
+                                      !IsPlaybackEnded() &&
+                                      mReadyState >= HAVE_METADATA;
+
+  // Add track sources for all enabled/selected MediaTracks.
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return;
   }
 
-  MediaSegment::Type type = inputTrack->AsAudioStreamTrack()
-                                ? MediaSegment::AUDIO
-                                : MediaSegment::VIDEO;
-  ProcessedMediaTrack* track =
-      inputTrack->Graph()->CreateForwardedInputTrack(type);
-  RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(track);
-  auto source = MakeRefPtr<StreamCaptureTrackSource>(&inputTrack->GetSource(),
-                                                     track, port);
-
-  // Track is muted initially, so we don't leak data if it's added while paused
-  // and an MTG iteration passes before the mute comes into effect.
-  source->SetEnabled(mSrcStreamIsPlaying);
-
-  RefPtr<MediaStreamTrack> domTrack;
-  if (inputTrack->AsAudioStreamTrack()) {
-    domTrack = new AudioStreamTrack(window, track, source);
-  } else {
-    domTrack = new VideoStreamTrack(window, track, source);
-  }
-
-  aOutputStream.mTracks.AppendElement(
-      Pair<nsString, RefPtr<MediaStreamTrackSource>>(aTrack->GetId(),
-                                                     source.get()));
-
-  if (aAsyncAddtrack) {
+  if (mDecoder) {
+    mDecoder->SetOutputCaptured(mTracksCaptured.Ref());
+  }
+
+  // Start with all MediaTracks
+  AutoTArray<RefPtr<MediaTrack>, 4> mediaTracksToAdd;
+  if (shouldHaveTrackSources) {
+    GetAllEnabledMediaTracks(mediaTracksToAdd);
+  }
+
+  // ...and all MediaElementTrackSources.
+  AutoTArray<nsString, 4> trackSourcesToRemove;
+  for (const auto& entry : mOutputTrackSources) {
+    trackSourcesToRemove.AppendElement(entry.GetKey());
+  }
+
+  // Then work out the differences.
+  for (const auto& track :
+       AutoTArray<RefPtr<MediaTrack>, 4>(mediaTracksToAdd)) {
+    if (mOutputTrackSources.GetWeak(track->GetId())) {
+      mediaTracksToAdd.RemoveElement(track);
+      trackSourcesToRemove.RemoveElement(track->GetId());
+    }
+  }
+
+  // First remove stale track sources.
+  for (const auto& id : trackSourcesToRemove) {
+    RefPtr<MediaElementTrackSource> source = mOutputTrackSources.GetWeak(id);
+
+    LOG(LogLevel::Debug, ("Removing output track source %p for track %s",
+                          source.get(), NS_ConvertUTF16toUTF8(id).get()));
+
+    if (mDecoder) {
+      mDecoder->RemoveOutputTrack(source->Track());
+    }
+
+    // The source of this track just ended. Force-notify that it ended.
+    // If we bounce it to the MediaTrackGraph it might not be picked up,
+    // for instance if the MediaInputPort was destroyed in the same
+    // iteration as it was added.
     mMainThreadEventTarget->Dispatch(
-        NewRunnableMethod<StoreRefPtrPassByPtr<MediaStreamTrack>>(
-            "DOMMediaStream::AddTrackInternal", aOutputStream.mStream,
-            &DOMMediaStream::AddTrackInternal, domTrack));
-  } else {
-    aOutputStream.mStream->AddTrackInternal(domTrack);
-  }
-
-  LOG(LogLevel::Debug,
-      ("Created %s track %p from track %p through MediaInputPort %p",
-       inputTrack->AsAudioStreamTrack() ? "audio" : "video", domTrack.get(),
-       inputTrack, port.get()));
-}
-
-void HTMLMediaElement::DiscardFinishWhenEndedOutputStreams() {
-  // Discard all output streams that have finished now.
+        NewRunnableMethod("MediaElementTrackSource::OverrideEnded", source,
+                          &MediaElementTrackSource::OverrideEnded));
+
+    mOutputTrackSources.Remove(id);
+  }
+
+  // Then update finish-when-ended output streams as needed.
   for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
     if (!mOutputStreams[i].mFinishWhenEnded) {
       continue;
     }
+
+    if (!mOutputStreams[i].mFinishWhenEndedLoadingSrc &&
+        !mOutputStreams[i].mFinishWhenEndedAttrStream) {
+      // This finish-when-ended stream has not seen any source loaded yet.
+      // Update the loading src if it's time.
+      if (!IsPlaybackEnded()) {
+        if (mLoadingSrc) {
+          mOutputStreams[i].mFinishWhenEndedLoadingSrc = mLoadingSrc;
+        } else if (mSrcAttrStream) {
+          mOutputStreams[i].mFinishWhenEndedAttrStream = mSrcAttrStream;
+        }
+      }
+      continue;
+    }
+
+    // Discard finish-when-ended output streams with a loading src set as
+    // needed.
+    if (!IsPlaybackEnded() &&
+        mLoadingSrc == mOutputStreams[i].mFinishWhenEndedLoadingSrc) {
+      continue;
+    }
+    if (!IsPlaybackEnded() &&
+        mSrcAttrStream == mOutputStreams[i].mFinishWhenEndedAttrStream) {
+      continue;
+    }
     LOG(LogLevel::Debug,
-        ("Playback ended. Letting output stream %p go inactive",
+        ("Playback ended or source changed. Discarding stream %p",
          mOutputStreams[i].mStream.get()));
-    mOutputStreams[i].mStream->SetFinishedOnInactive(true);
-    if (mOutputStreams[i].mCapturingDecoder) {
-      mDecoder->RemoveOutputStream(mOutputStreams[i].mStream);
-    }
     mOutputStreams.RemoveElementAt(i);
+    if (mOutputStreams.IsEmpty()) {
+      mTracksCaptured = nullptr;
+    }
+  }
+
+  // Finally add new MediaTracks.
+  for (const auto& mediaTrack : mediaTracksToAdd) {
+    nsAutoString id;
+    mediaTrack->GetId(id);
+
+    MediaSegment::Type type;
+    if (mediaTrack->AsAudioTrack()) {
+      type = MediaSegment::AUDIO;
+    } else if (mediaTrack->AsVideoTrack()) {
+      type = MediaSegment::VIDEO;
+    } else {
+      MOZ_CRASH("Unknown track type");
+    }
+
+    RefPtr<ProcessedMediaTrack> track;
+    RefPtr<MediaElementTrackSource> source;
+    if (mDecoder) {
+      track = mTracksCaptured.Ref()->mTrack->Graph()->CreateForwardedInputTrack(
+          type);
+      RefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
+      if (!principal || IsCORSSameOrigin()) {
+        principal = NodePrincipal();
+      }
+      source = MakeAndAddRef<MediaElementTrackSource>(track, principal);
+      mDecoder->AddOutputTrack(track);
+    } else if (mSrcStream) {
+      MediaStreamTrack* inputTrack;
+      if (AudioTrack* t = mediaTrack->AsAudioTrack()) {
+        inputTrack = t->GetAudioStreamTrack();
+      } else if (VideoTrack* t = mediaTrack->AsVideoTrack()) {
+        inputTrack = t->GetVideoStreamTrack();
+      } else {
+        MOZ_CRASH("Unknown track type");
+      }
+      MOZ_ASSERT(inputTrack);
+      if (!inputTrack) {
+        NS_ERROR("Input track not found in source stream");
+        return;
+      }
+      MOZ_DIAGNOSTIC_ASSERT(!inputTrack->Ended());
+
+      track = inputTrack->Graph()->CreateForwardedInputTrack(type);
+      RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(track);
+      source = MakeAndAddRef<MediaElementTrackSource>(&inputTrack->GetSource(),
+                                                      track, port);
+
+      // Track is muted initially, so we don't leak data if it's added while
+      // paused and an MTG iteration passes before the mute comes into effect.
+      source->SetEnabled(mSrcStreamIsPlaying);
+    } else {
+      MOZ_CRASH("Unknown source");
+    }
+
+    LOG(LogLevel::Debug, ("Adding output track source %p for track %s",
+                          source.get(), NS_ConvertUTF16toUTF8(id).get()));
+
+    track->QueueSetAutoend(false);
+    MOZ_DIAGNOSTIC_ASSERT(!mOutputTrackSources.GetWeak(id));
+    mOutputTrackSources.Put(id, source);
+
+    // Add the new track source to any existing output streams
+    for (OutputMediaStream& ms : mOutputStreams) {
+      if (source->Track()->mType == MediaSegment::VIDEO &&
+          ms.mCapturingAudioOnly) {
+        // If the output stream is for audio only we ignore video sources.
+        continue;
+      }
+      AddOutputTrackSourceToOutputStream(source, ms);
+    }
   }
 }
 
 bool HTMLMediaElement::CanBeCaptured(StreamCaptureType aCaptureType) {
   // Don't bother capturing when the document has gone away
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return false;
@@ -3286,91 +3408,89 @@ bool HTMLMediaElement::CanBeCaptured(Str
     return false;
   }
   return true;
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureStreamInternal(
     StreamCaptureBehavior aFinishBehavior, StreamCaptureType aStreamCaptureType,
     MediaTrackGraph* aGraph) {
-  MOZ_RELEASE_ASSERT(aGraph);
   MOZ_ASSERT(CanBeCaptured(aStreamCaptureType));
 
   MarkAsContentSource(CallerAPI::CAPTURE_STREAM);
   MarkAsTainted();
 
-  // We don't support routing to a different graph.
-  if (!mOutputStreams.IsEmpty() &&
-      aGraph != mOutputStreams[0].mGraphKeepAliveDummyStream->mTrack->Graph()) {
+  if (mTracksCaptured.Ref() &&
+      aGraph != mTracksCaptured.Ref()->mTrack->Graph()) {
     return nullptr;
   }
 
-  OutputMediaStream* out = mOutputStreams.AppendElement();
+  if (!mTracksCaptured.Ref()) {
+    // This is the first output stream, or there are no tracks. If the former,
+    // start capturing all tracks. If the latter, they will be added later.
+    mTracksCaptured = MakeRefPtr<SharedDummyTrack>(
+        aGraph->CreateSourceTrack(MediaSegment::AUDIO));
+    UpdateOutputTrackSources();
+  }
+
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
-  out->mGraphKeepAliveDummyStream =
-      mOutputStreams.Length() == 1
-          ? MakeRefPtr<SharedDummyTrack>(
-                aGraph->CreateSourceTrack(MediaSegment::AUDIO))
-          : mOutputStreams[0].mGraphKeepAliveDummyStream;
-  out->mStream = MakeAndAddRef<DOMMediaStream>(window);
-  out->mStream->SetFinishedOnInactive(false);
-  out->mFinishWhenEnded =
-      aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED;
-  out->mCapturingAudioOnly =
-      aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO;
+  OutputMediaStream* out = mOutputStreams.AppendElement(OutputMediaStream(
+      MakeRefPtr<DOMMediaStream>(window),
+      aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO,
+      aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED));
+
+  if (aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED &&
+      !mOutputTrackSources.IsEmpty()) {
+    // This output stream won't receive any more tracks when playback of the
+    // current src of this media element ends, or when the src of this media
+    // element changes. If we're currently playing something (i.e., if there are
+    // tracks currently captured), set the current src on the output stream so
+    // this can be tracked. If we're not playing anything,
+    // UpdateOutputTrackSources will set the current src when it becomes
+    // available later.
+    if (mLoadingSrc) {
+      out->mFinishWhenEndedLoadingSrc = mLoadingSrc;
+    }
+    if (mSrcAttrStream) {
+      out->mFinishWhenEndedAttrStream = mSrcAttrStream;
+    }
+    MOZ_ASSERT(out->mFinishWhenEndedLoadingSrc ||
+               out->mFinishWhenEndedAttrStream);
+  }
 
   if (aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO) {
     if (mSrcStream) {
       // We don't support applying volume and mute to the captured stream, when
       // capturing a MediaStream.
       ReportToConsole(nsIScriptError::errorFlag,
                       "MediaElementAudioCaptureOfMediaStreamError");
     }
 
     // mAudioCaptured tells the user that the audio played by this media element
     // is being routed to the captureStreams *instead* of being played to
     // speakers.
     mAudioCaptured = true;
   }
 
-  if (mDecoder) {
-    out->mCapturingDecoder = true;
-    mDecoder->AddOutputStream(out->mStream, out->mGraphKeepAliveDummyStream);
-  } else if (mSrcStream) {
-    out->mCapturingMediaStream = true;
-  }
-
-  if (mReadyState == HAVE_NOTHING) {
-    // Do not expose the tracks until we have metadata.
-    RefPtr<DOMMediaStream> result = out->mStream;
-    return result.forget();
-  }
-
-  if (mSrcStream) {
-    MOZ_DIAGNOSTIC_ASSERT(AudioTracks(), "Element can't have been unlinked");
-    for (size_t i = 0; i < AudioTracks()->Length(); ++i) {
-      AudioTrack* t = (*AudioTracks())[i];
-      if (t->Enabled()) {
-        AddCaptureMediaTrackToOutputStream(t, *out, false);
-      }
-    }
-    if (IsVideo() && !out->mCapturingAudioOnly) {
-      MOZ_DIAGNOSTIC_ASSERT(VideoTracks(), "Element can't have been unlinked");
+  for (const auto& entry : mOutputTrackSources) {
+    const RefPtr<MediaElementTrackSource>& source = entry.GetData();
+    if (source->Track()->mType == MediaSegment::VIDEO) {
       // Only add video tracks if we're a video element and the output stream
       // wants video.
-      for (size_t i = 0; i < VideoTracks()->Length(); ++i) {
-        VideoTrack* t = (*VideoTracks())[i];
-        if (t->Selected()) {
-          AddCaptureMediaTrackToOutputStream(t, *out, false);
-        }
+      if (!IsVideo()) {
+        continue;
+      }
+      if (out->mCapturingAudioOnly) {
+        continue;
       }
     }
-  }
-  RefPtr<DOMMediaStream> result = out->mStream;
-  return result.forget();
+    AddOutputTrackSourceToOutputStream(source, *out, AddTrackMode::SYNC);
+  }
+
+  return do_AddRef(out->mStream);
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureAudio(
     ErrorResult& aRv, MediaTrackGraph* aGraph) {
   MOZ_RELEASE_ASSERT(aGraph);
 
   if (!CanBeCaptured(StreamCaptureType::CAPTURE_AUDIO)) {
     aRv.Throw(NS_ERROR_FAILURE);
@@ -3646,17 +3766,17 @@ HTMLMediaElement::HTMLMediaElement(
     : nsGenericHTMLElement(std::move(aNodeInfo)),
       mWatchManager(this,
                     OwnerDoc()->AbstractMainThreadFor(TaskCategory::Other)),
       mMainThreadEventTarget(OwnerDoc()->EventTargetFor(TaskCategory::Other)),
       mAbstractMainThread(
           OwnerDoc()->AbstractMainThreadFor(TaskCategory::Other)),
       mShutdownObserver(new ShutdownObserver),
       mPlayed(new TimeRanges(ToSupports(OwnerDoc()))),
-      mPaused(true, "HTMLMediaElement::mPaused"),
+      mTracksCaptured(nullptr, "HTMLMediaElement::mTracksCaptured"),
       mErrorSink(new ErrorSink(this)),
       mAudioChannelWrapper(new AudioChannelAgentCallback(this)),
       mSink(MakePair(nsString(), RefPtr<AudioDeviceInfo>())),
       mShowPoster(IsVideo()) {
   MOZ_ASSERT(mMainThreadEventTarget);
   MOZ_ASSERT(mAbstractMainThread);
   // Please don't add anything to this constructor or the initialization
   // list that can cause AddRef to be called. This prevents subclasses
@@ -3674,16 +3794,27 @@ void HTMLMediaElement::Init() {
 
   mAudioTrackList = new AudioTrackList(OwnerDoc()->GetParentObject(), this);
   mVideoTrackList = new VideoTrackList(OwnerDoc()->GetParentObject(), this);
 
   DecoderDoctorLogger::LogConstruction(this);
 
   mWatchManager.Watch(mPaused, &HTMLMediaElement::UpdateWakeLock);
 
+  mWatchManager.Watch(mTracksCaptured,
+                      &HTMLMediaElement::UpdateOutputTrackSources);
+  mWatchManager.Watch(mReadyState, &HTMLMediaElement::UpdateOutputTrackSources);
+
+  mWatchManager.Watch(mDownloadSuspendedByCache,
+                      &HTMLMediaElement::UpdateReadyStateInternal);
+  mWatchManager.Watch(mFirstFrameLoaded,
+                      &HTMLMediaElement::UpdateReadyStateInternal);
+  mWatchManager.Watch(mSrcStreamPlaybackEnded,
+                      &HTMLMediaElement::UpdateReadyStateInternal);
+
   ErrorResult rv;
 
   double defaultVolume = Preferences::GetFloat("media.default_volume", 1.0);
   SetVolume(defaultVolume, rv);
 
   RegisterActivityObserver();
   NotifyOwnerDocumentActivityChanged();
 
@@ -3699,16 +3830,18 @@ void HTMLMediaElement::Init() {
 
 HTMLMediaElement::~HTMLMediaElement() {
   MOZ_ASSERT(mInitialized,
              "HTMLMediaElement must be initialized before it is destroyed.");
   NS_ASSERTION(
       !mHasSelfReference,
       "How can we be destroyed if we're still holding a self reference?");
 
+  mWatchManager.Shutdown();
+
   mShutdownObserver->Unsubscribe();
 
   if (mVideoFrameContainer) {
     mVideoFrameContainer->ForgetElement();
   }
   UnregisterActivityObserver();
 
   mSetCDMRequest.DisconnectIfExists();
@@ -4041,24 +4174,16 @@ void HTMLMediaElement::ReleaseAudioWakeL
     mWakeLock->Unlock(rv);
     rv.SuppressException();
     mWakeLock = nullptr;
   }
 }
 
 void HTMLMediaElement::WakeLockRelease() { ReleaseAudioWakeLockIfExists(); }
 
-HTMLMediaElement::OutputMediaStream::OutputMediaStream()
-    : mFinishWhenEnded(false),
-      mCapturingAudioOnly(false),
-      mCapturingDecoder(false),
-      mCapturingMediaStream(false) {}
-
-HTMLMediaElement::OutputMediaStream::~OutputMediaStream() = default;
-
 void HTMLMediaElement::GetEventTargetParent(EventChainPreVisitor& aVisitor) {
   if (!this->Controls() || !aVisitor.mEvent->mFlags.mIsTrusted) {
     nsGenericHTMLElement::GetEventTargetParent(aVisitor);
     return;
   }
 
   HTMLInputElement* el = nullptr;
   nsCOMPtr<nsINode> node;
@@ -4641,26 +4766,16 @@ nsresult HTMLMediaElement::FinishDecoder
                [](const GenericPromise::ResolveOrRejectValue& aValue) {
                  MOZ_ASSERT(aValue.IsResolve() && !aValue.ResolveValue());
                });
 #else
         ;
 #endif
   }
 
-  for (OutputMediaStream& ms : mOutputStreams) {
-    if (ms.mCapturingMediaStream) {
-      MOZ_ASSERT(!ms.mCapturingDecoder);
-      continue;
-    }
-
-    ms.mCapturingDecoder = true;
-    aDecoder->AddOutputStream(ms.mStream, ms.mGraphKeepAliveDummyStream);
-  }
-
   if (mMediaKeys) {
     if (mMediaKeys->GetCDMProxy()) {
       mDecoder->SetCDMProxy(mMediaKeys->GetCDMProxy());
     } else {
       // CDM must have crashed.
       ShutdownDecoder();
       return NS_ERROR_FAILURE;
     }
@@ -4784,17 +4899,16 @@ class HTMLMediaElement::MediaStreamTrack
 
     if (mElement->IsPlaybackEnded()) {
       return;
     }
     LOG(LogLevel::Debug, ("%p, mSrcStream %p became inactive", mElement.get(),
                           mElement->mSrcStream.get()));
 
     mElement->PlaybackEnded();
-    mElement->UpdateReadyStateInternal();
   }
 
   void NotifyInactive() override {
     if (!mElement) {
       return;
     }
 
     if (!mElement->IsVideo()) {
@@ -4972,28 +5086,20 @@ void HTMLMediaElement::EndSrcMediaStream
     mWatchManager.Unwatch(mMediaStreamRenderer->CurrentGraphTime(),
                           &HTMLMediaElement::UpdateSrcStreamTime);
     mMediaStreamRenderer->Shutdown();
     mMediaStreamRenderer = nullptr;
   }
 
   mSrcStream->UnregisterTrackListener(mMediaStreamTrackListener.get());
   mMediaStreamTrackListener = nullptr;
-  mSrcStreamTracksAvailable = false;
   mSrcStreamPlaybackEnded = false;
   mSrcStreamReportPlaybackEnded = false;
   mSrcStreamVideoPrincipal = nullptr;
 
-#ifdef DEBUG
-  for (OutputMediaStream& ms : mOutputStreams) {
-    // These tracks were removed by clearing AudioTracks() and VideoTracks().
-    MOZ_ASSERT(ms.mTracks.IsEmpty());
-  }
-#endif
-
   mSrcStream = nullptr;
 }
 
 static already_AddRefed<AudioTrack> CreateAudioTrack(
     AudioStreamTrack* aStreamTrack, nsIGlobalObject* aOwnerGlobal) {
   nsAutoString id;
   nsAutoString label;
   aStreamTrack->GetId(id);
@@ -5020,17 +5126,17 @@ void HTMLMediaElement::NotifyMediaStream
     const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(aTrack);
 
   if (aTrack->Ended()) {
     return;
   }
 
 #ifdef DEBUG
-  nsString id;
+  nsAutoString id;
   aTrack->GetId(id);
 
   LOG(LogLevel::Debug, ("%p, Adding %sTrack with id %s", this,
                         aTrack->AsAudioStreamTrack() ? "Audio" : "Video",
                         NS_ConvertUTF16toUTF8(id).get()));
 #endif
 
   if (AudioStreamTrack* t = aTrack->AsAudioStreamTrack()) {
@@ -5050,35 +5156,21 @@ void HTMLMediaElement::NotifyMediaStream
     // New MediaStreamTrack added, set the new added video track as selected
     // video track when there is no selected track.
     if (VideoTracks()->SelectedIndex() == -1) {
       MOZ_ASSERT(!mSelectedVideoStreamTrack);
       videoTrack->SetEnabledInternal(true, dom::MediaTrack::FIRE_NO_EVENTS);
     }
   }
 
-  UpdateReadyStateInternal();
-
-  if (!mSrcStreamTracksAvailable) {
-    mAbstractMainThread->Dispatch(NS_NewRunnableFunction(
-        "HTMLMediaElement::NotifyMediaStreamTrackAdded->FirstFrameLoaded",
-        [this, self = RefPtr<HTMLMediaElement>(this), stream = mSrcStream]() {
-          if (!mSrcStream || mSrcStream != stream) {
-            return;
-          }
-
-          LOG(LogLevel::Debug,
-              ("MediaElement %p MediaStream tracks available", this));
-
-          mSrcStreamTracksAvailable = true;
-
-          FirstFrameLoaded();
-          UpdateReadyStateInternal();
-        }));
-  }
+  // The set of enabled AudioTracks and selected video track might have changed.
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+  mAbstractMainThread->TailDispatcher().AddDirectTask(
+      NewRunnableMethod("HTMLMediaElement::FirstFrameLoaded", this,
+                        &HTMLMediaElement::FirstFrameLoaded));
 }
 
 void HTMLMediaElement::NotifyMediaStreamTrackRemoved(
     const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(aTrack);
 
   nsAutoString id;
   aTrack->GetId(id);
@@ -5114,24 +5206,32 @@ void HTMLMediaElement::ProcessMediaFragm
     mFragmentStart = parser.GetStartTime();
   }
 }
 
 void HTMLMediaElement::MetadataLoaded(const MediaInfo* aInfo,
                                       UniquePtr<const MetadataTags> aTags) {
   MOZ_ASSERT(NS_IsMainThread());
 
+  if (mDecoder) {
+    ConstructMediaTracks(aInfo);
+  }
+
   SetMediaInfo(*aInfo);
 
   mIsEncrypted =
       aInfo->IsEncrypted() || mPendingEncryptedInitData.IsEncrypted();
   mTags = std::move(aTags);
   mLoadedDataFired = false;
   ChangeReadyState(HAVE_METADATA);
 
+  // Add output tracks synchronously now to be sure they're available in
+  // "loadedmetadata" event handlers.
+  UpdateOutputTrackSources();
+
   DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   if (IsVideo() && HasVideo()) {
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
   NS_ASSERTION(!HasVideo() || (mMediaInfo.mVideo.mDisplay.width > 0 &&
                                mMediaInfo.mVideo.mDisplay.height > 0),
                "Video resolution must be known on 'loadedmetadata'");
   DispatchAsyncEvent(NS_LITERAL_STRING("loadedmetadata"));
@@ -5162,54 +5262,28 @@ void HTMLMediaElement::MetadataLoaded(co
     NotifyOwnerDocumentActivityChanged();
   }
 
   if (mDefaultPlaybackStartPosition != 0.0) {
     SetCurrentTime(mDefaultPlaybackStartPosition);
     mDefaultPlaybackStartPosition = 0.0;
   }
 
-  UpdateReadyStateInternal();
-
-  if (!mSrcStream) {
-    return;
-  }
-
-  for (OutputMediaStream& ms : mOutputStreams) {
-    if (AudioTracks()) {
-      for (size_t i = 0; i < AudioTracks()->Length(); ++i) {
-        AudioTrack* t = (*AudioTracks())[i];
-        if (t->Enabled()) {
-          AddCaptureMediaTrackToOutputStream(t, ms);
-        }
-      }
-    }
-    if (VideoTracks() && IsVideo() && !ms.mCapturingAudioOnly) {
-      // Only add video tracks if we're a video element and the output stream
-      // wants video.
-      for (size_t i = 0; i < VideoTracks()->Length(); ++i) {
-        VideoTrack* t = (*VideoTracks())[i];
-        if (t->Selected()) {
-          AddCaptureMediaTrackToOutputStream(t, ms);
-        }
-      }
-    }
-  }
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
 }
 
 void HTMLMediaElement::FirstFrameLoaded() {
   LOG(LogLevel::Debug,
       ("%p, FirstFrameLoaded() mFirstFrameLoaded=%d mWaitingForKey=%d", this,
-       mFirstFrameLoaded, mWaitingForKey));
+       mFirstFrameLoaded.Ref(), mWaitingForKey));
 
   NS_ASSERTION(!mSuspendedAfterFirstFrame, "Should not have already suspended");
 
   if (!mFirstFrameLoaded) {
     mFirstFrameLoaded = true;
-    UpdateReadyStateInternal();
   }
 
   ChangeDelayLoadStatus(false);
 
   if (mDecoder && mAllowSuspendAfterFirstFrame && mPaused &&
       !HasAttr(kNameSpaceID_None, nsGkAtoms::autoplay) &&
       mPreloadAction == HTMLMediaElement::PRELOAD_METADATA) {
     mSuspendedAfterFirstFrame = true;
@@ -5229,22 +5303,16 @@ void HTMLMediaElement::DecodeError(const
   nsAutoString src;
   GetCurrentSrc(src);
   AutoTArray<nsString, 1> params = {src};
   ReportLoadError("MediaLoadDecodeError", params);
 
   DecoderDoctorDiagnostics diagnostics;
   diagnostics.StoreDecodeError(OwnerDoc(), aError, src, __func__);
 
-  if (AudioTracks()) {
-    AudioTracks()->EmptyTracks();
-  }
-  if (VideoTracks()) {
-    VideoTracks()->EmptyTracks();
-  }
   if (mIsLoadingFromSourceChildren) {
     mErrorSink->ResetError();
     if (mSourceLoadCandidate) {
       DispatchAsyncSourceError(mSourceLoadCandidate);
       QueueLoadFromSourceTask();
     } else {
       NS_WARNING("Should know the source we were loading from!");
     }
@@ -5275,17 +5343,18 @@ void HTMLMediaElement::Error(uint16_t aE
 
 void HTMLMediaElement::PlaybackEnded() {
   // We changed state which can affect AddRemoveSelfReference
   AddRemoveSelfReference();
 
   NS_ASSERTION(!mDecoder || mDecoder->IsEnded(),
                "Decoder fired ended, but not in ended state");
 
-  DiscardFinishWhenEndedOutputStreams();
+  // IsPlaybackEnded() became true.
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
 
   if (mSrcStream) {
     LOG(LogLevel::Debug,
         ("%p, got duration by reaching the end of the resource", this));
     mSrcStreamPlaybackEnded = true;
     DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   } else {
     // mediacapture-main:
@@ -5357,17 +5426,16 @@ void HTMLMediaElement::SeekAborted() {
           promise->MaybeReject(NS_ERROR_DOM_ABORT_ERR);
         }));
   }
   MOZ_ASSERT(!mSeekDOMPromise);
 }
 
 void HTMLMediaElement::NotifySuspendedByCache(bool aSuspendedByCache) {
   mDownloadSuspendedByCache = aSuspendedByCache;
-  UpdateReadyStateInternal();
 }
 
 void HTMLMediaElement::DownloadSuspended() {
   if (mNetworkState == NETWORK_LOADING) {
     DispatchAsyncEvent(NS_LITERAL_STRING("progress"));
   }
   ChangeNetworkState(NETWORK_IDLE);
 }
@@ -5412,17 +5480,17 @@ void HTMLMediaElement::CheckProgress(boo
       // Were stalled.  Restart timer.
       StartProgressTimer();
       if (!mLoadedDataFired) {
         ChangeDelayLoadStatus(true);
       }
     }
     // Download statistics may have been updated, force a recheck of the
     // readyState.
-    UpdateReadyStateInternal();
+    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
   }
 
   if (now - mDataTime >= TimeDuration::FromMilliseconds(STALL_MS)) {
     if (!mMediaSource) {
       DispatchAsyncEvent(NS_LITERAL_STRING("stalled"));
     } else {
       ChangeDelayLoadStatus(false);
     }
@@ -5505,24 +5573,22 @@ void HTMLMediaElement::UpdateReadyStateI
     // on its own thread before MetadataLoaded gets a chance to run.
     // The arrival of more data can't change us out of this readyState.
     LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
                           "Decoder ready state < HAVE_METADATA",
                           this));
     return;
   }
 
+  if (mDecoder) {
+    // IsPlaybackEnded() might have become false.
+    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
+  }
+
   if (mSrcStream && mReadyState < HAVE_METADATA) {
-    if (!mSrcStreamTracksAvailable) {
-      LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
-                            "MediaStreamTracks not available yet",
-                            this));
-      return;
-    }
-
     bool hasAudioTracks = AudioTracks() && !AudioTracks()->IsEmpty();
     bool hasVideoTracks = VideoTracks() && !VideoTracks()->IsEmpty();
     if (!hasAudioTracks && !hasVideoTracks) {
       LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
                             "Stream with no tracks",
                             this));
       // Give it one last chance to remove the self reference if needed.
       AddRemoveSelfReference();
@@ -6100,18 +6166,24 @@ already_AddRefed<nsIPrincipal> HTMLMedia
   }
   return nullptr;
 }
 
 void HTMLMediaElement::NotifyDecoderPrincipalChanged() {
   RefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
   bool isSameOrigin = !principal || IsCORSSameOrigin();
   mDecoder->UpdateSameOriginStatus(isSameOrigin);
-  mDecoder->SetOutputStreamPrincipal(isSameOrigin ? NodePrincipal()
-                                                  : principal.get());
+
+  if (isSameOrigin) {
+    principal = NodePrincipal();
+  }
+  for (const auto& entry : mOutputTrackSources) {
+    entry.GetData()->SetPrincipal(principal);
+  }
+  mDecoder->SetOutputTracksPrincipal(principal);
 }
 
 void HTMLMediaElement::Invalidate(bool aImageSizeChanged,
                                   Maybe<nsIntSize>& aNewIntrinsicSize,
                                   bool aForceInvalidate) {
   nsIFrame* frame = GetPrimaryFrame();
   if (aNewIntrinsicSize) {
     UpdateMediaSize(aNewIntrinsicSize.value());
@@ -6142,17 +6214,17 @@ void HTMLMediaElement::UpdateMediaSize(c
   MOZ_ASSERT(NS_IsMainThread());
 
   if (IsVideo() && mReadyState != HAVE_NOTHING &&
       mMediaInfo.mVideo.mDisplay != aSize) {
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
 
   mMediaInfo.mVideo.mDisplay = aSize;
-  UpdateReadyStateInternal();
+  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
 
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
     // The first-frame listener won't be needed again for this stream.
     mFirstFrameListener = nullptr;
   }
 }
 
@@ -6885,17 +6957,19 @@ void HTMLMediaElement::NotifyWaitingForK
   // 7.3.4 Queue a "waitingforkey" Event
   // 1. Let the media element be the specified HTMLMediaElement object.
   // 2. If the media element's waiting for key value is true, abort these steps.
   if (mWaitingForKey == NOT_WAITING_FOR_KEY) {
     // 3. Set the media element's waiting for key value to true.
     // Note: algorithm continues in UpdateReadyStateInternal() when all decoded
     // data enqueued in the MDSM is consumed.
     mWaitingForKey = WAITING_FOR_KEY;
-    UpdateReadyStateInternal();
+    // mWaitingForKey changed outside of UpdateReadyStateInternal. This may
+    // affect mReadyState.
+    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
   }
 }
 
 AudioTrackList* HTMLMediaElement::AudioTracks() { return mAudioTrackList; }
 
 VideoTrackList* HTMLMediaElement::VideoTracks() { return mVideoTrackList; }
 
 TextTrackList* HTMLMediaElement::GetTextTracks() {
@@ -6923,17 +6997,19 @@ TextTrackManager* HTMLMediaElement::GetO
   return mTextTrackManager;
 }
 
 MediaDecoderOwner::NextFrameStatus HTMLMediaElement::NextFrameStatus() {
   if (mDecoder) {
     return mDecoder->NextFrameStatus();
   }
   if (mSrcStream) {
-    if (mSrcStreamTracksAvailable && !mSrcStreamPlaybackEnded) {
+    AutoTArray<RefPtr<MediaTrack>, 4> tracks;
+    GetAllEnabledMediaTracks(tracks);
+    if (!tracks.IsEmpty() && !mSrcStreamPlaybackEnded) {
       return NEXT_FRAME_AVAILABLE;
     }
     return NEXT_FRAME_UNAVAILABLE;
   }
   return NEXT_FRAME_UNINITIALIZED;
 }
 
 void HTMLMediaElement::SetDecoder(MediaDecoder* aDecoder) {
@@ -7090,24 +7166,30 @@ void HTMLMediaElement::AudioCaptureTrack
     RefPtr<DOMMediaStream> stream =
         CaptureStreamInternal(StreamCaptureBehavior::CONTINUE_WHEN_ENDED,
                               StreamCaptureType::CAPTURE_AUDIO, mtg);
     mStreamWindowCapturer =
         MakeUnique<MediaStreamWindowCapturer>(stream, window->WindowID());
   } else if (!aCapture && mStreamWindowCapturer) {
     for (size_t i = 0; i < mOutputStreams.Length(); i++) {
       if (mOutputStreams[i].mStream == mStreamWindowCapturer->mStream) {
-        if (mOutputStreams[i].mCapturingDecoder && mDecoder) {
-          mDecoder->RemoveOutputStream(mOutputStreams[i].mStream);
+        // We own this MediaStream, it is not exposed to JS.
+        AutoTArray<RefPtr<MediaStreamTrack>, 2> tracks;
+        mStreamWindowCapturer->mStream->GetTracks(tracks);
+        for (auto& track : tracks) {
+          track->Stop();
         }
         mOutputStreams.RemoveElementAt(i);
         break;
       }
     }
     mStreamWindowCapturer = nullptr;
+    if (mOutputStreams.IsEmpty()) {
+      mTracksCaptured = nullptr;
+    }
   }
 }
 
 void HTMLMediaElement::NotifyCueDisplayStatesChanged() {
   if (!mTextTrackManager) {
     return;
   }
 
@@ -7236,22 +7318,20 @@ bool HTMLMediaElement::IsAudible() const
   if (mMuted || (std::fabs(Volume()) <= 1e-7)) {
     return false;
   }
 
   return mIsAudioTrackAudible;
 }
 
 void HTMLMediaElement::ConstructMediaTracks(const MediaInfo* aInfo) {
-  if (mMediaTracksConstructed || !aInfo) {
+  if (!aInfo) {
     return;
   }
 
-  mMediaTracksConstructed = true;
-
   AudioTrackList* audioList = AudioTracks();
   if (audioList && aInfo->HasAudio()) {
     const TrackInfo& info = aInfo->mAudio;
     RefPtr<AudioTrack> track = MediaTrackList::CreateAudioTrack(
         audioList->GetOwnerGlobal(), info.mId, info.mKind, info.mLabel,
         info.mLanguage, info.mEnabled);
 
     audioList->AddTrack(track);
@@ -7268,22 +7348,19 @@ void HTMLMediaElement::ConstructMediaTra
     track->SetEnabledInternal(info.mEnabled, MediaTrack::FIRE_NO_EVENTS);
   }
 }
 
 void HTMLMediaElement::RemoveMediaTracks() {
   if (mAudioTrackList) {
     mAudioTrackList->RemoveTracks();
   }
-
   if (mVideoTrackList) {
     mVideoTrackList->RemoveTracks();
   }
-
-  mMediaTracksConstructed = false;
 }
 
 class MediaElementGMPCrashHelper : public GMPCrashHelper {
  public:
   explicit MediaElementGMPCrashHelper(HTMLMediaElement* aElement)
       : mElement(aElement) {
     MOZ_ASSERT(NS_IsMainThread());  // WeakPtr isn't thread safe.
   }
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -108,16 +108,36 @@ class HTMLMediaElement : public nsGeneri
  public:
   typedef mozilla::TimeStamp TimeStamp;
   typedef mozilla::layers::ImageContainer ImageContainer;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
   typedef mozilla::MediaResource MediaResource;
   typedef mozilla::MediaDecoderOwner MediaDecoderOwner;
   typedef mozilla::MetadataTags MetadataTags;
 
+  // Helper struct to keep track of the MediaStreams returned by
+  // mozCaptureStream(). For each OutputMediaStream, dom::MediaTracks get
+  // captured into MediaStreamTracks which get added to
+  // OutputMediaStream::mStream.
+  struct OutputMediaStream {
+    OutputMediaStream(RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
+                      bool aFinishWhenEnded);
+    ~OutputMediaStream();
+
+    RefPtr<DOMMediaStream> mStream;
+    const bool mCapturingAudioOnly;
+    const bool mFinishWhenEnded;
+    // If mFinishWhenEnded is true, this is the URI of the first resource
+    // mStream got tracks for, if not a MediaStream.
+    nsCOMPtr<nsIURI> mFinishWhenEndedLoadingSrc;
+    // If mFinishWhenEnded is true, this is the first MediaStream mStream got
+    // tracks for, if not a resource.
+    RefPtr<DOMMediaStream> mFinishWhenEndedAttrStream;
+  };
+
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(HTMLMediaElement)
   NS_DECL_NSIMUTATIONOBSERVER_CONTENTREMOVED
 
   CORSMode GetCORSMode() { return mCORSMode; }
 
   explicit HTMLMediaElement(
       already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo);
   void Init();
@@ -246,17 +266,19 @@ class HTMLMediaElement : public nsGeneri
   void PrincipalHandleChangedForVideoFrameContainer(
       VideoFrameContainer* aContainer,
       const PrincipalHandle& aNewPrincipalHandle) override;
 
   // Dispatch events
   void DispatchAsyncEvent(const nsAString& aName) final;
 
   // Triggers a recomputation of readyState.
-  void UpdateReadyState() override { UpdateReadyStateInternal(); }
+  void UpdateReadyState() override {
+    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+  }
 
   // Dispatch events that were raised while in the bfcache
   nsresult DispatchPendingMediaEvents();
 
   // Return true if we can activate autoplay assuming enough data has arrived.
   bool CanActivateAutoplay();
 
   // Notify that state has changed that might cause an autoplay element to
@@ -688,20 +710,16 @@ class HTMLMediaElement : public nsGeneri
     CREATE_PATTERN,
     CREATE_IMAGEBITMAP,
     CAPTURE_STREAM,
   };
   void MarkAsContentSource(CallerAPI aAPI);
 
   Document* GetDocument() const override;
 
-  void ConstructMediaTracks(const MediaInfo* aInfo) override;
-
-  void RemoveMediaTracks() override;
-
   already_AddRefed<GMPCrashHelper> CreateGMPCrashHelper() override;
 
   nsISerialEventTarget* MainThreadEventTarget() {
     return mMainThreadEventTarget;
   }
 
   // Set the sink id (of the output device) that the audio will play. If aSinkId
   // is empty the default device will be set.
@@ -724,47 +742,27 @@ class HTMLMediaElement : public nsGeneri
   bool IsAudible() const;
 
  protected:
   virtual ~HTMLMediaElement();
 
   class AudioChannelAgentCallback;
   class ChannelLoader;
   class ErrorSink;
+  class MediaElementTrackSource;
   class MediaLoadListener;
   class MediaStreamRenderer;
   class MediaStreamTrackListener;
   class FirstFrameListener;
   class ShutdownObserver;
-  class StreamCaptureTrackSource;
 
   MediaDecoderOwner::NextFrameStatus NextFrameStatus();
 
   void SetDecoder(MediaDecoder* aDecoder);
 
-  // Holds references to the DOM wrappers for the MediaStreams that we're
-  // writing to.
-  struct OutputMediaStream {
-    OutputMediaStream();
-    ~OutputMediaStream();
-
-    RefPtr<DOMMediaStream> mStream;
-    // Dummy stream to keep mGraph from shutting down when MediaDecoder shuts
-    // down. Shared across all OutputMediaStreams as one stream is enough to
-    // keep the graph alive.
-    RefPtr<SharedDummyTrack> mGraphKeepAliveDummyStream;
-    bool mFinishWhenEnded;
-    bool mCapturingAudioOnly;
-    bool mCapturingDecoder;
-    bool mCapturingMediaStream;
-
-    // The following members are keeping state for a captured MediaStream.
-    nsTArray<Pair<nsString, RefPtr<MediaStreamTrackSource>>> mTracks;
-  };
-
   void PlayInternal(bool aHandlingUserInput);
 
   /** Use this method to change the mReadyState member, so required
    * events can be fired.
    */
   void ChangeReadyState(nsMediaReadyState aState);
 
   /**
@@ -850,52 +848,59 @@ class HTMLMediaElement : public nsGeneri
 
   /**
    * Called by our DOMMediaStream::TrackListener when a MediaStreamTrack in
    * |mSrcStream|'s playback stream has ended.
    */
   void NotifyMediaStreamTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack);
 
   /**
+   * Convenience method to get in a single list all enabled AudioTracks and, if
+   * this is a video element, the selected VideoTrack.
+   */
+  void GetAllEnabledMediaTracks(nsTArray<RefPtr<MediaTrack>>& aTracks);
+
+  /**
    * Enables or disables all tracks forwarded from mSrcStream to all
    * OutputMediaStreams. We do this for muting the tracks when pausing,
    * and unmuting when playing the media element again.
-   *
-   * If mSrcStream is unset, this does nothing.
    */
   void SetCapturedOutputStreamsEnabled(bool aEnabled);
 
   /**
-   * Create a new MediaStreamTrack for aTrack and add it to the DOMMediaStream
-   * in aOutputStream. This automatically sets the output track to enabled or
-   * disabled depending on our current playing state.
+   * Create a new MediaStreamTrack for the TrackSource corresponding to aTrack
+   * and add it to the DOMMediaStream in aOutputStream. This automatically sets
+   * the output track to enabled or disabled depending on our current playing
+   * state.
    */
-  void AddCaptureMediaTrackToOutputStream(dom::MediaTrack* aTrack,
-                                          OutputMediaStream& aOutputStream,
-                                          bool aAsyncAddtrack = true);
+  enum class AddTrackMode { ASYNC, SYNC };
+  void AddOutputTrackSourceToOutputStream(
+      MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
+      AddTrackMode aMode = AddTrackMode::ASYNC);
 
   /**
-   * Discard all output streams that are flagged to finish when playback ends.
+   * Creates output track sources when this media element is captured, tracks
+   * exist, playback is not ended and readyState is >= HAVE_METADATA.
    */
-  void DiscardFinishWhenEndedOutputStreams();
+  void UpdateOutputTrackSources();
 
   /**
    * Returns an DOMMediaStream containing the played contents of this
    * element. When aBehavior is FINISH_WHEN_ENDED, when this element ends
    * playback we will finish the stream and not play any more into it.  When
    * aType is CONTINUE_WHEN_ENDED, ending playback does not finish the stream.
    * The stream will never finish.
    *
    * When aType is CAPTURE_AUDIO, we stop playout of audio and instead route it
    * to the DOMMediaStream. Volume and mute state will be applied to the audio
    * reaching the stream. No video tracks will be captured in this case.
    */
   already_AddRefed<DOMMediaStream> CaptureStreamInternal(
-      StreamCaptureBehavior aBehavior, StreamCaptureType aType,
-      MediaTrackGraph* aGraph);
+      StreamCaptureBehavior aFinishBehavior,
+      StreamCaptureType aStreamCaptureType, MediaTrackGraph* aGraph);
 
   /**
    * Initialize a decoder as a clone of an existing decoder in another
    * element.
    * mLoadingSrc must already be set.
    */
   nsresult InitializeDecoderAsClone(ChannelMediaDecoder* aOriginal);
 
@@ -1244,16 +1249,28 @@ class HTMLMediaElement : public nsGeneri
   // and queues a task to resolve them also to dispatch a "playing" event.
   void NotifyAboutPlaying();
 
   already_AddRefed<Promise> CreateDOMPromise(ErrorResult& aRv) const;
 
   // Pass information for deciding the video decode mode to decoder.
   void NotifyDecoderActivityChanges() const;
 
+  // Constructs an AudioTrack in mAudioTrackList if aInfo reports that audio is
+  // available, and a VideoTrack in mVideoTrackList if aInfo reports that video
+  // is available.
+  void ConstructMediaTracks(const MediaInfo* aInfo);
+
+  // Removes all MediaTracks from mAudioTrackList and mVideoTrackList and fires
+  // "removetrack" on the lists accordingly.
+  // Note that by spec, this should not fire "removetrack". However, it appears
+  // other user agents do, per
+  // https://wpt.fyi/results/media-source/mediasource-avtracks.html.
+  void RemoveMediaTracks();
+
   // Mark the decoder owned by the element as tainted so that the
   // suspend-video-decoder is disabled.
   void MarkAsTainted();
 
   virtual nsresult AfterSetAttr(int32_t aNameSpaceID, nsAtom* aName,
                                 const nsAttrValue* aValue,
                                 const nsAttrValue* aOldValue,
                                 nsIPrincipal* aMaybeScriptedPrincipal,
@@ -1325,19 +1342,16 @@ class HTMLMediaElement : public nsGeneri
   // Holds a reference to the MediaStream that we're actually playing.
   // At most one of mDecoder and mSrcStream can be non-null.
   RefPtr<DOMMediaStream> mSrcStream;
 
   // The MediaStreamRenderer handles rendering of our selected video track, and
   // enabled audio tracks, while mSrcStream is set.
   RefPtr<MediaStreamRenderer> mMediaStreamRenderer;
 
-  // True once mSrcStream's initial set of tracks are known.
-  bool mSrcStreamTracksAvailable = false;
-
   // True once PlaybackEnded() is called and we're playing a MediaStream.
   // Reset to false if we start playing mSrcStream again.
   Watchable<bool> mSrcStreamPlaybackEnded = {
       false, "HTMLMediaElement::mSrcStreamPlaybackEnded"};
 
   // Mirrors mSrcStreamPlaybackEnded after a tail dispatch when set to true,
   // but may be be forced to false directly. To accomodate when an application
   // ends playback synchronously by manipulating mSrcStream or its tracks,
@@ -1347,16 +1361,22 @@ class HTMLMediaElement : public nsGeneri
   // Holds a reference to the stream connecting this stream to the window
   // capture sink.
   UniquePtr<MediaStreamWindowCapturer> mStreamWindowCapturer;
 
   // Holds references to the DOM wrappers for the MediaStreams that we're
   // writing to.
   nsTArray<OutputMediaStream> mOutputStreams;
 
+  // Mapping for output tracks, from dom::MediaTrack ids to the
+  // MediaElementTrackSource that represents the source of all corresponding
+  // MediaStreamTracks captured from this element.
+  nsRefPtrHashtable<nsStringHashKey, MediaElementTrackSource>
+      mOutputTrackSources;
+
   // Holds a reference to the first-frame-getting track listener attached to
   // mSelectedVideoStreamTrack.
   RefPtr<FirstFrameListener> mFirstFrameListener;
   // The currently selected video stream track.
   RefPtr<VideoStreamTrack> mSelectedVideoStreamTrack;
 
   const RefPtr<ShutdownObserver> mShutdownObserver;
 
@@ -1538,26 +1558,34 @@ class HTMLMediaElement : public nsGeneri
   // start playing when loaded. The 'autoplay' attribute of the object
   // is a mirror of the HTML attribute. These are different from this
   // 'mAutoplaying' flag, which indicates whether the current playback
   // is a result of the autoplay attribute.
   bool mAutoplaying = true;
 
   // Playback of the video is paused either due to calling the
   // 'Pause' method, or playback not yet having started.
-  Watchable<bool> mPaused;
+  Watchable<bool> mPaused = {true, "HTMLMediaElement::mPaused"};
 
   // The following two fields are here for the private storage of the builtin
   // video controls, and control 'casting' of the video to external devices
   // (TVs, projectors etc.)
   // True if casting is currently allowed
   bool mAllowCasting = false;
   // True if currently casting this video
   bool mIsCasting = false;
 
+  // Set while there are some OutputMediaStreams this media element's enabled
+  // and selected tracks are captured into. When set, all tracks are captured
+  // into the graph of this dummy track.
+  // NB: This is a SharedDummyTrack to allow non-default graphs (AudioContexts
+  // with an explicit sampleRate defined) to capture this element. When
+  // cross-graph tracks are supported, this can become a bool.
+  Watchable<RefPtr<SharedDummyTrack>> mTracksCaptured;
+
   // True if the sound is being captured.
   bool mAudioCaptured = false;
 
   // If TRUE then the media element was actively playing before the currently
   // in progress seeking. If FALSE then the media element is either not seeking
   // or was not actively playing before the current seek. Used to decide whether
   // to raise the 'waiting' event as per 4.7.1.8 in HTML 5 specification.
   bool mPlayingBeforeSeek = false;
@@ -1649,17 +1677,18 @@ class HTMLMediaElement : public nsGeneri
 
   // Listens for waitingForKey events from the owned decoder.
   MediaEventListener mWaitingForKeyListener;
 
   // Init Data that needs to be sent in 'encrypted' events in MetadataLoaded().
   EncryptionInfo mPendingEncryptedInitData;
 
   // True if the media's channel's download has been suspended.
-  bool mDownloadSuspendedByCache = false;
+  Watchable<bool> mDownloadSuspendedByCache = {
+      false, "HTMLMediaElement::mDownloadSuspendedByCache"};
 
   // Disable the video playback by track selection. This flag might not be
   // enough if we ever expand the ability of supporting multi-tracks video
   // playback.
   bool mDisableVideo = false;
 
   RefPtr<TextTrackManager> mTextTrackManager;
 
@@ -1787,35 +1816,32 @@ class HTMLMediaElement : public nsGeneri
   // True if Init() has been called after construction
   bool mInitialized = false;
 
   // True if user has called load(), seek() or element has started playing
   // before. It's *only* use for checking autoplay policy
   bool mIsBlessed = false;
 
   // True if the first frame has been successfully loaded.
-  bool mFirstFrameLoaded = false;
+  Watchable<bool> mFirstFrameLoaded = {false,
+                                       "HTMLMediaElement::mFirstFrameLoaded"};
 
   // Media elements also have a default playback start position, which must
   // initially be set to zero seconds. This time is used to allow the element to
   // be seeked even before the media is loaded.
   double mDefaultPlaybackStartPosition = 0.0;
 
   // True if media element has been marked as 'tainted' and can't
   // participate in video decoder suspending.
   bool mHasSuspendTaint = false;
 
   // True if media element has been forced into being considered 'hidden'.
   // For use by mochitests. Enabling pref "media.test.video-suspend"
   bool mForcedHidden = false;
 
-  // True if audio tracks and video tracks are constructed and added into the
-  // track list, false if all tracks are removed from the track list.
-  bool mMediaTracksConstructed = false;
-
   Visibility mVisibilityState = Visibility::Untracked;
 
   UniquePtr<ErrorSink> mErrorSink;
 
   // This wrapper will handle all audio channel related stuffs, eg. the
   // operations of tab audio indicator, Fennec's media control. Note:
   // mAudioChannelWrapper might be null after GC happened.
   RefPtr<AudioChannelAgentCallback> mAudioChannelWrapper;
--- a/dom/indexedDB/ActorsChild.cpp
+++ b/dom/indexedDB/ActorsChild.cpp
@@ -3256,34 +3256,36 @@ BackgroundCursorChild::BackgroundCursorC
                                              IDBObjectStore* aObjectStore,
                                              Direction aDirection)
     : mRequest(aRequest),
       mTransaction(aRequest->GetTransaction()),
       mObjectStore(aObjectStore),
       mIndex(nullptr),
       mCursor(nullptr),
       mStrongRequest(aRequest),
-      mDirection(aDirection) {
+      mDirection(aDirection),
+      mInFlightResponseInvalidationNeeded(false) {
   MOZ_ASSERT(aObjectStore);
   aObjectStore->AssertIsOnOwningThread();
   MOZ_ASSERT(mTransaction);
 
   MOZ_COUNT_CTOR(indexedDB::BackgroundCursorChild);
 }
 
 BackgroundCursorChild::BackgroundCursorChild(IDBRequest* aRequest,
                                              IDBIndex* aIndex,
                                              Direction aDirection)
     : mRequest(aRequest),
       mTransaction(aRequest->GetTransaction()),
       mObjectStore(nullptr),
       mIndex(aIndex),
       mCursor(nullptr),
       mStrongRequest(aRequest),
-      mDirection(aDirection) {
+      mDirection(aDirection),
+      mInFlightResponseInvalidationNeeded(false) {
   MOZ_ASSERT(aIndex);
   aIndex->AssertIsOnOwningThread();
   MOZ_ASSERT(mTransaction);
 
   MOZ_COUNT_CTOR(indexedDB::BackgroundCursorChild);
 }
 
 BackgroundCursorChild::~BackgroundCursorChild() {
@@ -3491,16 +3493,29 @@ void BackgroundCursorChild::InvalidateCa
   // need to care, etc.
 
   IDB_LOG_MARK_CHILD_TRANSACTION_REQUEST(
       "PRELOAD: Invalidating all %zu cached responses", "Invalidating",
       mTransaction->LoggingSerialNumber(), mRequest->LoggingSerialNumber(),
       mCachedResponses.size());
 
   mCachedResponses.clear();
+
+  // We only hold a strong cursor reference in mStrongCursor when
+  // continue()/similar has been called. In those cases we expect a response
+  // that will be received in the future, and it may include prefetched data
+  // that needs to be discarded.
+  if (mStrongCursor) {
+    IDB_LOG_MARK_CHILD_TRANSACTION_REQUEST(
+        "PRELOAD: Setting flag to invalidate in-flight responses",
+        "Set flag to invalidate in-flight responses",
+        mTransaction->LoggingSerialNumber(), mRequest->LoggingSerialNumber());
+
+    mInFlightResponseInvalidationNeeded = true;
+  }
 }
 
 template <typename Condition>
 void BackgroundCursorChild::DiscardCachedResponses(
     const Condition& aConditionFunc) {
   size_t discardedCount = 0;
   while (!mCachedResponses.empty() &&
          aConditionFunc(mCachedResponses.front())) {
@@ -3596,16 +3611,27 @@ void BackgroundCursorChild::HandleMultip
 
     // TODO: At the moment, we only send a cursor request to the parent if
     // requested by the user code. Therefore, the first result is always used as
     // the current result, and the potential extra results are cached. If we
     // extended this towards preloading in the background, all results might
     // need to be cached.
     aHandleRecord(/* aUseAsCurrentResult */ isFirst, response);
     isFirst = false;
+
+    if (mInFlightResponseInvalidationNeeded) {
+      IDB_LOG_MARK_CHILD_TRANSACTION_REQUEST(
+          "PRELOAD: Discarding remaining responses since "
+          "mInFlightResponseInvalidationNeeded is set",
+          "Discarding responses", mTransaction->LoggingSerialNumber(),
+          mRequest->LoggingSerialNumber());
+
+      mInFlightResponseInvalidationNeeded = false;
+      break;
+    }
   }
 
   ResultHelper helper(mRequest, mTransaction, mCursor);
   DispatchSuccessEvent(&helper);
 }
 
 void BackgroundCursorChild::HandleResponse(
     const nsTArray<ObjectStoreCursorResponse>& aResponses) {
--- a/dom/indexedDB/ActorsChild.h
+++ b/dom/indexedDB/ActorsChild.h
@@ -653,16 +653,17 @@ class BackgroundCursorChild final : publ
   RefPtr<IDBRequest> mStrongRequest;
   RefPtr<IDBCursor> mStrongCursor;
 
   Direction mDirection;
 
   NS_DECL_OWNINGTHREAD
 
   std::deque<CachedResponse> mCachedResponses, mDelayedResponses;
+  bool mInFlightResponseInvalidationNeeded;
 
  public:
   BackgroundCursorChild(IDBRequest* aRequest, IDBObjectStore* aObjectStore,
                         Direction aDirection);
 
   BackgroundCursorChild(IDBRequest* aRequest, IDBIndex* aIndex,
                         Direction aDirection);
 
--- a/dom/indexedDB/IndexedDatabaseManager.cpp
+++ b/dom/indexedDB/IndexedDatabaseManager.cpp
@@ -107,17 +107,23 @@ const uint32_t kDeleteTimeoutMs = 1000;
 // The threshold we use for structured clone data storing.
 // Anything smaller than the threshold is compressed and stored in the database.
 // Anything larger is compressed and stored outside the database.
 const int32_t kDefaultDataThresholdBytes = 1024 * 1024;  // 1MB
 
 // The maximal size of a serialized object to be transfered through IPC.
 const int32_t kDefaultMaxSerializedMsgSize = IPC::Channel::kMaximumMessageSize;
 
-const int32_t kDefaultMaxPreloadExtraRecords = 0;
+// The maximum number of records to preload (in addition to the one requested by
+// the child).
+//
+// TODO: The current number was chosen for no particular reason. Telemetry
+// should be added to determine whether this is a reasonable number for an
+// overwhelming majority of cases.
+const int32_t kDefaultMaxPreloadExtraRecords = 64;
 
 #define IDB_PREF_BRANCH_ROOT "dom.indexedDB."
 
 const char kTestingPref[] = IDB_PREF_BRANCH_ROOT "testing";
 const char kPrefExperimental[] = IDB_PREF_BRANCH_ROOT "experimental";
 const char kPrefFileHandle[] = "dom.fileHandle.enabled";
 const char kDataThresholdPref[] = IDB_PREF_BRANCH_ROOT "dataThreshold";
 const char kPrefMaxSerilizedMsgSize[] =
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -1345,17 +1345,27 @@ void ContentChild::InitXPCOM(
     MOZ_ASSERT_UNREACHABLE("PBackground init can't fail at this point");
     return;
   }
 
   LSObject::Initialize();
 
   ClientManager::Startup();
 
-  RemoteWorkerService::Initialize();
+  // Respecting COOP and COEP requires processing headers in the parent process
+  // in order to choose an appropriate content process, but the workers'
+  // ScriptLoader processes headers in content processes. An intermediary step
+  // that provides security guarantees is to simply never allow SharedWorkers
+  // and ServiceWorkers to exist in a COOP+COEP process. The ultimate goal
+  // is to allow these worker types to be put in such processes based on their
+  // script response headers.
+  // https://bugzilla.mozilla.org/show_bug.cgi?id=1595206
+  if (!IsWebCoopCoepRemoteType(GetRemoteType())) {
+    RemoteWorkerService::Initialize();
+  }
 
   nsCOMPtr<nsIConsoleService> svc(do_GetService(NS_CONSOLESERVICE_CONTRACTID));
   if (!svc) {
     NS_WARNING("Couldn't acquire console service");
     return;
   }
 
   mConsoleListener = new ConsoleListener(this);
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -722,16 +722,21 @@ const nsDependentSubstring RemoteTypePre
   return StringHead(aContentProcessType, equalIdx);
 }
 
 bool IsWebRemoteType(const nsAString& aContentProcessType) {
   return StringBeginsWith(aContentProcessType,
                           NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
 }
 
+bool IsWebCoopCoepRemoteType(const nsAString& aContentProcessType) {
+  return StringBeginsWith(aContentProcessType,
+                          NS_LITERAL_STRING(WITH_COOP_COEP_REMOTE_TYPE_PREFIX));
+}
+
 /*static*/
 uint32_t ContentParent::GetMaxProcessCount(
     const nsAString& aContentProcessType) {
   // Max process count is based only on the prefix.
   const nsDependentSubstring processTypePrefix =
       RemoteTypePrefix(aContentProcessType);
 
   // Check for the default remote type of "web", as it uses different prefs.
--- a/dom/ipc/ContentParent.h
+++ b/dom/ipc/ContentParent.h
@@ -1389,16 +1389,18 @@ NS_DEFINE_STATIC_IID_ACCESSOR(ContentPar
 
 // This is the C++ version of remoteTypePrefix in E10SUtils.jsm.
 const nsDependentSubstring RemoteTypePrefix(
     const nsAString& aContentProcessType);
 
 // This is based on isWebRemoteType in E10SUtils.jsm.
 bool IsWebRemoteType(const nsAString& aContentProcessType);
 
+bool IsWebCoopCoepRemoteType(const nsAString& aContentProcessType);
+
 }  // namespace dom
 }  // namespace mozilla
 
 class ParentIdleListener : public nsIObserver {
   friend class mozilla::dom::ContentParent;
 
  public:
   NS_DECL_ISUPPORTS
--- a/dom/media/ChannelMediaDecoder.cpp
+++ b/dom/media/ChannelMediaDecoder.cpp
@@ -216,23 +216,37 @@ MediaDecoderStateMachine* ChannelMediaDe
   mReader = DecoderTraits::CreateReader(ContainerType(), init);
   return new MediaDecoderStateMachine(this, mReader);
 }
 
 void ChannelMediaDecoder::Shutdown() {
   mResourceCallback->Disconnect();
   MediaDecoder::Shutdown();
 
-  // Force any outstanding seek and byterange requests to complete
-  // to prevent shutdown from deadlocking.
   if (mResource) {
-    mResource->Close();
+    // Force any outstanding seek and byterange requests to complete
+    // to prevent shutdown from deadlocking.
+    mResourceClosePromise = mResource->Close();
   }
 }
 
+void ChannelMediaDecoder::ShutdownInternal() {
+  if (!mResourceClosePromise) {
+    MediaShutdownManager::Instance().Unregister(this);
+    return;
+  }
+
+  mResourceClosePromise->Then(
+      AbstractMainThread(), __func__,
+      [self = RefPtr<ChannelMediaDecoder>(this)] {
+        MediaShutdownManager::Instance().Unregister(self);
+      });
+  return;
+}
+
 nsresult ChannelMediaDecoder::Load(nsIChannel* aChannel,
                                    bool aIsPrivateBrowsing,
                                    nsIStreamListener** aStreamListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(!mResource);
   MOZ_ASSERT(aStreamListener);
   AbstractThread::AutoEnter context(AbstractMainThread());
 
--- a/dom/media/ChannelMediaDecoder.h
+++ b/dom/media/ChannelMediaDecoder.h
@@ -54,16 +54,17 @@ class ChannelMediaDecoder
     // The decoder to send notifications. Main-thread only.
     ChannelMediaDecoder* mDecoder = nullptr;
     nsCOMPtr<nsITimer> mTimer;
     bool mTimerArmed = false;
     const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
  protected:
+  void ShutdownInternal() override;
   void OnPlaybackEvent(MediaPlaybackEvent&& aEvent) override;
   void DurationChanged() override;
   void MetadataLoaded(UniquePtr<MediaInfo> aInfo, UniquePtr<MetadataTags> aTags,
                       MediaDecoderEventVisibility aEventVisibility) override;
   void NotifyPrincipalChanged() override;
 
   RefPtr<ResourceCallback> mResourceCallback;
   RefPtr<BaseMediaResource> mResource;
@@ -151,13 +152,17 @@ class ChannelMediaDecoder
   // start playing back again.
   int64_t mPlaybackPosition = 0;
 
   bool mCanPlayThrough = false;
 
   // True if we've been notified that the ChannelMediaResource has
   // a principal.
   bool mInitialChannelPrincipalKnown = false;
+
+  // Set in Shutdown() when we start closing mResource, if mResource is set.
+  // Must resolve before we unregister the shutdown blocker.
+  RefPtr<GenericPromise> mResourceClosePromise;
 };
 
 }  // namespace mozilla
 
 #endif  // ChannelMediaDecoder_h_
--- a/dom/media/ChannelMediaResource.cpp
+++ b/dom/media/ChannelMediaResource.cpp
@@ -584,25 +584,25 @@ nsresult ChannelMediaResource::SetupChan
     element->SetRequestHeaders(hc);
   } else {
     NS_ASSERTION(aOffset == 0, "Don't know how to seek on this channel type");
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
-nsresult ChannelMediaResource::Close() {
+RefPtr<GenericPromise> ChannelMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   if (!mClosed) {
     CloseChannel();
-    mCacheStream.Close();
     mClosed = true;
+    return mCacheStream.Close();
   }
-  return NS_OK;
+  return GenericPromise::CreateAndResolve(true, __func__);
 }
 
 already_AddRefed<nsIPrincipal> ChannelMediaResource::GetCurrentPrincipal() {
   MOZ_ASSERT(NS_IsMainThread());
   return do_AddRef(mSharedInfo->mPrincipal);
 }
 
 bool ChannelMediaResource::HadCrossOriginRedirects() {
--- a/dom/media/ChannelMediaResource.h
+++ b/dom/media/ChannelMediaResource.h
@@ -112,17 +112,17 @@ class ChannelMediaResource
   void CacheClientResume();
 
   bool IsSuspended();
 
   void ThrottleReadahead(bool bThrottle) override;
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  nsresult Close() override;
+  RefPtr<GenericPromise> Close() override;
   void Suspend(bool aCloseImmediately) override;
   void Resume() override;
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   bool CanClone() override;
   already_AddRefed<BaseMediaResource> CloneData(
       MediaResourceCallback* aDecoder) override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
--- a/dom/media/CloneableWithRangeMediaResource.cpp
+++ b/dom/media/CloneableWithRangeMediaResource.cpp
@@ -143,17 +143,19 @@ nsresult CloneableWithRangeMediaResource
     nsIStreamListener** aStreamListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aStreamListener);
 
   *aStreamListener = nullptr;
   return NS_OK;
 }
 
-nsresult CloneableWithRangeMediaResource::Close() { return NS_OK; }
+RefPtr<GenericPromise> CloneableWithRangeMediaResource::Close() {
+  return GenericPromise::CreateAndResolve(true, __func__);
+}
 
 already_AddRefed<nsIPrincipal>
 CloneableWithRangeMediaResource::GetCurrentPrincipal() {
   MOZ_ASSERT(NS_IsMainThread());
 
   nsCOMPtr<nsIPrincipal> principal;
   nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager();
   if (!secMan || !mChannel) {
--- a/dom/media/CloneableWithRangeMediaResource.h
+++ b/dom/media/CloneableWithRangeMediaResource.h
@@ -22,17 +22,17 @@ class CloneableWithRangeMediaResource : 
         mInitialized(false) {
     MOZ_ASSERT(mStream);
   }
 
   ~CloneableWithRangeMediaResource() {}
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  nsresult Close() override;
+  RefPtr<GenericPromise> Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
                          uint32_t aCount) override;
 
   // These methods are called off the main thread.
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -368,16 +368,17 @@ already_AddRefed<DOMMediaStream> DOMMedi
     RefPtr<MediaStreamTrack> clone = track->Clone();
     newStream->AddTrack(*clone);
   }
 
   return newStream.forget();
 }
 
 bool DOMMediaStream::Active() const { return mActive; }
+bool DOMMediaStream::Audible() const { return mAudible; }
 
 MediaStreamTrack* DOMMediaStream::GetTrackById(const nsAString& aId) const {
   for (const auto& track : mTracks) {
     nsString id;
     track->GetId(id);
     if (id == aId) {
       return track;
     }
@@ -450,30 +451,16 @@ void DOMMediaStream::RegisterTrackListen
   mTrackListeners.AppendElement(aListener);
 }
 
 void DOMMediaStream::UnregisterTrackListener(TrackListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   mTrackListeners.RemoveElement(aListener);
 }
 
-void DOMMediaStream::SetFinishedOnInactive(bool aFinishedOnInactive) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  if (mFinishedOnInactive == aFinishedOnInactive) {
-    return;
-  }
-
-  mFinishedOnInactive = aFinishedOnInactive;
-
-  if (mFinishedOnInactive && !ContainsLiveTracks(mTracks)) {
-    NotifyTrackRemoved(nullptr);
-  }
-}
-
 void DOMMediaStream::NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
 
   aTrack->AddConsumer(mPlaybackTrackListener);
 
   for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) {
     mTrackListeners[i]->NotifyTrackAdded(aTrack);
   }
@@ -512,20 +499,16 @@ void DOMMediaStream::NotifyTrackRemoved(
     }
 
     if (!mActive) {
       NS_ASSERTION(false, "Shouldn't remove a live track if already inactive");
       return;
     }
   }
 
-  if (!mFinishedOnInactive) {
-    return;
-  }
-
   if (mAudible) {
     // Check if we became inaudible.
     if (!ContainsLiveAudioTracks(mTracks)) {
       mAudible = false;
       NotifyInaudible();
     }
   }
 
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -139,16 +139,19 @@ class DOMMediaStream : public DOMEventTa
 
   bool Active() const;
 
   IMPL_EVENT_HANDLER(addtrack)
   IMPL_EVENT_HANDLER(removetrack)
 
   // NON-WebIDL
 
+  // Returns true if this stream contains a live audio track.
+  bool Audible() const;
+
   /**
    * Returns true if this DOMMediaStream has aTrack in mTracks.
    */
   bool HasTrack(const MediaStreamTrack& aTrack) const;
 
   /**
    * Returns a principal indicating who may access this stream. The stream
    * contents can only be accessed by principals subsuming this principal.
@@ -181,20 +184,16 @@ class DOMMediaStream : public DOMEventTa
   // being destroyed, so we don't hold on to a dead pointer. Main thread only.
   void RegisterTrackListener(TrackListener* aListener);
 
   // Unregisters a track listener from this MediaStream. The caller must call
   // UnregisterTrackListener before being destroyed, so we don't hold on to
   // a dead pointer. Main thread only.
   void UnregisterTrackListener(TrackListener* aListener);
 
-  // Tells this MediaStream whether it can go inactive as soon as no tracks
-  // are live anymore.
-  void SetFinishedOnInactive(bool aFinishedOnInactive);
-
  protected:
   virtual ~DOMMediaStream();
 
   void Destroy();
 
   // Dispatches NotifyActive() to all registered track listeners.
   void NotifyActive();
 
@@ -235,19 +234,15 @@ class DOMMediaStream : public DOMEventTa
   // The track listeners subscribe to changes in this stream's track set.
   nsTArray<TrackListener*> mTrackListeners;
 
   // True if this stream has live tracks.
   bool mActive = false;
 
   // True if this stream has live audio tracks.
   bool mAudible = false;
-
-  // For compatibility with mozCaptureStream, we in some cases do not go
-  // inactive until the MediaDecoder lets us. (Remove this in Bug 1302379)
-  bool mFinishedOnInactive = true;
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(DOMMediaStream, NS_DOMMEDIASTREAM_IID)
 
 }  // namespace mozilla
 
 #endif /* NSDOMMEDIASTREAM_H_ */
--- a/dom/media/FileMediaResource.cpp
+++ b/dom/media/FileMediaResource.cpp
@@ -90,27 +90,27 @@ nsresult FileMediaResource::Open(nsIStre
     // doing an async open and waiting until we locate the real resource,
     // then using that (if it's still a file!).
     return NS_ERROR_FAILURE;
   }
 
   return NS_OK;
 }
 
-nsresult FileMediaResource::Close() {
+RefPtr<GenericPromise> FileMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   // Since mChennel is only accessed by main thread, there is no necessary to
   // take the lock.
   if (mChannel) {
     mChannel->Cancel(NS_ERROR_PARSED_DATA_CACHED);
     mChannel = nullptr;
   }
 
-  return NS_OK;
+  return GenericPromise::CreateAndResolve(true, __func__);
 }
 
 already_AddRefed<nsIPrincipal> FileMediaResource::GetCurrentPrincipal() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   nsCOMPtr<nsIPrincipal> principal;
   nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager();
   if (!secMan || !mChannel) return nullptr;
--- a/dom/media/FileMediaResource.h
+++ b/dom/media/FileMediaResource.h
@@ -18,17 +18,17 @@ class FileMediaResource : public BaseMed
       : BaseMediaResource(aCallback, aChannel, aURI),
         mSize(aSize),
         mLock("FileMediaResource.mLock"),
         mSizeInitialized(aSize != -1) {}
   ~FileMediaResource() {}
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  nsresult Close() override;
+  RefPtr<GenericPromise> Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
                          uint32_t aCount) override;
 
   // These methods are called off the main thread.
--- a/dom/media/ForwardedInputTrack.cpp
+++ b/dom/media/ForwardedInputTrack.cpp
@@ -55,21 +55,16 @@ void ForwardedInputTrack::AddInput(Media
   SetInput(aPort);
   ProcessedMediaTrack::AddInput(aPort);
 }
 
 void ForwardedInputTrack::RemoveInput(MediaInputPort* aPort) {
   TRACK_LOG(LogLevel::Debug,
             ("ForwardedInputTrack %p removing input %p", this, aPort));
   MOZ_ASSERT(aPort == mInputPort);
-  nsTArray<RefPtr<DirectMediaTrackListener>> listeners(mOwnedDirectListeners);
-  for (const auto& listener : listeners) {
-    // Remove listeners while the entry still exists.
-    RemoveDirectListenerImpl(listener);
-  }
   mInputPort = nullptr;
   ProcessedMediaTrack::RemoveInput(aPort);
 }
 
 void ForwardedInputTrack::SetInput(MediaInputPort* aPort) {
   MOZ_ASSERT(aPort);
   MOZ_ASSERT(aPort->GetSource());
   MOZ_ASSERT(aPort->GetSource()->GetData());
@@ -158,16 +153,20 @@ void ForwardedInputTrack::ProcessInput(G
     AudioSegment audio;
     ProcessInputImpl(source, &audio, aFrom, aTo, aFlags);
   } else if (mType == MediaSegment::VIDEO) {
     VideoSegment video;
     ProcessInputImpl(source, &video, aFrom, aTo, aFlags);
   } else {
     MOZ_CRASH("Unknown segment type");
   }
+
+  if (mEnded) {
+    RemoveAllDirectListenersImpl();
+  }
 }
 
 void ForwardedInputTrack::SetEnabledImpl(DisabledTrackMode aMode) {
   bool enabled = aMode == DisabledTrackMode::ENABLED;
   TRACK_LOG(LogLevel::Info, ("ForwardedInputTrack %p was explicitly %s", this,
                              enabled ? "enabled" : "disabled"));
   for (DirectMediaTrackListener* listener : mOwnedDirectListeners) {
     DisabledTrackMode oldMode = mDisabledMode;
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -156,17 +156,17 @@ class MediaCache {
 
   // Get an instance of a MediaCache (or nullptr if initialization failed).
   // aContentLength is the content length if known already, otherwise -1.
   // If the length is known and considered small enough, a discrete MediaCache
   // with memory backing will be given. Otherwise the one MediaCache with
   // file backing will be provided.
   static RefPtr<MediaCache> GetMediaCache(int64_t aContentLength);
 
-  nsIEventTarget* OwnerThread() const { return sThread; }
+  nsISerialEventTarget* OwnerThread() const { return sThread; }
 
   // Brutally flush the cache contents. Main thread only.
   void Flush();
 
   // Close all streams associated with private browsing windows. This will
   // also remove the blocks from the cache since we don't want to leave any
   // traces when PB is done.
   void CloseStreamsForPrivateBrowsing();
@@ -2191,27 +2191,28 @@ bool MediaCacheStream::AreAllStreamsForR
       continue;
     }
     return false;
   }
 
   return true;
 }
 
-void MediaCacheStream::Close() {
+RefPtr<GenericPromise> MediaCacheStream::Close() {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mMediaCache) {
-    return;
+    return GenericPromise::CreateAndResolve(true, __func__);
   }
-  OwnerThread()->Dispatch(NS_NewRunnableFunction(
-      "MediaCacheStream::Close",
-      [this, client = RefPtr<ChannelMediaResource>(mClient)]() {
-        AutoLock lock(mMediaCache->Monitor());
-        CloseInternal(lock);
-      }));
+
+  return InvokeAsync(OwnerThread(), "MediaCacheStream::Close",
+                     [this, client = RefPtr<ChannelMediaResource>(mClient)] {
+                       AutoLock lock(mMediaCache->Monitor());
+                       CloseInternal(lock);
+                       return GenericPromise::CreateAndResolve(true, __func__);
+                     });
 }
 
 void MediaCacheStream::CloseInternal(AutoLock& aLock) {
   MOZ_ASSERT(OwnerThread()->IsOnCurrentThread());
 
   if (mClosed) {
     return;
   }
@@ -2729,17 +2730,17 @@ void MediaCacheStream::InitAsCloneIntern
   mClient->CacheClientSuspend();
 
   // Step 5: add the stream to be managed by the cache.
   mMediaCache->OpenStream(lock, this, true /* aIsClone */);
   // Wake up the reader which is waiting for the cloned data.
   lock.NotifyAll();
 }
 
-nsIEventTarget* MediaCacheStream::OwnerThread() const {
+nsISerialEventTarget* MediaCacheStream::OwnerThread() const {
   return mMediaCache->OwnerThread();
 }
 
 nsresult MediaCacheStream::GetCachedRanges(MediaByteRangeSet& aRanges) {
   MOZ_ASSERT(!NS_IsMainThread());
   // Take the monitor, so that the cached data ranges can't grow while we're
   // trying to loop over them.
   AutoLock lock(mMediaCache->Monitor());
--- a/dom/media/MediaCache.h
+++ b/dom/media/MediaCache.h
@@ -212,22 +212,22 @@ class MediaCacheStream : public DecoderD
   nsresult Init(int64_t aContentLength);
 
   // Set up this stream with the cache, assuming it's for the same data
   // as the aOriginal stream.
   // Exactly one of InitAsClone or Init must be called before any other method
   // on this class.
   void InitAsClone(MediaCacheStream* aOriginal);
 
-  nsIEventTarget* OwnerThread() const;
+  nsISerialEventTarget* OwnerThread() const;
 
   // These are called on the main thread.
-  // This must be called (and return) before the ChannelMediaResource
+  // This must be called (and resolve) before the ChannelMediaResource
   // used to create this MediaCacheStream is deleted.
-  void Close();
+  RefPtr<GenericPromise> Close();
   // This returns true when the stream has been closed.
   bool IsClosed(AutoLock&) const { return mClosed; }
   // Returns true when this stream is can be shared by a new resource load.
   // Called on the main thread only.
   bool IsAvailableForSharing() const { return !mIsPrivateBrowsing; }
 
   // These callbacks are called on the main thread by the client
   // when data has been received via the channel.
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaDecoder.h"
 
+#include "AudioDeviceInfo.h"
 #include "DOMMediaStream.h"
 #include "DecoderBenchmark.h"
 #include "ImageContainer.h"
 #include "Layers.h"
 #include "MediaDecoderStateMachine.h"
 #include "MediaFormatReader.h"
 #include "MediaResource.h"
 #include "MediaShutdownManager.h"
@@ -220,46 +221,56 @@ void MediaDecoder::Pause() {
 }
 
 void MediaDecoder::SetVolume(double aVolume) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   mVolume = aVolume;
 }
 
-RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
+RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
-  return GetStateMachine()->InvokeSetSink(aSink);
+  mSinkDevice = aSinkDevice;
+  return GetStateMachine()->InvokeSetSink(aSinkDevice);
 }
 
-void MediaDecoder::AddOutputStream(DOMMediaStream* aStream,
-                                   SharedDummyTrack* aDummyStream) {
+void MediaDecoder::SetOutputCaptured(bool aCaptured) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
+  AbstractThread::AutoEnter context(AbstractMainThread());
+  mOutputCaptured = aCaptured;
+}
+
+void MediaDecoder::AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mDecoderStateMachine->EnsureOutputStreamManager(aDummyStream);
-  if (mInfo) {
-    mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
-  }
-  mDecoderStateMachine->AddOutputStream(aStream);
+  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
+  tracks.AppendElement(std::move(aTrack));
+  mOutputTracks = tracks;
 }
 
-void MediaDecoder::RemoveOutputStream(DOMMediaStream* aStream) {
+void MediaDecoder::RemoveOutputTrack(
+    const RefPtr<ProcessedMediaTrack>& aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mDecoderStateMachine->RemoveOutputStream(aStream);
+  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
+  if (tracks.RemoveElement(aTrack)) {
+    mOutputTracks = tracks;
+  }
 }
 
-void MediaDecoder::SetOutputStreamPrincipal(nsIPrincipal* aPrincipal) {
+void MediaDecoder::SetOutputTracksPrincipal(
+    const RefPtr<nsIPrincipal>& aPrincipal) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mDecoderStateMachine->SetOutputStreamPrincipal(aPrincipal);
+  mOutputPrincipal = MakePrincipalHandle(aPrincipal);
 }
 
 double MediaDecoder::GetDuration() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return mDuration;
 }
 
@@ -295,16 +306,20 @@ MediaDecoder::MediaDecoder(MediaDecoderI
       mLogicallySeeking(false, "MediaDecoder::mLogicallySeeking"),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mCurrentPosition, TimeUnit::Zero()),
       INIT_MIRROR(mStateMachineDuration, NullableTimeUnit()),
       INIT_MIRROR(mIsAudioDataAudible, false),
       INIT_CANONICAL(mVolume, aInit.mVolume),
       INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
       INIT_CANONICAL(mLooping, aInit.mLooping),
+      INIT_CANONICAL(mSinkDevice, nullptr),
+      INIT_CANONICAL(mOutputCaptured, false),
+      INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
       mSameOriginMedia(false),
       mVideoDecodingOberver(
           new BackgroundVideoDecodingPermissionObserver(this)),
       mIsBackgroundVideoDecodingAllowed(false),
       mTelemetryReported(false),
       mContainerType(aInit.mContainerType) {
   MOZ_ASSERT(NS_IsMainThread());
@@ -370,24 +385,21 @@ void MediaDecoder::Shutdown() {
         &MediaDecoder::FinishShutdown);
   } else {
     // Ensure we always unregister asynchronously in order not to disrupt
     // the hashtable iterating in MediaShutdownManager::Shutdown().
     RefPtr<MediaDecoder> self = this;
     nsCOMPtr<nsIRunnable> r =
         NS_NewRunnableFunction("MediaDecoder::Shutdown", [self]() {
           self->mVideoFrameContainer = nullptr;
-          MediaShutdownManager::Instance().Unregister(self);
+          self->ShutdownInternal();
         });
     mAbstractMainThread->Dispatch(r.forget());
   }
 
-  // Ask the owner to remove its audio/video tracks.
-  GetOwner()->RemoveMediaTracks();
-
   ChangeState(PLAY_STATE_SHUTDOWN);
   mVideoDecodingOberver->UnregisterEvent();
   mVideoDecodingOberver = nullptr;
   mOwner = nullptr;
 }
 
 void MediaDecoder::NotifyXPCOMShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
@@ -514,21 +526,26 @@ void MediaDecoder::OnStoreDecoderBenchma
         "type = %s\n",
         benchmarkInfo.mWidth, benchmarkInfo.mHeight, benchmarkInfo.mFrameRate,
         benchmarkInfo.mContentType.BeginReading());
 
     mDecoderBenchmark->Store(benchmarkInfo, mFrameStats);
   }
 }
 
+void MediaDecoder::ShutdownInternal() {
+  MOZ_ASSERT(NS_IsMainThread());
+  MediaShutdownManager::Instance().Unregister(this);
+}
+
 void MediaDecoder::FinishShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
   SetStateMachine(nullptr);
   mVideoFrameContainer = nullptr;
-  MediaShutdownManager::Instance().Unregister(this);
+  ShutdownInternal();
 }
 
 nsresult MediaDecoder::InitializeStateMachine() {
   MOZ_ASSERT(NS_IsMainThread());
   NS_ASSERTION(mDecoderStateMachine, "Cannot initialize null state machine!");
   AbstractThread::AutoEnter context(AbstractMainThread());
 
   nsresult rv = mDecoderStateMachine->Init(this);
@@ -637,17 +654,16 @@ double MediaDecoder::GetCurrentTime() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return mLogicalPosition;
 }
 
 void MediaDecoder::OnMetadataUpdate(TimedMetadata&& aMetadata) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
-  GetOwner()->RemoveMediaTracks();
   MetadataLoaded(MakeUnique<MediaInfo>(*aMetadata.mInfo),
                  UniquePtr<MetadataTags>(std::move(aMetadata.mTags)),
                  MediaDecoderEventVisibility::Observable);
   FirstFrameLoaded(std::move(aMetadata.mInfo),
                    MediaDecoderEventVisibility::Observable);
 }
 
 void MediaDecoder::MetadataLoaded(
@@ -660,18 +676,16 @@ void MediaDecoder::MetadataLoaded(
   LOG("MetadataLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d",
       aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
       aInfo->HasVideo());
 
   mMediaSeekable = aInfo->mMediaSeekable;
   mMediaSeekableOnlyInBufferedRanges =
       aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.release();
-  GetOwner()->ConstructMediaTracks(mInfo);
-  mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
     GetOwner()->MetadataLoaded(mInfo, std::move(aTags));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
@@ -852,22 +866,16 @@ void MediaDecoder::ChangeState(PlayState
   if (mNextState == aState) {
     mNextState = PLAY_STATE_PAUSED;
   }
 
   if (mPlayState != aState) {
     DDLOG(DDLogCategory::Property, "play_state", ToPlayStateStr(aState));
   }
   mPlayState = aState;
-
-  if (mPlayState == PLAY_STATE_PLAYING) {
-    GetOwner()->ConstructMediaTracks(mInfo);
-  } else if (IsEnded()) {
-    GetOwner()->RemoveMediaTracks();
-  }
 }
 
 bool MediaDecoder::IsLoopingBack(double aPrevPos, double aCurPos) const {
   // If current position is early than previous position and we didn't do seek,
   // that means we looped back to the start position.
   return mLooping && !mSeekRequest.Exists() && aCurPos < aPrevPos;
 }
 
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -38,22 +38,22 @@ namespace mozilla {
 
 namespace dom {
 class MediaMemoryInfo;
 }
 
 class AbstractThread;
 class DOMMediaStream;
 class DecoderBenchmark;
+class ProcessedMediaTrack;
 class FrameStatistics;
 class VideoFrameContainer;
 class MediaFormatReader;
 class MediaDecoderStateMachine;
 struct MediaPlaybackEvent;
-struct SharedDummyTrack;
 
 enum class Visibility : uint8_t;
 
 struct MOZ_STACK_CLASS MediaDecoderInit {
   MediaDecoderOwner* const mOwner;
   const double mVolume;
   const bool mPreservesPitch;
   const double mPlaybackRate;
@@ -150,36 +150,44 @@ class MediaDecoder : public DecoderDocto
   // Adjust the speed of the playback, optionally with pitch correction,
   void SetVolume(double aVolume);
 
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetLooping(bool aLooping);
 
   // Set the given device as the output device.
-  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
+  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
 
   bool GetMinimizePreroll() const { return mMinimizePreroll; }
 
   // All MediaStream-related data is protected by mReentrantMonitor.
   // We have at most one DecodedStreamData per MediaDecoder. Its stream
   // is used as the input for each ProcessedMediaTrack created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
-  // Add an output stream. All decoder output will be sent to the stream.
-  // The stream is initially blocked. The decoder is responsible for unblocking
-  // it while it is playing back.
-  void AddOutputStream(DOMMediaStream* aStream, SharedDummyTrack* aDummyStream);
-  // Remove an output stream added with AddOutputStream.
-  void RemoveOutputStream(DOMMediaStream* aStream);
-
-  // Update the principal for any output streams and their tracks.
-  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
+  // Turn output capturing of this decoder on or off. If it is on, the
+  // MediaDecoderStateMachine's media sink will only play after output tracks
+  // have been set. This is to ensure that it doesn't skip over any data
+  // while the owner has intended to capture the full output, thus missing to
+  // capture some of it. The owner of the MediaDecoder is responsible for adding
+  // output tracks in a timely fashion while the output is captured.
+  void SetOutputCaptured(bool aCaptured);
+  // Add an output track. All decoder output for the track's media type will be
+  // sent to the track.
+  // Note that only one audio track and one video track is supported by
+  // MediaDecoder at this time. Passing in more of one type, or passing in a
+  // type that metadata says we are not decoding, is an error.
+  void AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack);
+  // Remove an output track added with AddOutputTrack.
+  void RemoveOutputTrack(const RefPtr<ProcessedMediaTrack>& aTrack);
+  // Update the principal for any output tracks.
+  void SetOutputTracksPrincipal(const RefPtr<nsIPrincipal>& aPrincipal);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
 
   // Return true if the stream is infinite.
   bool IsInfinite() const;
 
   // Return true if we are currently seeking in the media resource.
@@ -390,16 +398,21 @@ class MediaDecoder : public DecoderDocto
 
   // Called when the first audio and/or video from the media file has been
   // loaded by the state machine. Call on the main thread only.
   virtual void FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
                                 MediaDecoderEventVisibility aEventVisibility);
 
   void SetStateMachineParameters();
 
+  // Called when MediaDecoder shutdown is finished. Subclasses use this to clean
+  // up internal structures, and unregister potential shutdown blockers when
+  // they're done.
+  virtual void ShutdownInternal();
+
   bool IsShutdown() const;
 
   // Called to notify the decoder that the duration has changed.
   virtual void DurationChanged();
 
   // State-watching manager.
   WatchManager<MediaDecoder> mWatchManager;
 
@@ -601,16 +614,30 @@ class MediaDecoder : public DecoderDocto
 
   // Volume of playback.  0.0 = muted. 1.0 = full volume.
   Canonical<double> mVolume;
 
   Canonical<bool> mPreservesPitch;
 
   Canonical<bool> mLooping;
 
+  // The device used with SetSink, or nullptr if no explicit device has been
+  // set.
+  Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
+
+  // Whether this MediaDecoder's output is captured. When captured, all decoded
+  // data must be played out through mOutputTracks.
+  Canonical<bool> mOutputCaptured;
+
+  // Tracks that, if set, will get data routed through them.
+  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
+
+  // PrincipalHandle to be used when feeding data into mOutputTracks.
+  Canonical<PrincipalHandle> mOutputPrincipal;
+
   // Media duration set explicitly by JS. At present, this is only ever present
   // for MSE.
   Maybe<double> mExplicitDuration;
 
   // Set to one of the valid play states.
   // This can only be changed on the main thread while holding the decoder
   // monitor. Thus, it can be safely read while holding the decoder monitor
   // OR on the main thread.
@@ -633,16 +660,29 @@ class MediaDecoder : public DecoderDocto
   bool mIsBackgroundVideoDecodingAllowed;
 
  public:
   AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
   AbstractCanonical<bool>* CanonicalPreservesPitch() {
     return &mPreservesPitch;
   }
   AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
+  AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
+    return &mSinkDevice;
+  }
+  AbstractCanonical<bool>* CanonicalOutputCaptured() {
+    return &mOutputCaptured;
+  }
+  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
+  CanonicalOutputTracks() {
+    return &mOutputTracks;
+  }
+  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
+    return &mOutputPrincipal;
+  }
   AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
 
  private:
   // Notify owner when the audible state changed
   void NotifyAudibleStateChanged();
 
   bool mTelemetryReported;
   const MediaContainerType mContainerType;
--- a/dom/media/MediaDecoderOwner.h
+++ b/dom/media/MediaDecoderOwner.h
@@ -134,24 +134,16 @@ class MediaDecoderOwner {
   virtual void NotifyXPCOMShutdown() = 0;
 
   // Dispatches a "encrypted" event to the HTMLMediaElement, with the
   // provided init data. Actual dispatch may be delayed until HAVE_METADATA.
   // Main thread only.
   virtual void DispatchEncrypted(const nsTArray<uint8_t>& aInitData,
                                  const nsAString& aInitDataType) = 0;
 
-  // Called by the media decoder to create audio/video tracks and add to its
-  // owner's track list.
-  virtual void ConstructMediaTracks(const MediaInfo* aInfo) = 0;
-
-  // Called by the media decoder to removes all audio/video tracks from its
-  // owner's track list.
-  virtual void RemoveMediaTracks() = 0;
-
   // Notified by the decoder that a decryption key is required before emitting
   // further output.
   virtual void NotifyWaitingForKey() {}
 
   /*
    * Methods that are used only in Gecko go here. We provide defaul
    * implementations so they can compile in Servo without modification.
    */
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -6,17 +6,16 @@
 
 #include <algorithm>
 #include <stdint.h>
 #include <utility>
 
 #include "mediasink/AudioSink.h"
 #include "mediasink/AudioSinkWrapper.h"
 #include "mediasink/DecodedStream.h"
-#include "mediasink/OutputStreamManager.h"
 #include "mediasink/VideoSink.h"
 #include "mozilla/Logging.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/Telemetry.h"
@@ -2587,16 +2586,20 @@ RefPtr<ShutdownPromise> MediaDecoderStat
   master->mOnMediaNotSeekable.Disconnect();
 
   // Disconnect canonicals and mirrors before shutting down our task queue.
   master->mBuffered.DisconnectIfConnected();
   master->mPlayState.DisconnectIfConnected();
   master->mVolume.DisconnectIfConnected();
   master->mPreservesPitch.DisconnectIfConnected();
   master->mLooping.DisconnectIfConnected();
+  master->mSinkDevice.DisconnectIfConnected();
+  master->mOutputCaptured.DisconnectIfConnected();
+  master->mOutputTracks.DisconnectIfConnected();
+  master->mOutputPrincipal.DisconnectIfConnected();
 
   master->mDuration.DisconnectAll();
   master->mCurrentPosition.DisconnectAll();
   master->mIsAudioDataAudible.DisconnectAll();
 
   // Shut down the watch manager to stop further notifications.
   master->mWatchManager.Shutdown();
 
@@ -2622,34 +2625,38 @@ MediaDecoderStateMachine::MediaDecoderSt
                                /* aSupportsTailDispatch = */ true)),
       mWatchManager(this, mTaskQueue),
       mDispatchedStateMachine(false),
       mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
       mCurrentFrameID(0),
       mReader(new ReaderProxy(mTaskQueue, aReader)),
       mPlaybackRate(1.0),
       mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
-      mAudioCaptured(false),
       mMinimizePreroll(aDecoder->GetMinimizePreroll()),
       mSentFirstFrameLoadedEvent(false),
       mVideoDecodeSuspended(false),
       mVideoDecodeSuspendTimer(mTaskQueue),
-      mOutputStreamManager(nullptr),
       mVideoDecodeMode(VideoDecodeMode::Normal),
       mIsMSE(aDecoder->IsMSE()),
       mSeamlessLoopingAllowed(false),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
       INIT_MIRROR(mVolume, 1.0),
       INIT_MIRROR(mPreservesPitch, true),
       INIT_MIRROR(mLooping, false),
+      INIT_MIRROR(mSinkDevice, nullptr),
+      INIT_MIRROR(mOutputCaptured, false),
+      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
+      INIT_CANONICAL(mCanonicalOutputTracks,
+                     nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mDuration, NullableTimeUnit()),
       INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
-      INIT_CANONICAL(mIsAudioDataAudible, false),
-      mSetSinkRequestsCount(0) {
+      INIT_CANONICAL(mIsAudioDataAudible, false) {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
 
   DDLINKCHILD("reader", aReader);
 }
 
@@ -2666,25 +2673,39 @@ void MediaDecoderStateMachine::Initializ
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
   mBuffered.Connect(mReader->CanonicalBuffered());
   mPlayState.Connect(aDecoder->CanonicalPlayState());
   mVolume.Connect(aDecoder->CanonicalVolume());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mLooping.Connect(aDecoder->CanonicalLooping());
+  mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
+  mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
+  mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
+  mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
 
   // Initialize watchers.
   mWatchManager.Watch(mBuffered,
                       &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
   mWatchManager.Watch(mPreservesPitch,
                       &MediaDecoderStateMachine::PreservesPitchChanged);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
   mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);
+  mWatchManager.Watch(mOutputCaptured,
+                      &MediaDecoderStateMachine::UpdateOutputCaptured);
+  mWatchManager.Watch(mOutputTracks,
+                      &MediaDecoderStateMachine::UpdateOutputCaptured);
+  mWatchManager.Watch(mOutputTracks,
+                      &MediaDecoderStateMachine::OutputTracksChanged);
+  mWatchManager.Watch(mOutputPrincipal,
+                      &MediaDecoderStateMachine::OutputPrincipalChanged);
+
+  mMediaSink = CreateMediaSink();
 
   MOZ_ASSERT(!mStateObj);
   auto* s = new DecodeMetadataState(this);
   mStateObj.reset(s);
   s->Enter();
 }
 
 void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
@@ -2692,33 +2713,34 @@ void MediaDecoderStateMachine::AudioAudi
 }
 
 MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self]() {
     MOZ_ASSERT(self->OnTaskQueue());
     AudioSink* audioSink =
         new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
-                      self->Info().mAudio);
+                      self->Info().mAudio, self->mSinkDevice.Ref());
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
         self->mTaskQueue, self.get(),
         &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
-  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
+  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
+                              mVolume, mPlaybackRate, mPreservesPitch);
 }
 
-already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
-    bool aAudioCaptured, OutputStreamManager* aManager) {
-  MOZ_ASSERT_IF(aAudioCaptured, aManager);
+already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
+  MOZ_ASSERT(OnTaskQueue());
   RefPtr<MediaSink> audioSink =
-      aAudioCaptured ? new DecodedStream(mTaskQueue, mAbstractMainThread,
-                                         mAudioQueue, mVideoQueue, aManager)
-                     : CreateAudioSink();
+      mOutputCaptured
+          ? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
+                              mPreservesPitch, mAudioQueue, mVideoQueue)
+          : CreateAudioSink();
 
   RefPtr<MediaSink> mediaSink =
       new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
                     *mFrameStats, sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
 
 TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() {
@@ -2798,18 +2820,16 @@ nsresult MediaDecoderStateMachine::Init(
   mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
       mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
 
   mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());
 
   mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
       OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
 
-  mMediaSink = CreateMediaSink(mAudioCaptured, mOutputStreamManager);
-
   nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mReader->SetCanonicalDuration(&mDuration);
 
   return NS_OK;
 }
 
@@ -3335,19 +3355,16 @@ void MediaDecoderStateMachine::FinishDec
   // Get potentially updated metadata
   mReader->ReadUpdatedMetadata(mInfo.ptr());
 
   EnqueueFirstFrameLoadedEvent();
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::BeginShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
-  if (mOutputStreamManager) {
-    mOutputStreamManager->Disconnect();
-  }
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::Shutdown);
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::FinishShutdown() {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Shutting down state machine task queue");
   return OwnerThread()->BeginShutdown();
@@ -3426,17 +3443,17 @@ void MediaDecoderStateMachine::UpdatePla
     auto t = std::min(clockTime, maxEndTime);
     // FIXME: Bug 1091422 - chained ogg files hit this assertion.
     // MOZ_ASSERT(t >= GetMediaTime());
     if (loopback || t > GetMediaTime()) {
       UpdatePlaybackPosition(t);
     }
   }
   // Note we have to update playback position before releasing the monitor.
-  // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
+  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
 
   int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
   ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay));
 
   // Notify the listener as we progress in the playback offset. Note it would
   // be too intensive to send notifications for each popped audio/video sample.
@@ -3512,57 +3529,85 @@ void MediaDecoderStateMachine::Preserves
 void MediaDecoderStateMachine::LoopingChanged() {
   MOZ_ASSERT(OnTaskQueue());
   LOGV("LoopingChanged, looping=%d", mLooping.Ref());
   if (mSeamlessLoopingAllowed) {
     mStateObj->HandleLoopingChanged();
   }
 }
 
+void MediaDecoderStateMachine::UpdateOutputCaptured() {
+  MOZ_ASSERT(OnTaskQueue());
+
+  // Reset these flags so they are consistent with the status of the sink.
+  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
+  // to reset these flags when switching MediaSinks.
+  mAudioCompleted = false;
+  mVideoCompleted = false;
+
+  // Stop and shut down the existing sink.
+  StopMediaSink();
+  mMediaSink->Shutdown();
+
+  // Create a new sink according to whether output is captured.
+  mMediaSink = CreateMediaSink();
+
+  // Don't buffer as much when audio is captured because we don't need to worry
+  // about high latency audio devices.
+  mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
+                                         : detail::AMPLE_AUDIO_THRESHOLD;
+
+  mStateObj->HandleAudioCaptured();
+}
+
+void MediaDecoderStateMachine::OutputTracksChanged() {
+  MOZ_ASSERT(OnTaskQueue());
+  LOG("OutputTracksChanged, tracks=%zu", mOutputTracks.Ref().Length());
+  mCanonicalOutputTracks = mOutputTracks;
+}
+
+void MediaDecoderStateMachine::OutputPrincipalChanged() {
+  MOZ_ASSERT(OnTaskQueue());
+  mCanonicalOutputPrincipal = mOutputPrincipal;
+}
+
 RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aSink);
 
-  Unused << ++mSetSinkRequestsCount;
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::SetSink, aSink);
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
-    RefPtr<AudioDeviceInfo> aSink) {
+    RefPtr<AudioDeviceInfo> aSinkDevice) {
   MOZ_ASSERT(OnTaskQueue());
-  if (mAudioCaptured) {
+  if (mOutputCaptured) {
     // Not supported yet.
     return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
   }
 
-  // Backup current playback parameters.
-  bool wasPlaying = mMediaSink->IsPlaying();
-
-  if (--mSetSinkRequestsCount > 0) {
-    MOZ_ASSERT(mSetSinkRequestsCount > 0);
-    return GenericPromise::CreateAndResolve(wasPlaying, __func__);
+  if (mSinkDevice.Ref() != aSinkDevice) {
+    // A new sink was set before this ran.
+    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
   }
 
-  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
-  params.mSink = std::move(aSink);
-
-  if (!mMediaSink->IsStarted()) {
-    mMediaSink->SetPlaybackParams(params);
-    return GenericPromise::CreateAndResolve(false, __func__);
+  if (mMediaSink->AudioDevice() == aSinkDevice) {
+    // The sink has not changed.
+    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
   }
 
+  const bool wasPlaying = IsPlaying();
+
   // Stop and shutdown the existing sink.
   StopMediaSink();
   mMediaSink->Shutdown();
   // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink(false);
-  // Restore playback parameters.
-  mMediaSink->SetPlaybackParams(params);
+  mMediaSink = CreateMediaSink();
   // Start the new sink
   if (wasPlaying) {
     nsresult rv = StartMediaSink();
     if (NS_FAILED(rv)) {
       return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
     }
   }
   return GenericPromise::CreateAndResolve(wasPlaying, __func__);
@@ -3651,53 +3696,16 @@ void MediaDecoderStateMachine::OnMediaSi
     return;
   }
 
   // Otherwise notify media decoder/element about this error for it makes
   // no sense to play an audio-only file without sound output.
   DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
 }
 
-void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured,
-                                                OutputStreamManager* aManager) {
-  MOZ_ASSERT(OnTaskQueue());
-
-  if (aCaptured == mAudioCaptured) {
-    return;
-  }
-
-  // Rest these flags so they are consistent with the status of the sink.
-  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
-  // to reset these flags when switching MediaSinks.
-  mAudioCompleted = false;
-  mVideoCompleted = false;
-
-  // Backup current playback parameters.
-  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
-
-  // Stop and shut down the existing sink.
-  StopMediaSink();
-  mMediaSink->Shutdown();
-
-  // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink(aCaptured, aManager);
-
-  // Restore playback parameters.
-  mMediaSink->SetPlaybackParams(params);
-
-  mAudioCaptured = aCaptured;
-
-  // Don't buffer as much when audio is captured because we don't need to worry
-  // about high latency audio devices.
-  mAmpleAudioThreshold = mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
-                                        : detail::AMPLE_AUDIO_THRESHOLD;
-
-  mStateObj->HandleAudioCaptured();
-}
-
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
   MOZ_ASSERT(OnTaskQueue());
   return mReader->VideoIsHardwareAccelerated()
              ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
              : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 void MediaDecoderStateMachine::GetDebugInfo(
@@ -3731,96 +3739,16 @@ RefPtr<GenericPromise> MediaDecoderState
                                p->Resolve(true, __func__);
                              }),
       AbstractThread::TailDispatch);
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   Unused << rv;
   return p.forget();
 }
 
-void MediaDecoderStateMachine::SetOutputStreamPrincipal(
-    nsIPrincipal* aPrincipal) {
-  MOZ_ASSERT(NS_IsMainThread());
-  mOutputStreamPrincipal = aPrincipal;
-  if (mOutputStreamManager) {
-    mOutputStreamManager->SetPrincipal(mOutputStreamPrincipal);
-  }
-}
-
-void MediaDecoderStateMachine::AddOutputStream(DOMMediaStream* aStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("AddOutputStream aStream=%p!", aStream);
-  mOutputStreamManager->Add(aStream);
-  nsCOMPtr<nsIRunnable> r =
-      NS_NewRunnableFunction("MediaDecoderStateMachine::SetAudioCaptured",
-                             [self = RefPtr<MediaDecoderStateMachine>(this),
-                              manager = mOutputStreamManager]() {
-                               self->SetAudioCaptured(true, manager);
-                             });
-  nsresult rv = OwnerThread()->Dispatch(r.forget());
-  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-  Unused << rv;
-}
-
-void MediaDecoderStateMachine::RemoveOutputStream(DOMMediaStream* aStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("RemoveOutputStream=%p!", aStream);
-  mOutputStreamManager->Remove(aStream);
-  if (mOutputStreamManager->IsEmpty()) {
-    mOutputStreamManager->Disconnect();
-    mOutputStreamManager = nullptr;
-    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
-        "MediaDecoderStateMachine::SetAudioCaptured",
-        [self = RefPtr<MediaDecoderStateMachine>(this)]() {
-          self->SetAudioCaptured(false);
-        });
-    nsresult rv = OwnerThread()->Dispatch(r.forget());
-    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-    Unused << rv;
-  }
-}
-
-void MediaDecoderStateMachine::EnsureOutputStreamManager(
-    SharedDummyTrack* aDummyStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mOutputStreamManager) {
-    return;
-  }
-  mOutputStreamManager = new OutputStreamManager(
-      aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
-}
-
-void MediaDecoderStateMachine::EnsureOutputStreamManagerHasTracks(
-    const MediaInfo& aLoadedInfo) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (!mOutputStreamManager) {
-    return;
-  }
-  if ((!aLoadedInfo.HasAudio() ||
-       mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
-      (!aLoadedInfo.HasVideo() ||
-       mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
-    return;
-  }
-  if (aLoadedInfo.HasAudio()) {
-    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-    RefPtr<SourceMediaTrack> dummy =
-        mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
-    LOG("Pre-created audio track with underlying track %p", dummy.get());
-    Unused << dummy;
-  }
-  if (aLoadedInfo.HasVideo()) {
-    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-    RefPtr<SourceMediaTrack> dummy =
-        mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
-    LOG("Pre-created video track with underlying track %p", dummy.get());
-    Unused << dummy;
-  }
-}
-
 class VideoQueueMemoryFunctor : public nsDequeFunctor {
  public:
   VideoQueueMemoryFunctor() : mSize(0) {}
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
 
   virtual void operator()(void* aObject) override {
     const VideoData* v = static_cast<const VideoData*>(aObject);
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -101,17 +101,16 @@ hardware (via AudioStream).
 #  include "nsThreadUtils.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class AudioSegment;
 class DecodedStream;
 class DOMMediaStream;
-class OutputStreamManager;
 class ReaderProxy;
 class TaskQueue;
 
 extern LazyLogModule gMediaDecoderLog;
 
 struct MediaPlaybackEvent {
   enum EventType {
     PlaybackStarted,
@@ -181,29 +180,16 @@ class MediaDecoderStateMachine
   };
 
   // Returns the state machine task queue.
   TaskQueue* OwnerThread() const { return mTaskQueue; }
 
   RefPtr<GenericPromise> RequestDebugInfo(
       dom::MediaDecoderStateMachineDebugInfo& aInfo);
 
-  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
-  // If an OutputStreamManager does not exist, one will be created.
-  void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
-  // If an OutputStreamManager exists, tracks matching aLoadedInfo will be
-  // created unless they already exist in the manager.
-  void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
-  // Add an output stream to the output stream manager. The manager must have
-  // been created through EnsureOutputStreamManager() before this.
-  void AddOutputStream(DOMMediaStream* aStream);
-  // Remove an output stream added with AddOutputStream. If the last output
-  // stream was removed, we will also tear down the OutputStreamManager.
-  void RemoveOutputStream(DOMMediaStream* aStream);
-
   // Seeks to the decoder to aTarget asynchronously.
   RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
 
   void DispatchSetPlaybackRate(double aPlaybackRate) {
     OwnerThread()->DispatchStateChange(NewRunnableMethod<double>(
         "MediaDecoderStateMachine::SetPlaybackRate", this,
         &MediaDecoderStateMachine::SetPlaybackRate, aPlaybackRate));
   }
@@ -311,21 +297,16 @@ class MediaDecoderStateMachine
   // on the appropriate threads.
   bool OnTaskQueue() const;
 
   // Initialization that needs to happen on the task queue. This is the first
   // task that gets run on the task queue, and is dispatched from the MDSM
   // constructor immediately after the task queue is created.
   void InitializationTask(MediaDecoder* aDecoder);
 
-  // Sets the audio-captured state and recreates the media sink if needed.
-  // A manager must be passed in if setting the audio-captured state to true.
-  void SetAudioCaptured(bool aCaptured,
-                        OutputStreamManager* aManager = nullptr);
-
   RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);
 
   RefPtr<ShutdownPromise> Shutdown();
 
   RefPtr<ShutdownPromise> FinishShutdown();
 
   // Update the playback position. This can result in a timeupdate event
   // and an invalidate of the frame being dispatched asynchronously if
@@ -389,16 +370,19 @@ class MediaDecoderStateMachine
   void OnVideoPopped(const RefPtr<VideoData>& aSample);
 
   void AudioAudibleChanged(bool aAudible);
 
   void VolumeChanged();
   void SetPlaybackRate(double aPlaybackRate);
   void PreservesPitchChanged();
   void LoopingChanged();
+  void UpdateOutputCaptured();
+  void OutputTracksChanged();
+  void OutputPrincipalChanged();
 
   MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
   MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
 
   // True if we are low in decoded audio/video data.
   // May not be invoked when mReader->UseBufferingHeuristics() is false.
   bool HasLowDecodedData();
 
@@ -433,20 +417,19 @@ class MediaDecoderStateMachine
   void UpdatePlaybackPositionInternal(const media::TimeUnit& aTime);
 
   // Update playback position and trigger next update by default time period.
   // Called on the state machine thread.
   void UpdatePlaybackPositionPeriodically();
 
   MediaSink* CreateAudioSink();
 
-  // Always create mediasink which contains an AudioSink or StreamSink inside.
-  // A manager must be passed in if aAudioCaptured is true.
-  already_AddRefed<MediaSink> CreateMediaSink(
-      bool aAudioCaptured, OutputStreamManager* aManager = nullptr);
+  // Always create mediasink which contains an AudioSink or DecodedStream
+  // inside.
+  already_AddRefed<MediaSink> CreateMediaSink();
 
   // Stops the media sink and shut it down.
   // The decoder monitor must be held with exactly one lock count.
   // Called on the state machine thread.
   void StopMediaSink();
 
   // Create and start the media sink.
   // The decoder monitor must be held with exactly one lock count.
@@ -621,21 +604,16 @@ class MediaDecoderStateMachine
   void CancelSuspendTimer();
 
   bool IsInSeamlessLooping() const;
 
   bool mCanPlayThrough = false;
 
   bool mIsLiveStream = false;
 
-  // True if we shouldn't play our audio (but still write it to any capturing
-  // streams). When this is true, the audio thread will never start again after
-  // it has stopped.
-  bool mAudioCaptured;
-
   // True if all audio frames are already rendered.
   bool mAudioCompleted = false;
 
   // True if all video frames are already rendered.
   bool mVideoCompleted = false;
 
   // True if we should not decode/preroll unnecessary samples, unless we're
   // played. "Prerolling" in this context refers to when we decode and
@@ -667,23 +645,16 @@ class MediaDecoderStateMachine
   bool mMediaSeekable = true;
 
   // True if the media is seekable only in buffered ranges.
   bool mMediaSeekableOnlyInBufferedRanges = false;
 
   // Track enabling video decode suspension via timer
   DelayedScheduler mVideoDecodeSuspendTimer;
 
-  // Data about MediaStreams that are being fed by the decoder.
-  // Main thread only.
-  RefPtr<OutputStreamManager> mOutputStreamManager;
-
-  // Principal used by output streams. Main thread only.
-  nsCOMPtr<nsIPrincipal> mOutputStreamPrincipal;
-
   // Track the current video decode mode.
   VideoDecodeMode mVideoDecodeMode;
 
   // Track the complete & error for audio/video separately
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkAudioEndedPromise;
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkVideoEndedPromise;
 
   MediaEventListener mAudioQueueListener;
@@ -728,34 +699,55 @@ class MediaDecoderStateMachine
 
   // Pitch preservation for the playback rate.
   Mirror<bool> mPreservesPitch;
 
   // Whether to seek back to the start of the media resource
   // upon reaching the end.
   Mirror<bool> mLooping;
 
+  // The device used with SetSink, or nullptr if no explicit device has been
+  // set.
+  Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
+
+  // Whether all output should be captured into mOutputTracks. While true, the
+  // media sink will only play if there are output tracks.
+  Mirror<bool> mOutputCaptured;
+
+  // Tracks to capture data into.
+  Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
+
+  // PrincipalHandle to feed with data captured into mOutputTracks.
+  Mirror<PrincipalHandle> mOutputPrincipal;
+
+  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mCanonicalOutputTracks;
+  Canonical<PrincipalHandle> mCanonicalOutputPrincipal;
+
   // Duration of the media. This is guaranteed to be non-null after we finish
   // decoding the first frame.
   Canonical<media::NullableTimeUnit> mDuration;
 
   // The time of the current frame, corresponding to the "current
   // playback position" in HTML5. This is referenced from 0, which is the
   // initial playback position.
   Canonical<media::TimeUnit> mCurrentPosition;
 
   // Used to distinguish whether the audio is producing sound.
   Canonical<bool> mIsAudioDataAudible;
 
-  // Used to count the number of pending requests to set a new sink.
-  Atomic<int> mSetSinkRequestsCount;
-
  public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
+  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
+  CanonicalOutputTracks() {
+    return &mCanonicalOutputTracks;
+  }
+  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
+    return &mCanonicalOutputPrincipal;
+  }
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
     return &mDuration;
   }
   AbstractCanonical<media::TimeUnit>* CanonicalCurrentPosition() {
     return &mCurrentPosition;
   }
   AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
     return &mIsAudioDataAudible;
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1521,17 +1521,17 @@ void MediaFormatReader::NotifyNewOutput(
                     "{\"type\":\"AudioData\", \"offset\":%" PRIi64
                     ", \"time_us\":%" PRIi64 ", \"timecode_us\":%" PRIi64
                     ", \"duration_us\":%" PRIi64 ", \"frames\":%" PRIu32
                     ", \"channels\":%" PRIu32 ", \"rate\":%" PRIu32
                     ", \"bytes\":%zu}",
                     sample->mOffset, sample->mTime.ToMicroseconds(),
                     sample->mTimecode.ToMicroseconds(),
                     sample->mDuration.ToMicroseconds(),
-                    static_cast<AudioData*>(sample.get())->Frames(),
+                    sample->As<AudioData>()->Frames(),
                     sample->As<AudioData>()->mChannels,
                     sample->As<AudioData>()->mRate,
                     sample->As<AudioData>()->Data().Length());
             break;
           case MediaData::Type::VIDEO_DATA:
             DDLOGPR(DDLogCategory::Log,
                     aTrack == TrackInfo::kVideoTrack ? "decoded_video"
                                                      : "decoded_got_video!?",
@@ -2113,17 +2113,17 @@ void MediaFormatReader::Update(TrackType
           mPreviousDecodedKeyframeTime_us = output->mTime.ToMicroseconds();
         }
         nsCString error;
         mVideo.mIsHardwareAccelerated =
             mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error);
 #ifdef XP_WIN
         // D3D11_YCBCR_IMAGE images are GPU based, we try to limit the amount
         // of GPU RAM used.
-        VideoData* videoData = static_cast<VideoData*>(output.get());
+        VideoData* videoData = output->As<VideoData>();
         mVideo.mIsHardwareAccelerated =
             mVideo.mIsHardwareAccelerated ||
             (videoData->mImage &&
              videoData->mImage->GetFormat() == ImageFormat::D3D11_YCBCR_IMAGE);
 #endif
       }
     } else if (decoder.HasFatalError()) {
       LOG("Rejecting %s promise: DECODE_ERROR", TrackTypeToStr(aTrack));
@@ -2295,32 +2295,32 @@ void MediaFormatReader::Update(TrackType
 void MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack) {
   MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::Type::NULL_DATA);
   LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]",
       TrackTypeToStr(aTrack), aData->mTime.ToMicroseconds(),
       aData->GetEndTime().ToMicroseconds());
 
   if (aTrack == TrackInfo::kAudioTrack) {
-    AudioData* audioData = static_cast<AudioData*>(aData);
+    AudioData* audioData = aData->As<AudioData>();
 
     if (audioData->mChannels != mInfo.mAudio.mChannels ||
         audioData->mRate != mInfo.mAudio.mRate) {
       LOG("change of audio format (rate:%d->%d). "
           "This is an unsupported configuration",
           mInfo.mAudio.mRate, audioData->mRate);
       mInfo.mAudio.mRate = audioData->mRate;
       mInfo.mAudio.mChannels = audioData->mChannels;
       MutexAutoLock lock(mAudio.mMutex);
       mAudio.mWorkingInfo->GetAsAudioInfo()->mRate = audioData->mRate;
       mAudio.mWorkingInfo->GetAsAudioInfo()->mChannels = audioData->mChannels;
     }
     mAudio.ResolvePromise(audioData, __func__);
   } else if (aTrack == TrackInfo::kVideoTrack) {
-    VideoData* videoData = static_cast<VideoData*>(aData);
+    VideoData* videoData = aData->As<VideoData>();
 
     if (videoData->mDisplay != mInfo.mVideo.mDisplay) {
       LOG("change of video display size (%dx%d->%dx%d)",
           mInfo.mVideo.mDisplay.width, mInfo.mVideo.mDisplay.height,
           videoData->mDisplay.width, videoData->mDisplay.height);
       mInfo.mVideo.mDisplay = videoData->mDisplay;
       MutexAutoLock lock(mVideo.mMutex);
       mVideo.mWorkingInfo->GetAsVideoInfo()->mDisplay = videoData->mDisplay;
--- a/dom/media/MediaResource.h
+++ b/dom/media/MediaResource.h
@@ -55,18 +55,21 @@ class MediaResource : public DecoderDoct
   // Note that this means it's safe for references to this object to be
   // released on a non main thread, but the destructor will always run on
   // the main thread.
   NS_METHOD_(MozExternalRefCountType) AddRef(void);
   NS_METHOD_(MozExternalRefCountType) Release(void);
 
   // Close the resource, stop any listeners, channels, etc.
   // Cancels any currently blocking Read request and forces that request to
-  // return an error.
-  virtual nsresult Close() { return NS_OK; }
+  // return an error. This must be called (and resolve) before the MediaResource
+  // is deleted.
+  virtual RefPtr<GenericPromise> Close() {
+    return GenericPromise::CreateAndResolve(true, __func__);
+  }
 
   // These methods are called off the main thread.
   // Read up to aCount bytes from the stream. The read starts at
   // aOffset in the stream, seeking to that location initially if
   // it is not the current stream offset. The remaining arguments,
   // results and requirements are the same as per the Read method.
   virtual nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
                           uint32_t* aBytes) = 0;
--- a/dom/media/MediaStreamTrack.h
+++ b/dom/media/MediaStreamTrack.h
@@ -303,17 +303,17 @@ class MediaStreamTrackSource : public ns
         mSinks.RemoveElement(sink);
         continue;
       }
       sink->OverrideEnded();
     }
   }
 
   // Principal identifying who may access the contents of this source.
-  nsCOMPtr<nsIPrincipal> mPrincipal;
+  RefPtr<nsIPrincipal> mPrincipal;
 
   // Currently registered sinks.
   nsTArray<WeakPtr<Sink>> mSinks;
 
   // The label of the track we are the source of per the MediaStreamTrack spec.
   const nsString mLabel;
 
   // True if all MediaStreamTrack users have unregistered from this source and
--- a/dom/media/eme/MediaKeys.cpp
+++ b/dom/media/eme/MediaKeys.cpp
@@ -564,26 +564,16 @@ already_AddRefed<MediaKeySession> MediaK
 
 void MediaKeys::OnSessionLoaded(PromiseId aId, bool aSuccess) {
   EME_LOG("MediaKeys[%p]::OnSessionLoaded() resolve promise id=%" PRIu32, this,
           aId);
 
   ResolvePromiseWithResult(aId, aSuccess);
 }
 
-template <typename T>
-void MediaKeys::ResolvePromiseWithResult(PromiseId aId, const T& aResult) {
-  RefPtr<DetailedPromise> promise(RetrievePromise(aId));
-  if (!promise) {
-    return;
-  }
-
-  promise->MaybeResolve(aResult);
-}
-
 void MediaKeys::OnSessionClosed(MediaKeySession* aSession) {
   nsAutoString id;
   aSession->GetSessionId(id);
   mKeySessions.Remove(id);
 }
 
 already_AddRefed<MediaKeySession> MediaKeys::GetSession(
     const nsAString& aSessionId) {
--- a/dom/media/eme/MediaKeys.h
+++ b/dom/media/eme/MediaKeys.h
@@ -144,17 +144,23 @@ class MediaKeys final : public nsIDocume
   // JavaScript: MediaKeys.GetStatusForPolicy()
   already_AddRefed<Promise> GetStatusForPolicy(const MediaKeysPolicy& aPolicy,
                                                ErrorResult& aR);
   // Called by CDMProxy when CDM successfully GetStatusForPolicy.
   void ResolvePromiseWithKeyStatus(PromiseId aId,
                                    dom::MediaKeyStatus aMediaKeyStatus);
 
   template <typename T>
-  void ResolvePromiseWithResult(PromiseId aId, const T& aResult);
+  void ResolvePromiseWithResult(PromiseId aId, const T& aResult) {
+    RefPtr<DetailedPromise> promise(RetrievePromise(aId));
+    if (!promise) {
+      return;
+    }
+    promise->MaybeResolve(aResult);
+  }
 
  private:
   // Instantiate CDMProxy instance.
   // It could be MediaDrmCDMProxy (Widevine on Fennec) or ChromiumCDMProxy (the
   // rest).
   already_AddRefed<CDMProxy> CreateCDMProxy(nsISerialEventTarget* aMainThread);
 
   // Removes promise from mPromises, and returns it.
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSink.h"
 #include "AudioConverter.h"
+#include "AudioDeviceInfo.h"
 #include "MediaQueue.h"
 #include "VideoUtils.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "nsPrintfCString.h"
 
@@ -29,19 +30,21 @@ static const int64_t AUDIO_FUZZ_FRAMES =
 
 // Amount of audio frames we will be processing ahead of use
 static const int32_t LOW_AUDIO_USECS = 300000;
 
 using media::TimeUnit;
 
 AudioSink::AudioSink(AbstractThread* aThread,
                      MediaQueue<AudioData>& aAudioQueue,
-                     const TimeUnit& aStartTime, const AudioInfo& aInfo)
+                     const TimeUnit& aStartTime, const AudioInfo& aInfo,
+                     AudioDeviceInfo* aAudioDevice)
     : mStartTime(aStartTime),
       mInfo(aInfo),
+      mAudioDevice(aAudioDevice),
       mPlaying(true),
       mMonitor("AudioSink"),
       mWritten(0),
       mErrored(false),
       mPlaybackComplete(false),
       mOwnerThread(aThread),
       mProcessedQueueLength(0),
       mFramesParsed(0),
@@ -178,17 +181,17 @@ nsresult AudioSink::InitializeAudioStrea
   AudioConfig::ChannelLayout::ChannelMap channelMap =
       mConverter ? mConverter->OutputConfig().Layout().Map()
                  : AudioConfig::ChannelLayout(mOutputChannels).Map();
   // The layout map used here is already processed by mConverter with
   // mOutputChannels into SMPTE format, so there is no need to worry if
   // StaticPrefs::accessibility_monoaudio_enable() or
   // StaticPrefs::media_forcestereo_enabled() is applied.
   nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
-                                   aParams.mSink);
+                                   mAudioDevice);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }
 
   // Set playback params before calling Start() so they can take effect
   // as soon as the 1st DataCallback of the AudioStream fires.
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -18,21 +18,30 @@
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
 class AudioConverter;
 
 class AudioSink : private AudioStream::DataSource {
-  using PlaybackParams = MediaSink::PlaybackParams;
+ public:
+  struct PlaybackParams {
+    PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
+        : mVolume(aVolume),
+          mPlaybackRate(aPlaybackRate),
+          mPreservesPitch(aPreservesPitch) {}
+    double mVolume;
+    double mPlaybackRate;
+    bool mPreservesPitch;
+  };
 
- public:
   AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
-            const media::TimeUnit& aStartTime, const AudioInfo& aInfo);
+            const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
+            AudioDeviceInfo* aAudioDevice);
 
   ~AudioSink();
 
   // Return a promise which will be resolved when AudioSink
   // finishes playing, or rejected if any error.
   nsresult Init(const PlaybackParams& aParams,
                 RefPtr<MediaSink::EndedPromise>& aEndedPromise);
 
@@ -54,16 +63,18 @@ class AudioSink : private AudioStream::D
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetPlaying(bool aPlaying);
 
   MediaEventSource<bool>& AudibleEvent() { return mAudibleEvent; }
 
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);
 
+  const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
+
  private:
   // Allocate and initialize mAudioStream. Returns NS_OK on success.
   nsresult InitializeAudioStream(const PlaybackParams& aParams);
 
   // Interface of AudioStream::DataSource.
   // Called on the callback thread of cubeb.
   UniquePtr<AudioStream::Chunk> PopFrames(uint32_t aFrames) override;
   bool Ended() const override;
@@ -82,16 +93,20 @@ class AudioSink : private AudioStream::D
 
   // Keep the last good position returned from the audio stream. Used to ensure
   // position returned by GetPosition() is mono-increasing in spite of audio
   // stream error. Used on the task queue of MDSM only.
   media::TimeUnit mLastGoodPosition;
 
   const AudioInfo mInfo;
 
+  // The output device this AudioSink is playing data to. The system's default
+  // device is used if this is null.
+  const RefPtr<AudioDeviceInfo> mAudioDevice;
+
   // Used on the task queue of MDSM only.
   bool mPlaying;
 
   MozPromiseHolder<MediaSink::EndedPromise> mEndedPromise;
 
   /*
    * Members to implement AudioStream::DataSource.
    * Used on the callback thread of cubeb.
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -16,31 +16,16 @@ using media::TimeUnit;
 AudioSinkWrapper::~AudioSinkWrapper() {}
 
 void AudioSinkWrapper::Shutdown() {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "Must be called after playback stopped.");
   mCreator = nullptr;
 }
 
-const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
-  AssertOwnerThread();
-  return mParams;
-}
-
-void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-  if (mAudioSink) {
-    mAudioSink->SetVolume(aParams.mVolume);
-    mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
-    mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
-  }
-  mParams = aParams;
-}
-
 RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
   if (aType == TrackInfo::kAudioTrack) {
     return mEndedPromise;
   }
   return nullptr;
 }
@@ -149,16 +134,21 @@ void AudioSinkWrapper::SetPlaying(bool a
     // Remember how long we've played.
     mPlayDuration = GetPosition();
     // mPlayStartTime must be updated later since GetPosition()
     // depends on the value of mPlayStartTime.
     mPlayStartTime = TimeStamp();
   }
 }
 
+double AudioSinkWrapper::PlaybackRate() const {
+  AssertOwnerThread();
+  return mParams.mPlaybackRate;
+}
+
 nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
                                  const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "playback already started.");
 
   mIsStarted = true;
   mPlayDuration = aStartTime;
   mPlayStartTime = TimeStamp::Now();
--- a/dom/media/mediasink/AudioSinkWrapper.h
+++ b/dom/media/mediasink/AudioSinkWrapper.h
@@ -19,16 +19,18 @@ class AudioSink;
 class MediaData;
 template <class T>
 class MediaQueue;
 
 /**
  * A wrapper around AudioSink to provide the interface of MediaSink.
  */
 class AudioSinkWrapper : public MediaSink {
+  using PlaybackParams = AudioSink::PlaybackParams;
+
   // An AudioSink factory.
   class Creator {
    public:
     virtual ~Creator() {}
     virtual AudioSink* Create() = 0;
   };
 
   // Wrap around a function object which creates AudioSinks.
@@ -41,39 +43,40 @@ class AudioSinkWrapper : public MediaSin
    private:
     Function mFunction;
   };
 
  public:
   template <typename Function>
   AudioSinkWrapper(AbstractThread* aOwnerThread,
                    const MediaQueue<AudioData>& aAudioQueue,
-                   const Function& aFunc)
+                   const Function& aFunc, double aVolume, double aPlaybackRate,
+                   bool aPreservesPitch)
       : mOwnerThread(aOwnerThread),
         mCreator(new CreatorImpl<Function>(aFunc)),
         mIsStarted(false),
+        mParams(aVolume, aPlaybackRate, aPreservesPitch),
         // Give an invalid value to facilitate debug if used before playback
         // starts.
         mPlayDuration(media::TimeUnit::Invalid()),
         mAudioEnded(true),
         mAudioQueue(aAudioQueue) {}
 
-  const PlaybackParams& GetPlaybackParams() const override;
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
-
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
 
   void Shutdown() override;
 
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -2,20 +2,20 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DecodedStream.h"
 #include "AudioSegment.h"
 #include "MediaData.h"
+#include "MediaDecoderStateMachine.h"
 #include "MediaQueue.h"
 #include "MediaTrackGraph.h"
 #include "MediaTrackListener.h"
-#include "OutputStreamManager.h"
 #include "SharedBuffer.h"
 #include "VideoSegment.h"
 #include "VideoUtils.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/SyncRunnable.h"
 #include "mozilla/gfx/Point.h"
 #include "nsProxyRelease.h"
@@ -49,75 +49,73 @@ class DecodedStreamTrackListener : publi
   const RefPtr<DecodedStreamGraphListener> mGraphListener;
   const RefPtr<SourceMediaTrack> mTrack;
 };
 
 class DecodedStreamGraphListener {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
  public:
   DecodedStreamGraphListener(
-      SourceMediaTrack* aAudioStream,
+      SourceMediaTrack* aAudioTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
-      SourceMediaTrack* aVideoStream,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
-      AbstractThread* aMainThread)
+      SourceMediaTrack* aVideoTrack,
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
       : mAudioTrackListener(
-            aAudioStream
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
+            aAudioTrack
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
                 : nullptr),
         mAudioEndedHolder(std::move(aAudioEndedHolder)),
         mVideoTrackListener(
-            aVideoStream
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
+            aVideoTrack
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
                 : nullptr),
         mVideoEndedHolder(std::move(aVideoEndedHolder)),
-        mAudioStream(aAudioStream),
-        mVideoStream(aVideoStream),
-        mAbstractMainThread(aMainThread) {
+        mAudioTrack(aAudioTrack),
+        mVideoTrack(aVideoTrack) {
     MOZ_ASSERT(NS_IsMainThread());
     if (mAudioTrackListener) {
-      mAudioStream->AddListener(mAudioTrackListener);
+      mAudioTrack->AddListener(mAudioTrackListener);
     } else {
       mAudioEnded = true;
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     }
 
     if (mVideoTrackListener) {
-      mVideoStream->AddListener(mVideoTrackListener);
+      mVideoTrack->AddListener(mVideoTrackListener);
     } else {
       mVideoEnded = true;
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     }
   }
 
   void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       if (aCurrentTrackTime >= mAudioEnd) {
-        mAudioStream->End();
+        mAudioTrack->End();
       }
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       if (aCurrentTrackTime >= mVideoEnd) {
-        mVideoStream->End();
+        mVideoTrack->End();
       }
     } else {
       MOZ_CRASH("Unexpected source track");
     }
-    if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
+    if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
       // Only audio playout drives the clock forward, if present and live.
       return;
     }
-    MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
-    MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
+    MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
+    MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
     mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
   }
 
   void NotifyEnded(SourceMediaTrack* aTrack) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEnded = true;
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEnded = true;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
     aTrack->Graph()->DispatchToMainThreadStableState(
         NewRunnableMethod<RefPtr<SourceMediaTrack>>(
             "DecodedStreamGraphListener::DoNotifyTrackEnded", this,
             &DecodedStreamGraphListener::DoNotifyTrackEnded, aTrack));
@@ -140,49 +138,49 @@ class DecodedStreamGraphListener {
    * to a MediaStreamTrack ending on main thread (it uses another listener)
    * before the listeners to render the track get added, potentially meaning a
    * media element doesn't progress before reaching the end although data was
    * available.
    *
    * Callable from any thread.
    */
   void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEnd = aEnd;
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEnd = aEnd;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
     MOZ_ASSERT(NS_IsMainThread());
-    if (aTrack == mAudioStream) {
+    if (aTrack == mAudioTrack) {
       mAudioEndedHolder.ResolveIfExists(true, __func__);
-    } else if (aTrack == mVideoStream) {
+    } else if (aTrack == mVideoTrack) {
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void Forget() {
     MOZ_ASSERT(NS_IsMainThread());
 
-    if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
-      mAudioStream->End();
-      mAudioStream->RemoveListener(mAudioTrackListener);
+    if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
+      mAudioTrack->End();
+      mAudioTrack->RemoveListener(mAudioTrackListener);
     }
     mAudioTrackListener = nullptr;
     mAudioEndedHolder.ResolveIfExists(false, __func__);
 
-    if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
-      mVideoStream->End();
-      mVideoStream->RemoveListener(mVideoTrackListener);
+    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
+      mVideoTrack->End();
+      mVideoTrack->RemoveListener(mVideoTrackListener);
     }
     mVideoTrackListener = nullptr;
     mVideoEndedHolder.ResolveIfExists(false, __func__);
   }
 
   MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }
 
  private:
@@ -199,21 +197,20 @@ class DecodedStreamGraphListener {
   RefPtr<DecodedStreamTrackListener> mVideoTrackListener;
   MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;
 
   // Graph thread only.
   bool mAudioEnded = false;
   bool mVideoEnded = false;
 
   // Any thread.
-  const RefPtr<SourceMediaTrack> mAudioStream;
-  const RefPtr<SourceMediaTrack> mVideoStream;
+  const RefPtr<SourceMediaTrack> mAudioTrack;
+  const RefPtr<SourceMediaTrack> mVideoTrack;
   Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
   Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
-  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamTrackListener::DecodedStreamTrackListener(
     DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aTrack)
     : mGraphListener(aGraphListener), mTrack(aTrack) {}
 
 void DecodedStreamTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
                                               TrackTime aCurrentTrackTime) {
@@ -221,31 +218,30 @@ void DecodedStreamTrackListener::NotifyO
 }
 
 void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
   mGraphListener->NotifyEnded(mTrack);
 }
 
 /**
  * All MediaStream-related data is protected by the decoder's monitor. We have
- * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
+ * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
  * inputs for all output tracks created by OutputStreamManager after calls to
  * captureStream/UntilEnded. Seeking creates new source tracks, as does
  * replaying after the input as ended. In the latter case, the new sources are
  * not connected to tracks created by captureStreamUntilEnded.
  */
 class DecodedStreamData final {
  public:
   DecodedStreamData(
-      OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-      RefPtr<SourceMediaTrack> aAudioStream,
-      RefPtr<SourceMediaTrack> aVideoStream,
+      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
+      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
+      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
-      AbstractThread* aMainThread);
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise);
   ~DecodedStreamData();
   MediaEventSource<int64_t>& OnOutput();
   void Forget();
   void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);
 
   void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
                            const TimeUnit& aEnd,
                            const gfx::IntSize& aIntrinsicSize,
@@ -253,19 +249,19 @@ class DecodedStreamData final {
                            const PrincipalHandle& aPrincipalHandle);
 
   /* The following group of fields are protected by the decoder's monitor
    * and can be read or written on any thread.
    */
   // Count of audio frames written to the track
   int64_t mAudioFramesWritten;
   // Count of video frames written to the track in the track's rate
-  TrackTime mVideoStreamWritten;
+  TrackTime mVideoTrackWritten;
   // Count of audio frames written to the track in the track's rate
-  TrackTime mAudioStreamWritten;
+  TrackTime mAudioTrackWritten;
   // mNextAudioTime is the end timestamp for the last packet sent to the track.
   // Therefore audio packets starting at or after this time need to be copied
   // to the output track.
   TimeUnit mNextAudioTime;
   // mLastVideoStartTime is the start timestamp for the last packet sent to the
   // track. Therefore video packets starting after this time need to be copied
   // to the output track.
   NullableTimeUnit mLastVideoStartTime;
@@ -278,108 +274,121 @@ class DecodedStreamData final {
   TimeStamp mLastVideoTimeStamp;
   // The last video image sent to the track. Useful if we need to replicate
   // the image.
   RefPtr<layers::Image> mLastVideoImage;
   gfx::IntSize mLastVideoImageDisplaySize;
   bool mHaveSentFinishAudio;
   bool mHaveSentFinishVideo;
 
-  const RefPtr<SourceMediaTrack> mAudioStream;
-  const RefPtr<SourceMediaTrack> mVideoStream;
+  const RefPtr<SourceMediaTrack> mAudioTrack;
+  const RefPtr<SourceMediaTrack> mVideoTrack;
+  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
+  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
+  const RefPtr<MediaInputPort> mAudioPort;
+  const RefPtr<MediaInputPort> mVideoPort;
   const RefPtr<DecodedStreamGraphListener> mListener;
-
-  const RefPtr<OutputStreamManager> mOutputStreamManager;
-  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamData::DecodedStreamData(
-    OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-    RefPtr<SourceMediaTrack> aAudioStream,
-    RefPtr<SourceMediaTrack> aVideoStream,
+    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
+    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
+    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
-    AbstractThread* aMainThread)
+    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise)
     : mAudioFramesWritten(0),
-      mVideoStreamWritten(0),
-      mAudioStreamWritten(0),
+      mVideoTrackWritten(0),
+      mAudioTrackWritten(0),
       mNextAudioTime(aInit.mStartTime),
       mHaveSentFinishAudio(false),
       mHaveSentFinishVideo(false),
-      mAudioStream(std::move(aAudioStream)),
-      mVideoStream(std::move(aVideoStream)),
+      mAudioTrack(aInit.mInfo.HasAudio()
+                      ? aGraph->CreateSourceTrack(MediaSegment::AUDIO)
+                      : nullptr),
+      mVideoTrack(aInit.mInfo.HasVideo()
+                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
+                      : nullptr),
+      mAudioOutputTrack(std::move(aAudioOutputTrack)),
+      mVideoOutputTrack(std::move(aVideoOutputTrack)),
+      mAudioPort((mAudioOutputTrack && mAudioTrack)
+                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
+                     : nullptr),
+      mVideoPort((mVideoOutputTrack && mVideoTrack)
+                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
+                     : nullptr),
       // DecodedStreamGraphListener will resolve these promises.
       mListener(MakeRefPtr<DecodedStreamGraphListener>(
-          mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
-          std::move(aVideoEndedPromise), aMainThread)),
-      mOutputStreamManager(aOutputStreamManager),
-      mAbstractMainThread(aMainThread) {
+          mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
+          std::move(aVideoEndedPromise))) {
   MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(
-      mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
-      "Tracks must be pre-created on main thread");
+  if (mAudioTrack) {
+    mAudioTrack->SetAppendDataSourceRate(aInit.mInfo.mAudio.mRate);
+  }
 }
 
-DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }
+DecodedStreamData::~DecodedStreamData() {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mAudioTrack) {
+    mAudioTrack->Destroy();
+  }
+  if (mVideoTrack) {
+    mVideoTrack->Destroy();
+  }
+  if (mAudioPort) {
+    mAudioPort->Destroy();
+  }
+  if (mVideoPort) {
+    mVideoPort->Destroy();
+  }
+}
 
 MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
   return mListener->OnOutput();
 }
 
 void DecodedStreamData::Forget() { mListener->Forget(); }
 
 void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
   aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
   aInfo.mAudioFramesWritten = mAudioFramesWritten;
-  aInfo.mStreamAudioWritten = mAudioStreamWritten;
+  aInfo.mStreamAudioWritten = mAudioTrackWritten;
   aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
   aInfo.mLastVideoStartTime =
       mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mLastVideoEndTime =
       mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
   aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
 }
 
-DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
-                             AbstractThread* aMainThread,
-                             MediaQueue<AudioData>& aAudioQueue,
-                             MediaQueue<VideoData>& aVideoQueue,
-                             OutputStreamManager* aOutputStreamManager)
-    : mOwnerThread(aOwnerThread),
-      mAbstractMainThread(aMainThread),
-      mOutputStreamManager(aOutputStreamManager),
+DecodedStream::DecodedStream(
+    MediaDecoderStateMachine* aStateMachine,
+    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
+    double aPlaybackRate, bool aPreservesPitch,
+    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
+    : mOwnerThread(aStateMachine->OwnerThread()),
       mWatchManager(this, mOwnerThread),
       mPlaying(false, "DecodedStream::mPlaying"),
-      mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
+      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
                        "DecodedStream::mPrincipalHandle (Mirror)"),
+      mOutputTracks(std::move(aOutputTracks)),
+      mVolume(aVolume),
+      mPlaybackRate(aPlaybackRate),
+      mPreservesPitch(aPreservesPitch),
       mAudioQueue(aAudioQueue),
       mVideoQueue(aVideoQueue) {
-  mPrincipalHandle.Connect(mOutputStreamManager->CanonicalPrincipalHandle());
+  mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
 
   mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
-  PlayingChanged();  // Notify of the initial state
 }
 
 DecodedStream::~DecodedStream() {
   MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
-  NS_ProxyRelease("DecodedStream::mOutputStreamManager", mAbstractMainThread,
-                  do_AddRef(mOutputStreamManager));
-}
-
-const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
-  AssertOwnerThread();
-  return mParams;
-}
-
-void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-  mParams = aParams;
 }
 
 RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isSome());
 
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
     return mAudioEndedPromise;
@@ -388,89 +397,87 @@ RefPtr<DecodedStream::EndedPromise> Deco
   }
   return nullptr;
 }
 
 nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                               const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
+  MOZ_DIAGNOSTIC_ASSERT(!mOutputTracks.IsEmpty());
 
   mStartTime.emplace(aStartTime);
   mLastOutputTime = TimeUnit::Zero();
   mInfo = aInfo;
   mPlaying = true;
   ConnectListener();
 
   class R : public Runnable {
     typedef MozPromiseHolder<MediaSink::EndedPromise> Promise;
 
    public:
-    R(PlaybackInfoInit&& aInit, Promise&& aAudioEndedPromise,
-      Promise&& aVideoEndedPromise, OutputStreamManager* aManager,
-      AbstractThread* aMainThread)
+    R(PlaybackInfoInit&& aInit,
+      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+      Promise&& aAudioEndedPromise, Promise&& aVideoEndedPromise)
         : Runnable("CreateDecodedStreamData"),
           mInit(std::move(aInit)),
+          mOutputTracks(std::move(aOutputTracks)),
           mAudioEndedPromise(std::move(aAudioEndedPromise)),
-          mVideoEndedPromise(std::move(aVideoEndedPromise)),
-          mOutputStreamManager(aManager),
-          mAbstractMainThread(aMainThread) {}
+          mVideoEndedPromise(std::move(aVideoEndedPromise)) {}
     NS_IMETHOD Run() override {
       MOZ_ASSERT(NS_IsMainThread());
-      // No need to create a source track when there are no output tracks.
-      // This happens when RemoveOutput() is called immediately after
-      // StartPlayback().
-      if (mOutputStreamManager->IsEmpty()) {
-        // Resolve the promise to indicate the end of playback.
-        mAudioEndedPromise.Resolve(true, __func__);
-        mVideoEndedPromise.Resolve(true, __func__);
+      RefPtr<ProcessedMediaTrack> audioOutputTrack;
+      RefPtr<ProcessedMediaTrack> videoOutputTrack;
+      for (const auto& track : mOutputTracks) {
+        if (track->mType == MediaSegment::AUDIO) {
+          MOZ_DIAGNOSTIC_ASSERT(
+              !audioOutputTrack,
+              "We only support capturing to one output track per kind");
+          audioOutputTrack = track;
+        } else if (track->mType == MediaSegment::VIDEO) {
+          MOZ_DIAGNOSTIC_ASSERT(
+              !videoOutputTrack,
+              "We only support capturing to one output track per kind");
+          videoOutputTrack = track;
+        } else {
+          MOZ_CRASH("Unknown media type");
+        }
+      }
+      if ((!audioOutputTrack && !videoOutputTrack) ||
+          (audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
+          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
+        // No output tracks yet, or they're going away. Halt playback by not
+        // creating DecodedStreamData. MDSM will try again with a new
+        // DecodedStream sink when tracks are available.
         return NS_OK;
       }
-      RefPtr<SourceMediaTrack> audioStream =
-          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
-      if (mInit.mInfo.HasAudio() && !audioStream) {
-        MOZ_DIAGNOSTIC_ASSERT(
-            !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-        audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
-      }
-      if (audioStream) {
-        audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
-      }
-      RefPtr<SourceMediaTrack> videoStream =
-          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
-      if (mInit.mInfo.HasVideo() && !videoStream) {
-        MOZ_DIAGNOSTIC_ASSERT(
-            !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-        videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
-      }
       mData = MakeUnique<DecodedStreamData>(
-          mOutputStreamManager, std::move(mInit), std::move(audioStream),
-          std::move(videoStream), std::move(mAudioEndedPromise),
-          std::move(mVideoEndedPromise), mAbstractMainThread);
+          std::move(mInit), mOutputTracks[0]->Graph(),
+          std::move(audioOutputTrack), std::move(videoOutputTrack),
+          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise));
       return NS_OK;
     }
     UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
 
    private:
     PlaybackInfoInit mInit;
+    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
     Promise mAudioEndedPromise;
     Promise mVideoEndedPromise;
-    RefPtr<OutputStreamManager> mOutputStreamManager;
     UniquePtr<DecodedStreamData> mData;
-    const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
   MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
   mAudioEndedPromise = audioEndedHolder.Ensure(__func__);
   MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
   mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
   PlaybackInfoInit init{aStartTime, aInfo};
-  nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
-                                  std::move(videoEndedHolder),
-                                  mOutputStreamManager, mAbstractMainThread);
+  nsCOMPtr<nsIRunnable> r = new R(
+      std::move(init), nsTArray<RefPtr<ProcessedMediaTrack>>(mOutputTracks),
+      std::move(audioEndedHolder), std::move(videoEndedHolder));
   SyncRunnable::DispatchToThread(
       SystemGroup::EventTargetFor(TaskCategory::Other), r);
   mData = static_cast<R*>(r.get())->ReleaseData();
 
   if (mData) {
     mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                 &DecodedStream::NotifyOutput);
     SendData();
@@ -513,48 +520,50 @@ void DecodedStream::DestroyData(UniquePt
   AssertOwnerThread();
 
   if (!aData) {
     return;
   }
 
   mOutputListener.Disconnect();
 
-  NS_DispatchToMainThread(NS_NewRunnableFunction(
-      "DecodedStream::DestroyData",
-      [data = std::move(aData), manager = mOutputStreamManager]() {
-        data->Forget();
-        manager->RemoveTracks();
-      }));
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction("DecodedStream::DestroyData",
+                             [data = std::move(aData)]() { data->Forget(); }));
 }
 
 void DecodedStream::SetPlaying(bool aPlaying) {
   AssertOwnerThread();
 
   // Resume/pause matters only when playback started.
   if (mStartTime.isNothing()) {
     return;
   }
 
   mPlaying = aPlaying;
 }
 
 void DecodedStream::SetVolume(double aVolume) {
   AssertOwnerThread();
-  mParams.mVolume = aVolume;
+  mVolume = aVolume;
 }
 
 void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
   AssertOwnerThread();
-  mParams.mPlaybackRate = aPlaybackRate;
+  mPlaybackRate = aPlaybackRate;
 }
 
 void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
-  mParams.mPreservesPitch = aPreservesPitch;
+  mPreservesPitch = aPreservesPitch;
+}
+
+double DecodedStream::PlaybackRate() const {
+  AssertOwnerThread();
+  return mPlaybackRate;
 }
 
 static void SendStreamAudio(DecodedStreamData* aStream,
                             const TimeUnit& aStartTime, AudioData* aData,
                             AudioSegment* aOutput, uint32_t aRate,
                             const PrincipalHandle& aPrincipalHandle) {
   // The amount of audio frames that is used to fuzz rounding errors.
   static const int64_t AUDIO_FUZZ_FRAMES = 1;
@@ -623,35 +632,34 @@ void DecodedStream::SendAudio(double aVo
                     aPrincipalHandle);
   }
 
   output.ApplyVolume(aVolume);
 
   // |mNextAudioTime| is updated as we process each audio sample in
   // SendStreamAudio().
   if (output.GetDuration() > 0) {
-    mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
+    mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
   }
 
   if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-    mData->mListener->EndTrackAt(mData->mAudioStream,
-                                 mData->mAudioStreamWritten);
+    mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
     mData->mHaveSentFinishAudio = true;
   }
 }
 
 void DecodedStreamData::WriteVideoToSegment(
     layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
     const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
     VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
   RefPtr<layers::Image> image = aImage;
   auto end =
-      mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
+      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
   auto start =
-      mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
+      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
   aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                        aTimeStamp);
   // Extend this so we get accurate durations for all frames.
   // Because this track is pushed, we need durations so the graph can track
   // when playout of the track has finished.
   aOutput->ExtendLastFrameBy(end - start);
 
   mLastVideoStartTime = Some(aStart);
@@ -687,17 +695,17 @@ void DecodedStream::ResetVideo(const Pri
   // nullptr) at an earlier time than the previous, will signal to that consumer
   // to discard any frames ahead in time of the new frame. To be honest, this is
   // an ugly hack because the direct listeners of the MediaTrackGraph do not
   // have an API that supports clearing the future frames. ImageContainer and
   // VideoFrameContainer do though, and we will need to move to a similar API
   // for video tracks as part of bug 1493618.
   resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                        aPrincipalHandle, false, currentTime);
-  mData->mVideoStream->AppendData(&resetter);
+  mData->mVideoTrack->AppendData(&resetter);
 
   // Consumer buffers have been reset. We now set the next time to the start
   // time of the current frame, so that it can be displayed again on resuming.
   if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
     mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
     mData->mLastVideoEndTime = Some(v->mTime);
   } else {
     // There was no current frame in the queue. We set the next time to the
@@ -767,33 +775,33 @@ void DecodedStream::SendVideo(const Prin
       // the track's lifetime in the MTG, as rendering is based on timestamps,
       // aka frame start times.
       TimeStamp t =
           std::max(mData->mLastVideoTimeStamp,
                    currentTime + (lastEnd - currentPosition).ToTimeDuration());
       TimeUnit end = std::max(
           v->GetEndTime(),
           lastEnd + TimeUnit::FromMicroseconds(
-                        mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
+                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
       mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                  &output, aPrincipalHandle);
     }
   }
 
   // Check the output is not empty.
   bool compensateEOS = false;
   bool forceBlack = false;
   if (output.GetLastFrame()) {
     compensateEOS = ZeroDurationAtLastChunk(output);
   }
 
   if (output.GetDuration() > 0) {
-    mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
+    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
   }
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (!mData->mLastVideoImage) {
       // We have video, but the video queue finished before we received any
       // frame. We insert a black frame to progress any consuming
       // HTMLMediaElement. This mirrors the behavior of VideoSink.
 
@@ -805,49 +813,47 @@ void DecodedStream::SendVideo(const Prin
       mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
     }
     if (compensateEOS) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       // We round the nr of microseconds up, because WriteVideoToSegment
       // will round the conversion from microseconds to TrackTime down.
       auto deviation = TimeUnit::FromMicroseconds(
-          mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
+          mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
       auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
       mData->WriteVideoToSegment(
           mData->mLastVideoImage, start, start + deviation,
           mData->mLastVideoImageDisplaySize,
           currentTime + (start + deviation - currentPosition).ToTimeDuration(),
           &endSegment, aPrincipalHandle);
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (forceBlack) {
         endSegment.ReplaceWithDisabled();
       }
-      mData->mVideoStreamWritten +=
-          mData->mVideoStream->AppendData(&endSegment);
+      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
     }
-    mData->mListener->EndTrackAt(mData->mVideoStream,
-                                 mData->mVideoStreamWritten);
+    mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
     mData->mHaveSentFinishVideo = true;
   }
 }
 
 void DecodedStream::SendData() {
   AssertOwnerThread();
 
   // Not yet created on the main thread. MDSM will try again later.
   if (!mData) {
     return;
   }
 
   if (!mPlaying) {
     return;
   }
 
-  SendAudio(mParams.mVolume, mPrincipalHandle);
+  SendAudio(mVolume, mPrincipalHandle);
   SendVideo(mPrincipalHandle);
 }
 
 TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
     auto t = mStartTime.ref() +
              FramesToTimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
@@ -891,20 +897,16 @@ void DecodedStream::NotifyOutput(int64_t
 
 void DecodedStream::PlayingChanged() {
   AssertOwnerThread();
 
   if (!mPlaying) {
     // On seek or pause we discard future frames.
     ResetVideo(mPrincipalHandle);
   }
-
-  mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
-      "OutputStreamManager::SetPlaying", mOutputStreamManager,
-      &OutputStreamManager::SetPlaying, mPlaying));
 }
 
 void DecodedStream::ConnectListener() {
   AssertOwnerThread();
 
   mAudioPushListener = mAudioQueue.PushEvent().Connect(
       mOwnerThread, this, &DecodedStream::SendData);
   mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -17,52 +17,49 @@
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/StateMirroring.h"
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 
 class DecodedStreamData;
+class MediaDecoderStateMachine;
 class AudioData;
 class VideoData;
-class OutputStreamManager;
 struct PlaybackInfoInit;
 class ProcessedMediaTrack;
 class TimeStamp;
 
 template <class T>
 class MediaQueue;
 
 class DecodedStream : public MediaSink {
-  using MediaSink::PlaybackParams;
-
  public:
-  DecodedStream(AbstractThread* aOwnerThread, AbstractThread* aMainThread,
+  DecodedStream(MediaDecoderStateMachine* aStateMachine,
+                nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+                double aVolume, double aPlaybackRate, bool aPreservesPitch,
                 MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue,
-                OutputStreamManager* aOutputStreamManager);
-
-  // MediaSink functions.
-  const PlaybackParams& GetPlaybackParams() const override;
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
+                MediaQueue<VideoData>& aVideoQueue);
 
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override {
     // TODO: implement this.
     return false;
   }
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
   void Shutdown() override;
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) override;
 
@@ -83,36 +80,31 @@ class DecodedStream : public MediaSink {
 
   void PlayingChanged();
 
   void ConnectListener();
   void DisconnectListener();
 
   const RefPtr<AbstractThread> mOwnerThread;
 
-  const RefPtr<AbstractThread> mAbstractMainThread;
-
-  /*
-   * Main thread only members.
-   */
-  // Data about MediaStreams that are being fed by the decoder.
-  const RefPtr<OutputStreamManager> mOutputStreamManager;
-
   /*
    * Worker thread only members.
    */
   WatchManager<DecodedStream> mWatchManager;
   UniquePtr<DecodedStreamData> mData;
   RefPtr<EndedPromise> mAudioEndedPromise;
   RefPtr<EndedPromise> mVideoEndedPromise;
 
   Watchable<bool> mPlaying;
   Mirror<PrincipalHandle> mPrincipalHandle;
+  const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
 
-  PlaybackParams mParams;
+  double mVolume;
+  double mPlaybackRate;
+  bool mPreservesPitch;
 
   media::NullableTimeUnit mStartTime;
   media::TimeUnit mLastOutputTime;
   MediaInfo mInfo;
 
   MediaQueue<AudioData>& mAudioQueue;
   MediaQueue<VideoData>& mVideoQueue;
 
--- a/dom/media/mediasink/MediaSink.h
+++ b/dom/media/mediasink/MediaSink.h
@@ -2,17 +2,16 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaSink_h_
 #define MediaSink_h_
 
-#include "AudioDeviceInfo.h"
 #include "MediaInfo.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/dom/MediaDebugInfoBinding.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
@@ -34,33 +33,16 @@ class VideoFrameContainer;
  * Note this class is not thread-safe and should be called from the state
  * machine thread only.
  */
 class MediaSink {
  public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
   typedef mozilla::TrackInfo::TrackType TrackType;
 
-  struct PlaybackParams {
-    PlaybackParams()
-        : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
-    double mVolume;
-    double mPlaybackRate;
-    bool mPreservesPitch;
-    RefPtr<AudioDeviceInfo> mSink;
-  };
-
-  // Return the playback parameters of this sink.
-  // Can be called in any state.
-  virtual const PlaybackParams& GetPlaybackParams() const = 0;
-
-  // Set the playback parameters of this sink.
-  // Can be called in any state.
-  virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
-
   // EndedPromise needs to be a non-exclusive promise as it is shared between
   // both the AudioSink and VideoSink.
   typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
 
   // Return a promise which is resolved when the track finishes
   // or null if no such track.
   // Must be called after playback starts.
   virtual RefPtr<EndedPromise> OnEnded(TrackType aType) = 0;
@@ -95,16 +77,20 @@ class MediaSink {
   // Whether to preserve pitch of the audio track.
   // Do nothing if this sink has no audio track.
   // Can be called in any state.
   virtual void SetPreservesPitch(bool aPreservesPitch) {}
 
   // Pause/resume the playback. Only work after playback starts.
   virtual void SetPlaying(bool aPlaying) = 0;
 
+  // Get the playback rate.
+  // Can be called in any state.
+  virtual double PlaybackRate() const = 0;
+
   // Single frame rendering operation may need to be done before playback
   // started (1st frame) or right after seek completed or playback stopped.
   // Do nothing if this sink has no video track. Can be called in any state.
   virtual void Redraw(const VideoInfo& aInfo){};
 
   // Begin a playback session with the provided start time and media info.
   // Must be called when playback is stopped.
   virtual nsresult Start(const media::TimeUnit& aStartTime,
@@ -117,16 +103,20 @@ class MediaSink {
   // Return true if playback has started.
   // Can be called in any state.
   virtual bool IsStarted() const = 0;
 
   // Return true if playback is started and not paused otherwise false.
   // Can be called in any state.
   virtual bool IsPlaying() const = 0;
 
+  // The audio output device this MediaSink is playing audio data to. The
+  // default device is used if this returns null.
+  virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
+
   // Called on the state machine thread to shut down the sink. All resources
   // allocated by this sink should be released.
   // Must be called after playback stopped.
   virtual void Shutdown() {}
 
   virtual void SetSecondaryVideoContainer(VideoFrameContainer* aSecondary) {}
   virtual void ClearSecondaryVideoContainer() {}
 
deleted file mode 100644
--- a/dom/media/mediasink/OutputStreamManager.cpp
+++ /dev/null
@@ -1,357 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "OutputStreamManager.h"
-
-#include "DOMMediaStream.h"
-#include "../MediaTrackGraph.h"
-#include "mozilla/dom/MediaStreamTrack.h"
-#include "mozilla/dom/AudioStreamTrack.h"
-#include "mozilla/dom/VideoStreamTrack.h"
-#include "nsContentUtils.h"
-
-namespace mozilla {
-
-#define LOG(level, msg, ...) \
-  MOZ_LOG(gMediaDecoderLog, level, (msg, ##__VA_ARGS__))
-
-class DecodedStreamTrackSource : public dom::MediaStreamTrackSource {
- public:
-  NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DecodedStreamTrackSource,
-                                           dom::MediaStreamTrackSource)
-
-  explicit DecodedStreamTrackSource(SourceMediaTrack* aSourceStream,
-                                    nsIPrincipal* aPrincipal)
-      : dom::MediaStreamTrackSource(aPrincipal, nsString()),
-        mTrack(aSourceStream->Graph()->CreateForwardedInputTrack(
-            aSourceStream->mType)),
-        mPort(mTrack->AllocateInputPort(aSourceStream)) {
-    MOZ_ASSERT(NS_IsMainThread());
-  }
-
-  dom::MediaSourceEnum GetMediaSource() const override {
-    return dom::MediaSourceEnum::Other;
-  }
-
-  void Stop() override {
-    MOZ_ASSERT(NS_IsMainThread());
-
-    // We don't notify the source that a track was stopped since it will keep
-    // producing tracks until the element ends. The decoder also needs the
-    // tracks it created to be live at the source since the decoder's clock is
-    // based on MediaStreams during capture. We do however, disconnect this
-    // track's underlying track.
-    if (!mTrack->IsDestroyed()) {
-      mTrack->Destroy();
-      mPort->Destroy();
-    }
-  }
-
-  void Disable() override {}
-
-  void Enable() override {}
-
-  void SetPrincipal(nsIPrincipal* aPrincipal) {
-    MOZ_ASSERT(NS_IsMainThread());
-    mPrincipal = aPrincipal;
-    PrincipalChanged();
-  }
-
-  void ForceEnded() { OverrideEnded(); }
-
-  const RefPtr<ProcessedMediaTrack> mTrack;
-  const RefPtr<MediaInputPort> mPort;
-
- protected:
-  virtual ~DecodedStreamTrackSource() {
-    MOZ_ASSERT(NS_IsMainThread());
-    MOZ_ASSERT(mTrack->IsDestroyed());
-  }
-};
-
-NS_IMPL_ADDREF_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
-NS_IMPL_RELEASE_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
-NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DecodedStreamTrackSource)
-NS_INTERFACE_MAP_END_INHERITING(dom::MediaStreamTrackSource)
-NS_IMPL_CYCLE_COLLECTION_INHERITED(DecodedStreamTrackSource,
-                                   dom::MediaStreamTrackSource)
-
-OutputStreamData::OutputStreamData(OutputStreamManager* aManager,
-                                   AbstractThread* aAbstractMainThread,
-                                   DOMMediaStream* aDOMStream)
-    : mManager(aManager),
-      mAbstractMainThread(aAbstractMainThread),
-      mDOMStream(aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-}
-
-OutputStreamData::~OutputStreamData() = default;
-
-void OutputStreamData::AddTrack(SourceMediaTrack* aTrack,
-                                MediaSegment::Type aType,
-                                nsIPrincipal* aPrincipal, bool aAsyncAddTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-
-  LOG(LogLevel::Debug,
-      "Adding output %s track sourced from track %p to MediaStream %p%s",
-      aType == MediaSegment::AUDIO ? "audio" : "video", aTrack,
-      mDOMStream.get(), aAsyncAddTrack ? " (async)" : "");
-
-  auto source = MakeRefPtr<DecodedStreamTrackSource>(aTrack, aPrincipal);
-  RefPtr<dom::MediaStreamTrack> track;
-  if (aType == MediaSegment::AUDIO) {
-    track = new dom::AudioStreamTrack(mDOMStream->GetParentObject(),
-                                      source->mTrack, source);
-  } else {
-    MOZ_ASSERT(aType == MediaSegment::VIDEO);
-    track = new dom::VideoStreamTrack(mDOMStream->GetParentObject(),
-                                      source->mTrack, source);
-  }
-  mTracks.AppendElement(track.get());
-  if (aAsyncAddTrack) {
-    GetMainThreadEventTarget()->Dispatch(
-        NewRunnableMethod<RefPtr<dom::MediaStreamTrack>>(
-            "DOMMediaStream::AddTrackInternal", mDOMStream.get(),
-            &DOMMediaStream::AddTrackInternal, track));
-  } else {
-    mDOMStream->AddTrackInternal(track);
-  }
-}
-
-void OutputStreamData::RemoveTrack(SourceMediaTrack* aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-
-  LOG(LogLevel::Debug,
-      "Removing output track sourced by track %p from MediaStream %p", aTrack,
-      mDOMStream.get());
-
-  for (const auto& t : nsTArray<WeakPtr<dom::MediaStreamTrack>>(mTracks)) {
-    mTracks.RemoveElement(t);
-    if (!t || t->Ended()) {
-      continue;
-    }
-    DecodedStreamTrackSource& source =
-        static_cast<DecodedStreamTrackSource&>(t->GetSource());
-    GetMainThreadEventTarget()->Dispatch(
-        NewRunnableMethod("DecodedStreamTrackSource::ForceEnded", &source,
-                          &DecodedStreamTrackSource::ForceEnded));
-  }
-}
-
-void OutputStreamData::SetPrincipal(nsIPrincipal* aPrincipal) {
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-  for (const WeakPtr<dom::MediaStreamTrack>& track : mTracks) {
-    if (!track || track->Ended()) {
-      continue;
-    }
-    DecodedStreamTrackSource& source =
-        static_cast<DecodedStreamTrackSource&>(track->GetSource());
-    source.SetPrincipal(aPrincipal);
-  }
-}
-
-OutputStreamManager::OutputStreamManager(SharedDummyTrack* aDummyStream,
-                                         nsIPrincipal* aPrincipal,
-                                         AbstractThread* aAbstractMainThread)
-    : mAbstractMainThread(aAbstractMainThread),
-      mDummyStream(aDummyStream),
-      mPrincipalHandle(
-          aAbstractMainThread,
-          aPrincipal ? MakePrincipalHandle(aPrincipal) : PRINCIPAL_HANDLE_NONE,
-          "OutputStreamManager::mPrincipalHandle (Canonical)") {
-  MOZ_ASSERT(NS_IsMainThread());
-}
-
-void OutputStreamManager::Add(DOMMediaStream* aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  LOG(LogLevel::Info, "Adding MediaStream %p", aDOMStream);
-
-  OutputStreamData* p = mStreams
-                            .AppendElement(new OutputStreamData(
-                                this, mAbstractMainThread, aDOMStream))
-                            ->get();
-  for (const auto& lt : mLiveTracks) {
-    p->AddTrack(lt->mSourceTrack, lt->mType, mPrincipalHandle.Ref(), false);
-  }
-}
-
-void OutputStreamManager::Remove(DOMMediaStream* aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  LOG(LogLevel::Info, "Removing MediaStream %p", aDOMStream);
-
-  AutoRemoveDestroyedStreams();
-  mStreams.ApplyIf(
-      aDOMStream, 0, StreamComparator(),
-      [&](const UniquePtr<OutputStreamData>& aData) {
-        for (const auto& lt : mLiveTracks) {
-          aData->RemoveTrack(lt->mSourceTrack);
-        }
-      },
-      []() { MOZ_ASSERT_UNREACHABLE("Didn't exist"); });
-  DebugOnly<bool> rv = mStreams.RemoveElement(aDOMStream, StreamComparator());
-  MOZ_ASSERT(rv);
-}
-
-bool OutputStreamManager::HasTrackType(MediaSegment::Type aType) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  return mLiveTracks.Contains(aType, TrackTypeComparator());
-}
-
-bool OutputStreamManager::HasTracks(SourceMediaTrack* aAudioStream,
-                                    SourceMediaTrack* aVideoStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  size_t nrExpectedTracks = 0;
-  bool asExpected = true;
-  if (aAudioStream) {
-    Unused << ++nrExpectedTracks;
-    asExpected = asExpected && mLiveTracks.Contains(
-                                   MakePair(aAudioStream, MediaSegment::AUDIO),
-                                   TrackComparator());
-  }
-  if (aVideoStream) {
-    Unused << ++nrExpectedTracks;
-    asExpected = asExpected && mLiveTracks.Contains(
-                                   MakePair(aVideoStream, MediaSegment::VIDEO),
-                                   TrackComparator());
-  }
-  asExpected = asExpected && mLiveTracks.Length() == nrExpectedTracks;
-  return asExpected;
-}
-
-SourceMediaTrack* OutputStreamManager::GetPrecreatedTrackOfType(
-    MediaSegment::Type aType) const {
-  auto i = mLiveTracks.IndexOf(aType, 0, PrecreatedTrackTypeComparator());
-  return i == nsTArray<UniquePtr<LiveTrack>>::NoIndex
-             ? nullptr
-             : mLiveTracks[i]->mSourceTrack.get();
-}
-
-size_t OutputStreamManager::NumberOfTracks() {
-  MOZ_ASSERT(NS_IsMainThread());
-  return mLiveTracks.Length();
-}
-
-already_AddRefed<SourceMediaTrack> OutputStreamManager::AddTrack(
-    MediaSegment::Type aType) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(!HasTrackType(aType),
-             "Cannot have two tracks of the same type at the same time");
-
-  RefPtr<SourceMediaTrack> track =
-      mDummyStream->mTrack->Graph()->CreateSourceTrack(aType);
-  if (!mPlaying) {
-    track->Suspend();
-  }
-
-  LOG(LogLevel::Info, "Adding %s track sourced by track %p",
-      aType == MediaSegment::AUDIO ? "audio" : "video", track.get());
-
-  mLiveTracks.AppendElement(MakeUnique<LiveTrack>(track, aType));
-  AutoRemoveDestroyedStreams();
-  for (const auto& data : mStreams) {
-    data->AddTrack(track, aType, mPrincipalHandle.Ref(), true);
-  }
-
-  return track.forget();
-}
-
-OutputStreamManager::LiveTrack::LiveTrack(SourceMediaTrack* aSourceTrack,
-                                          MediaSegment::Type aType)
-    : mSourceTrack(aSourceTrack), mType(aType) {}
-
-OutputStreamManager::LiveTrack::~LiveTrack() { mSourceTrack->Destroy(); }
-
-void OutputStreamManager::AutoRemoveDestroyedStreams() {
-  MOZ_ASSERT(NS_IsMainThread());
-  for (size_t i = mStreams.Length(); i > 0; --i) {
-    const auto& data = mStreams[i - 1];
-    if (!data->mDOMStream) {
-      // If the mDOMStream WeakPtr is now null, mDOMStream has been destructed.
-      mStreams.RemoveElementAt(i - 1);
-    }
-  }
-}
-
-void OutputStreamManager::RemoveTrack(SourceMediaTrack* aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG(LogLevel::Info, "Removing track with source track %p", aTrack);
-  DebugOnly<bool> rv =
-      mLiveTracks.RemoveElement(aTrack, TrackStreamComparator());
-  MOZ_ASSERT(rv);
-  AutoRemoveDestroyedStreams();
-  for (const auto& data : mStreams) {
-    data->RemoveTrack(aTrack);
-  }
-}
-
-void OutputStreamManager::RemoveTracks() {
-  MOZ_ASSERT(NS_IsMainThread());
-  for (size_t i = mLiveTracks.Length(); i > 0; --i) {
-    RemoveTrack(mLiveTracks[i - 1]->mSourceTrack);
-  }
-}
-
-void OutputStreamManager::Disconnect() {
-  MOZ_ASSERT(NS_IsMainThread());
-  RemoveTracks();
-  MOZ_ASSERT(mLiveTracks.IsEmpty());
-  AutoRemoveDestroyedStreams();
-  nsTArray<RefPtr<DOMMediaStream>> domStreams(mStreams.Length());
-  for (const auto& data : mStreams) {
-    domStreams.AppendElement(data->mDOMStream);
-  }
-  for (auto& domStream : domStreams) {
-    Remove(domStream);
-  }
-  MOZ_ASSERT(mStreams.IsEmpty());
-}
-
-AbstractCanonical<PrincipalHandle>*
-OutputStreamManager::CanonicalPrincipalHandle() {
-  return &mPrincipalHandle;
-}
-
-void OutputStreamManager::SetPrincipal(nsIPrincipal* aPrincipal) {
-  MOZ_ASSERT(NS_IsMainThread());
-  nsCOMPtr<nsIPrincipal> principal = GetPrincipalFromHandle(mPrincipalHandle);
-  if (nsContentUtils::CombineResourcePrincipals(&principal, aPrincipal)) {
-    AutoRemoveDestroyedStreams();
-    for (const UniquePtr<OutputStreamData>& data : mStreams) {
-      data->SetPrincipal(principal);
-    }
-    mPrincipalHandle = MakePrincipalHandle(principal);
-  }
-}
-
-void OutputStreamManager::SetPlaying(bool aPlaying) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mPlaying == aPlaying) {
-    return;
-  }
-
-  mPlaying = aPlaying;
-  for (auto& lt : mLiveTracks) {
-    if (mPlaying) {
-      lt->mSourceTrack->Resume();
-      lt->mEverPlayed = true;
-    } else {
-      lt->mSourceTrack->Suspend();
-    }
-  }
-}
-
-OutputStreamManager::~OutputStreamManager() = default;
-
-#undef LOG
-
-}  // namespace mozilla
deleted file mode 100644
--- a/dom/media/mediasink/OutputStreamManager.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef OutputStreamManager_h
-#define OutputStreamManager_h
-
-#include "mozilla/RefPtr.h"
-#include "mozilla/StateMirroring.h"
-#include "mozilla/WeakPtr.h"
-#include "nsTArray.h"
-
-namespace mozilla {
-
-class DOMMediaStream;
-class MediaInputPort;
-class OutputStreamManager;
-class ProcessedMediaTrack;
-class SourceMediaTrack;
-
-namespace dom {
-class MediaStreamTrack;
-}
-
-class OutputStreamData {
- public:
-  OutputStreamData(OutputStreamManager* aManager,
-                   AbstractThread* aAbstractMainThread,
-                   DOMMediaStream* aDOMStream);
-  OutputStreamData(const OutputStreamData& aOther) = delete;
-  OutputStreamData(OutputStreamData&& aOther) = delete;
-  ~OutputStreamData();
-
-  // Creates and adds a MediaStreamTrack to mDOMStream so that we can feed data
-  // to it. For a true aAsyncAddTrack we will dispatch a task to add the
-  // created track to mDOMStream, as is required by spec for the "addtrack"
-  // event.
-  void AddTrack(SourceMediaTrack* aTrack, MediaSegment::Type aType,
-                nsIPrincipal* aPrincipal, bool aAsyncAddTrack);
-  // Ends any MediaStreamTracks sourced from aTrack.
-  void RemoveTrack(SourceMediaTrack* aTrack);
-
-  void SetPrincipal(nsIPrincipal* aPrincipal);
-
-  const RefPtr<OutputStreamManager> mManager;
-  const RefPtr<AbstractThread> mAbstractMainThread;
-  // The DOMMediaStream we add tracks to and represent.
-  const WeakPtr<DOMMediaStream> mDOMStream;
-
- private:
-  // Tracks that have been added and not yet removed.
-  nsTArray<WeakPtr<dom::MediaStreamTrack>> mTracks;
-};
-
-class OutputStreamManager {
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
-
- public:
-  OutputStreamManager(SharedDummyTrack* aDummyStream, nsIPrincipal* aPrincipal,
-                      AbstractThread* aAbstractMainThread);
-  // Add the output stream to the collection.
-  void Add(DOMMediaStream* aDOMStream);
-  // Remove the output stream from the collection.
-  void Remove(DOMMediaStream* aDOMStream);
-  // Returns true if there's a live track of the given type.
-  bool HasTrackType(MediaSegment::Type aType);
-  // Returns true if the given tracks are sourcing all currently live tracks.
-  // Use nullptr to make it ignored for that type.
-  bool HasTracks(SourceMediaTrack* aAudioStream,
-                 SourceMediaTrack* aVideoStream);
-  // Gets the underlying track for the given type if it has never been played,
-  // or nullptr if there is none.
-  SourceMediaTrack* GetPrecreatedTrackOfType(MediaSegment::Type aType) const;
-  // Returns the number of live tracks.
-  size_t NumberOfTracks();
-  // Add a track sourced to all output tracks and return the MediaTrack that
-  // sources it.
-  already_AddRefed<SourceMediaTrack> AddTrack(MediaSegment::Type aType);
-  // Remove all currently live tracks.
-  void RemoveTracks();
-  // Remove all currently live tracks and all output streams.
-  void Disconnect();
-  // The principal handle for the underlying decoder.
-  AbstractCanonical<PrincipalHandle>* CanonicalPrincipalHandle();
-  // Called when the underlying decoder's principal has changed.
-  void SetPrincipal(nsIPrincipal* aPrincipal);
-  // Called by DecodedStream when its playing state changes. While not playing
-  // we suspend mSourceTrack.
-  void SetPlaying(bool aPlaying);
-  // Return true if the collection of output streams is empty.
-  bool IsEmpty() const {
-    MOZ_ASSERT(NS_IsMainThread());
-    return mStreams.IsEmpty();
-  }
-
-  const RefPtr<AbstractThread> mAbstractMainThread;
-
- private:
-  ~OutputStreamManager();
-
-  class LiveTrack {
-   public:
-    LiveTrack(SourceMediaTrack* aSourceTrack, MediaSegment::Type aType);
-    ~LiveTrack();
-
-    const RefPtr<SourceMediaTrack> mSourceTrack;
-    const MediaSegment::Type mType;
-    bool mEverPlayed = false;
-  };
-
-  struct StreamComparator {
-    static bool Equals(const UniquePtr<OutputStreamData>& aData,
-                       DOMMediaStream* aStream) {
-      return aData->mDOMStream == aStream;
-    }
-  };
-  struct TrackStreamComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       SourceMediaTrack* aTrack) {
-      return aLiveTrack->mSourceTrack == aTrack;
-    }
-  };
-  struct TrackTypeComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       MediaSegment::Type aType) {
-      return aLiveTrack->mType == aType;
-    }
-  };
-  struct PrecreatedTrackTypeComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       MediaSegment::Type aType) {
-      return !aLiveTrack->mEverPlayed && aLiveTrack->mType == aType;
-    }
-  };
-  struct TrackComparator {
-    static bool Equals(
-        const UniquePtr<LiveTrack>& aLiveTrack,
-        const Pair<SourceMediaTrack*, MediaSegment::Type>& aOther) {
-      return aLiveTrack->mSourceTrack == aOther.first() &&
-             aLiveTrack->mType == aOther.second();
-    }
-  };
-
-  // Goes through mStreams and removes any entries that have been destroyed.
-  void AutoRemoveDestroyedStreams();
-
-  // Remove tracks sourced from aTrack from all output tracks.
-  void RemoveTrack(SourceMediaTrack* aTrack);
-
-  const RefPtr<SharedDummyTrack> mDummyStream;
-  nsTArray<UniquePtr<OutputStreamData>> mStreams;
-  nsTArray<UniquePtr<LiveTrack>> mLiveTracks;
-  Canonical<PrincipalHandle> mPrincipalHandle;
-  bool mPlaying = false;
-};
-
-}  // namespace mozilla
-
-#endif  // OutputStreamManager_h
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -151,28 +151,16 @@ VideoSink::VideoSink(AbstractThread* aTh
 }
 
 VideoSink::~VideoSink() {
 #ifdef XP_WIN
   MOZ_ASSERT(!mHiResTimersRequested);
 #endif
 }
 
-const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
-  AssertOwnerThread();
-
-  return mAudioSink->GetPlaybackParams();
-}
-
-void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
-  AssertOwnerThread();
-
-  mAudioSink->SetPlaybackParams(aParams);
-}
-
 RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
 
   if (aType == TrackInfo::kAudioTrack) {
     return mAudioSink->OnEnded(aType);
   } else if (aType == TrackInfo::kVideoTrack) {
     return mEndPromise;
@@ -218,16 +206,22 @@ void VideoSink::SetVolume(double aVolume
 }
 
 void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
 
   mAudioSink->SetPreservesPitch(aPreservesPitch);
 }
 
+double VideoSink::PlaybackRate() const {
+  AssertOwnerThread();
+
+  return mAudioSink->PlaybackRate();
+}
+
 void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
 #ifdef XP_WIN
   const bool needed = IsPlaying();
   if (needed == mHiResTimersRequested) {
     return;
   }
   if (needed) {
     // Ensure high precision timers are enabled on Windows, otherwise the
@@ -435,18 +429,18 @@ void VideoSink::TryUpdateRenderedVideoFr
     // Time to render this frame.
     UpdateRenderedVideoFrames();
     return;
   }
 
   // If we send this future frame to the compositor now, it will be rendered
   // immediately and break A/V sync. Instead, we schedule a timer to send it
   // later.
-  int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
-                  mAudioSink->GetPlaybackParams().mPlaybackRate;
+  int64_t delta =
+      (v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::UpdateRenderedVideoFramesByTimer() {
@@ -476,17 +470,17 @@ void VideoSink::RenderVideoFrames(int32_
   AutoTArray<RefPtr<VideoData>, 16> frames;
   VideoQueue().GetFirstElements(aMaxFrames, &frames);
   if (frames.IsEmpty() || !mContainer) {
     return;
   }
 
   AutoTArray<ImageContainer::NonOwningImage, 16> images;
   TimeStamp lastFrameTime;
-  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
+  double playbackRate = mAudioSink->PlaybackRate();
   for (uint32_t i = 0; i < frames.Length(); ++i) {
     VideoData* frame = frames[i];
     bool wasSent = frame->IsSentToCompositor();
     frame->MarkSentToCompositor();
 
     if (!frame->mImage || !frame->mImage->IsValid() ||
         !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
       continue;
@@ -494,18 +488,18 @@ void VideoSink::RenderVideoFrames(int32_
 
     if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
 
     MOZ_ASSERT(!aClockTimeStamp.IsNull());
     int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
-    TimeStamp t = aClockTimeStamp +
-                  TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
+    TimeStamp t =
+        aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
     if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
       // Timestamps out of order; drop the new frame. In theory we should
       // probably replace the previous frame with the new frame if the
       // timestamps are equal, but this is a corrupt video file already so
       // never mind.
       continue;
     }
     MOZ_ASSERT(!t.IsNull());
@@ -608,19 +602,18 @@ void VideoSink::UpdateRenderedVideoFrame
   VideoQueue().GetFirstElements(2, &frames);
   if (frames.Length() < 2) {
     return;
   }
 
   int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
                            MIN_UPDATE_INTERVAL_US);
-  TimeStamp target =
-      nowTime + TimeDuration::FromMicroseconds(
-                    delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
+  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
+                                   delta / mAudioSink->PlaybackRate());
 
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::MaybeResolveEndPromise() {
@@ -642,17 +635,17 @@ void VideoSink::MaybeResolveEndPromise()
     TimeStamp nowTime;
     const auto clockTime = mAudioSink->GetPosition(&nowTime);
     if (clockTime < mVideoFrameEndTime) {
       VSINK_LOG_V(
           "Not reach video end time yet, reschedule timer to resolve "
           "end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
           clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
       int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
-                      mAudioSink->GetPlaybackParams().mPlaybackRate;
+                      mAudioSink->PlaybackRate();
       TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
       auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
         self->mEndPromiseHolder.ResolveIfExists(true, __func__);
         self->mUpdateScheduler.CompleteRequest();
       };
       mUpdateScheduler.Ensure(target, std::move(resolveEndPromise),
                               std::move(resolveEndPromise));
     } else {
--- a/dom/media/mediasink/VideoSink.h
+++ b/dom/media/mediasink/VideoSink.h
@@ -27,36 +27,34 @@ class MediaQueue;
 class VideoSink : public MediaSink {
   typedef mozilla::layers::ImageContainer::ProducerID ProducerID;
 
  public:
   VideoSink(AbstractThread* aThread, MediaSink* aAudioSink,
             MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
             FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);
 
-  const PlaybackParams& GetPlaybackParams() const override;
-
-  void SetPlaybackParams(const PlaybackParams& aParams) override;
-
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
 
   TimeUnit GetEndTime(TrackType aType) const override;
 
   TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
 
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetPlaybackRate(double aPlaybackRate) override;
 
   void SetVolume(double aVolume) override;
 
   void SetPreservesPitch(bool aPreservesPitch) override;
 
   void SetPlaying(bool aPlaying) override;
 
+  double PlaybackRate() const override;
+
   void Redraw(const VideoInfo& aInfo) override;
 
   nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;
 
   void Stop() override;
 
   bool IsStarted() const override;
 
--- a/dom/media/mediasink/moz.build
+++ b/dom/media/mediasink/moz.build
@@ -3,17 +3,16 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     'AudioSink.cpp',
     'AudioSinkWrapper.cpp',
     'DecodedStream.cpp',
-    'OutputStreamManager.cpp',
     'VideoSink.cpp',
 ]
 
 EXPORTS += [
     'MediaSink.h'
 ]
 
 include('/ipc/chromium/chromium-config.mozbuild')
--- a/dom/media/mediasource/SourceBufferResource.cpp
+++ b/dom/media/mediasource/SourceBufferResource.cpp
@@ -19,21 +19,21 @@ mozilla::LogModule* GetSourceBufferResou
   DDMOZ_LOG(GetSourceBufferResourceLog(), mozilla::LogLevel::Debug, \
             "::%s: " arg, __func__, ##__VA_ARGS__)
 #define SBR_DEBUGV(arg, ...)                                          \
   DDMOZ_LOG(GetSourceBufferResourceLog(), mozilla::LogLevel::Verbose, \
             "::%s: " arg, __func__, ##__VA_ARGS__)
 
 namespace mozilla {
 
-nsresult SourceBufferResource::Close() {
+RefPtr<GenericPromise> SourceBufferResource::Close() {
   MOZ_ASSERT(OnThread());
   SBR_DEBUG("Close");
   mClosed = true;
-  return NS_OK;
+  return GenericPromise::CreateAndResolve(true, __func__);
 }
 
 nsresult SourceBufferResource::ReadAt(int64_t aOffset, char* aBuffer,
                                       uint32_t aCount, uint32_t* aBytes) {
   SBR_DEBUG("ReadAt(aOffset=%" PRId64 ", aBuffer=%p, aCount=%u, aBytes=%p)",
             aOffset, aBytes, aCount, aBytes);
   return ReadAtInternal(aOffset, aBuffer, aCount, aBytes);
 }
--- a/dom/media/mediasource/SourceBufferResource.h
+++ b/dom/media/mediasource/SourceBufferResource.h
@@ -31,17 +31,17 @@ class SourceBuffer;
 DDLoggedTypeDeclNameAndBase(SourceBufferResource, MediaResource);
 
 // SourceBufferResource is not thread safe.
 class SourceBufferResource final
     : public MediaResource,
       public DecoderDoctorLifeLogger<SourceBufferResource> {
  public:
   SourceBufferResource();
-  nsresult Close() override;
+  RefPtr<GenericPromise> Close() override;
   nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
                   uint32_t* aBytes) override;
   // Memory-based and no locks, caching discouraged.
   bool ShouldCacheReads() override { return false; }
   void Pin() override { UNIMPLEMENTED(); }
   void Unpin() override { UNIMPLEMENTED(); }
   int64_t GetLength() override { return mInputBuffer.GetLength(); }
   int64_t GetNextCachedData(int64_t aOffset) override {
--- a/dom/media/test/test_mediatrack_consuming_mediaresource.html
+++ b/dom/media/test/test_mediatrack_consuming_mediaresource.html
@@ -5,29 +5,29 @@
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-var manager = new MediaTestManager;
+const manager = new MediaTestManager;
 
 function startTest(test, token) {
-  var elemType = getMajorMimeType(test.type);
-  var element = document.createElement(elemType);
+  const elemType = getMajorMimeType(test.type);
+  const element = document.createElement(elemType);
 
-  var audioOnchange = 0;
-  var audioOnaddtrack = 0;
-  var audioOnremovetrack = 0;
-  var videoOnchange = 0;
-  var videoOnaddtrack = 0;
-  var videoOnremovetrack = 0;
-  var isPlaying = false;
+  let audioOnchange = 0;
+  let audioOnaddtrack = 0;
+  let audioOnremovetrack = 0;
+  let videoOnchange = 0;
+  let videoOnaddtrack = 0;
+  let videoOnremovetrack = 0;
+  let isPlaying = false;
 
   isnot(element.audioTracks, undefined,
         'HTMLMediaElement::AudioTracks() property should be available.');
   isnot(element.videoTracks, undefined,
         'HTMLMediaElement::VideoTracks() property should be available.');
 
   element.audioTracks.onaddtrack = function(e) {
     audioOnaddtrack++;
@@ -48,36 +48,53 @@ function startTest(test, token) {
   element.videoTracks.onremovetrack = function(e) {
     videoOnremovetrack++;
   }
 
   element.videoTracks.onchange = function(e) {
     videoOnchange++;
   }
 
-  function checkTrackRemoved() {
+  function checkTrackNotRemoved() {
+    is(audioOnremovetrack, 0, 'Should have no calls of onremovetrack on audioTracks.');
+    is(videoOnremovetrack, 0, 'Should have no calls of onremovetrack on videoTracks.');
     if (isPlaying) {
-      if (test.hasAudio) {
-        is(audioOnremovetrack, 1, 'Calls of onremovetrack on audioTracks should be 1.');
-        is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
-      }
-      if (test.hasVideo) {
-        is(videoOnremovetrack, 1, 'Calls of onremovetrack on videoTracks should be 1.');
-        is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
-      }
+      is(element.audioTracks.length, test.hasAudio ? 1 : 0,
+        'Expected length of audioTracks.');
+      is(element.videoTracks.length, test.hasVideo ? 1 : 0,
+        'Expected length of videoTracks.');
+    }
+  }
+
+  function checkTrackRemoved() {
+    is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
+    is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
+    if (isPlaying) {
+      is(audioOnremovetrack, test.hasAudio ? 1 : 0,
+        'Expected calls of onremovetrack on audioTracks.');
+      is(videoOnremovetrack, test.hasVideo ? 1 : 0,
+        'Expected calls of onremovetrack on videoTracks.');
     }
   }
 
   function onended() {
     ok(true, 'Event ended is expected to be fired on element.');
-    checkTrackRemoved();
+    checkTrackNotRemoved();
     element.onended = null;
     element.onplaying = null;
     element.onpause = null;
-    manager.finished(element.token);
+    element.src = "";
+    is(element.audioTracks.length, 0, 'audioTracks have been forgotten');
+    is(element.videoTracks.length, 0, 'videoTracks have been forgotten');
+    is(audioOnremovetrack, 0, 'No audio removetrack events yet');
+    is(videoOnremovetrack, 0, 'No video removetrack events yet');
+    setTimeout(() => {
+      checkTrackRemoved();
+      manager.finished(element.token);
+    }, 100);
   }
 
   function checkTrackAdded() {
     isPlaying = true;
     if (test.hasAudio) {
       is(audioOnaddtrack, 1, 'Calls of onaddtrack on audioTracks should be 1.');
       is(element.audioTracks.length, 1, 'The length of audioTracks should be 1.');
       ok(element.audioTracks[0].enabled, 'Audio track should be enabled as default.');
--- a/dom/media/test/test_mediatrack_replay_from_end.html
+++ b/dom/media/test/test_mediatrack_replay_from_end.html
@@ -5,38 +5,39 @@
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-var manager = new MediaTestManager;
+const manager = new MediaTestManager;
 
 function startTest(test, token) {
   // Scenario to test:
   // 1. Audio tracks and video tracks should be added to the track list when
-  //    playing, and all tracks should be removed from the list after we seek
-  //    to the end.
-  // 2. All tracks should be added back to the list if we replay from the end,
-  //    and all tracks should be removed from the list after we seek to the end.
-  // 3. After seek to the middle from end of playback, all tracks should be
-  //    added back to the list if we play from here, and all tracks should be
-  //    removed from the list after we seek to the end.
+  //    metadata has loaded, and all tracks should remain even after we seek to
+  //    the end.
+  // 2. No tracks should be added back to the list if we replay from the end,
+  //    and no tracks should be removed from the list after we seek to the end.
+  // 3. After seek to the middle from end of playback, all tracks should remain
+  //    in the list if we play from here, and no tracks should be removed from
+  //    the list after we seek to the end.
+  // 4. Unsetting the media element's src attribute should remove all tracks.
 
-  var elemType = getMajorMimeType(test.type);
-  var element = document.createElement(elemType);
+  const elemType = getMajorMimeType(test.type);
+  const element = document.createElement(elemType);
 
-  var audioOnaddtrack = 0;
-  var audioOnremovetrack = 0;
-  var videoOnaddtrack = 0;
-  var videoOnremovetrack = 0;
-  var isPlaying = false;
-  var steps = 0;
+  let audioOnaddtrack = 0;
+  let audioOnremovetrack = 0;
+  let videoOnaddtrack = 0;
+  let videoOnremovetrack = 0;
+  let isPlaying = false;
+  let steps = 0;
 
   element.audioTracks.onaddtrack = function(e) {
     audioOnaddtrack++;
   }
 
   element.audioTracks.onremovetrack = function(e) {
     audioOnremovetrack++;
   }
@@ -44,26 +45,33 @@ function startTest(test, token) {
   element.videoTracks.onaddtrack = function(e) {
     videoOnaddtrack++;
   }
 
   element.videoTracks.onremovetrack = function(e) {
     videoOnremovetrack++;
   }
 
-  function testTrackEventCalls(expectedCalls) {
+  function testExpectedAddtrack(expectedCalls) {
     if (test.hasAudio) {
       is(audioOnaddtrack, expectedCalls,
          'Calls of onaddtrack on audioTracks should be '+expectedCalls+' times.');
+    }
+    if (test.hasVideo) {
+      is(videoOnaddtrack, expectedCalls,
+         'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
+    }
+  }
+
+  function testExpectedRemovetrack(expectedCalls) {
+    if (test.hasAudio) {
       is(audioOnremovetrack, expectedCalls,
          'Calls of onremovetrack on audioTracks should be '+expectedCalls+' times.');
     }
     if (test.hasVideo) {
-      is(videoOnaddtrack, expectedCalls,
-         'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
       is(videoOnremovetrack, expectedCalls,
          'Calls of onremovetrack on videoTracks should be '+expectedCalls+' times.');
     }
   }
 
   function finishTesting() {
     element.onpause = null;
     element.onseeked = null;
@@ -71,31 +79,39 @@ function startTest(test, token) {
     element.onended = null;
     manager.finished(element.token);
   }
 
   function onended() {
     if (isPlaying) {
       switch(steps) {
         case 1:
-          testTrackEventCalls(1);
+          testExpectedAddtrack(1);
+          testExpectedRemovetrack(0);
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
         case 2:
-          testTrackEventCalls(2);
+          testExpectedAddtrack(1);
+          testExpectedRemovetrack(0);
           element.currentTime = element.duration * 0.5;
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
         case 3:
-          testTrackEventCalls(3);
-          finishTesting();
+          testExpectedAddtrack(1);
+          testExpectedRemovetrack(0);
+          element.src = "";
+          setTimeout(() => {
+            testExpectedAddtrack(1);
+            testExpectedRemovetrack(1);
+            finishTesting();
+          }, 0);
           break;
       }
     } else {
       ok(true, 'Finish the test anyway if ended is fired before other events.');
       finishTesting();
     }
   }
 
--- a/dom/media/test/test_streams_element_capture_reset.html
+++ b/dom/media/test/test_streams_element_capture_reset.html
@@ -1,134 +1,137 @@
 <!DOCTYPE HTML>
 <html>
 <head>
-  <title>Test that reloading and seeking in a media element that's being captured doesn't crash</title>
+  <title>Test that reloading and seeking in a media element that's being captured behaves as expected</title>
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-  <script type="text/javascript" src="manifest.js"></script>
+  <script src="manifest.js"></script>
 </head>
 <body>
 <video id="v"></video>
 <video id="vout"></video>
 <video id="vout_untilended"></video>
 <pre id="test">
-<script class="testbody" type="text/javascript">
-SimpleTest.waitForExplicitFinish();
-
-var v = document.getElementById('v');
-var vout = document.getElementById('vout');
-var vout_untilended = document.getElementById('vout_untilended');
+<script>
+const v = document.getElementById('v');
+const vout = document.getElementById('vout');
+const vout_untilended = document.getElementById('vout_untilended');
 
 function dumpEvent(event) {
-  var video = event.target;
-  info(video.name + " GOT EVENT " + event.type +
-       " currentTime=" + video.currentTime +
-       " paused=" + video.paused +
-       " ended=" + video.ended +
-       " readyState=" + video.readyState);
+  const video = event.target;
+  info(
+    `${video.name}:${video.id} GOT EVENT ${event.type} ` +
+    `currentTime=${video.currentTime} paused=${video.paused} ` +
+    `ended=${video.ended} readyState=${video.readyState}`
+  );
 }
 
-var events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
-for (var i = 0; i < events.length; ++i) {
-  v.addEventListener(events[i], dumpEvent);
+function unexpected(event) {
+  ok(false, `${event.type} event received on ${event.target.id} unexpectedly`);
+};
+
+const events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
+for (const e of events) {
+  v.addEventListener(e, dumpEvent);
+  vout.addEventListener(e, dumpEvent);
+  vout_untilended.addEventListener(e, dumpEvent);
 }
 
 function isWithinEps(a, b, msg) {
   ok(Math.abs(a - b) < 0.01,
      "Got " + a + ", expected " + b + "; " + msg);
 }
 
 function isGreaterThanOrEqualEps(a, b, msg) {
   ok(a >= b - 0.01,
      "Got " + a + ", expected at least " + b + "; " + msg);
 }
 
-function startTest(test) {
-  var seekTime = test.duration/2;
-
-  function endedAfterReplay() {
-    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at third 'ended' event");
-    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration*2,
-	            "checking vout.currentTime after seeking, playing through and reloading");
-    SimpleTest.finish();
-  };
-
-  function endedAfterSeek() {
-    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at second 'ended' event");
-    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration,
-                "checking vout.currentTime after seeking and playing through again");
-    v.removeEventListener("ended", endedAfterSeek);
-    v.addEventListener("ended", endedAfterReplay);
-    v.src = test.name + "?1";
-    v.play();
-  };
-
-  function seeked() {
-    isGreaterThanOrEqualEps(v.currentTime, seekTime, "Finished seeking");
-    isGreaterThanOrEqualEps(vout.currentTime, test.duration,
-                "checking vout.currentTime has not changed after seeking");
-    v.removeEventListener("seeked", seeked);
-    function dontPlayAgain() {
-      ok(false, "vout_untilended should not play again");
-    }
-    vout_untilended.addEventListener("playing", dontPlayAgain);
-    vout_untilended.addEventListener("ended", dontPlayAgain);
-    v.addEventListener("ended", endedAfterSeek);
-    v.play();
-  };
-
-  function ended() {
-    // Don't compare current time until both v and vout_untilended are ended,
-    // otherwise, current time could be smaller than the duration.
-    if (!v.ended || !vout_untilended.ended) {
-      return;
-    }
-
-    isGreaterThanOrEqualEps(vout.currentTime, test.duration, "checking vout.currentTime at first 'ended' event");
-    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at first 'ended' event");
-    is(vout.ended, false, "checking vout has not ended");
-    is(vout_untilended.ended, true, "checking vout_untilended has actually ended");
-
-    v.removeEventListener("ended", ended);
-    vout_untilended.removeEventListener("ended", ended);
-
-    v.pause();
-    v.currentTime = seekTime;
-    v.addEventListener("seeked", seeked);
-  };
-
-  v.addEventListener("ended", ended);
-  vout_untilended.addEventListener("ended", ended);
-
-  function checkNoEnded() {
-    ok(false, "ended event received unexpectedly");
-  };
-
-  vout.addEventListener("ended", checkNoEnded);
+async function startTest(test) {
+  const seekTime = test.duration/2;
 
   v.src = test.name;
   v.name = test.name;
+  vout.name = test.name;
+  vout_untilended.name = test.name;
   v.preload = "metadata";
+  await new Promise(r => v.onloadedmetadata = r);
+
+  vout.srcObject = v.mozCaptureStream();
+  vout.play();
+
+  vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
+  vout_untilended.play();
+
+  v.play();
 
-  function loadedmetadata() {
-    vout.srcObject = v.mozCaptureStream();
-    vout.play();
+  await new Promise(r => v.onended = r);
+  isGreaterThanOrEqualEps(v.currentTime, test.duration,
+    "checking v.currentTime at first 'ended' event");
+
+  await Promise.all([
+    new Promise(r => vout.onended = r),
+    new Promise(r => vout_untilended.onended = r),
+  ]);
+
+  isGreaterThanOrEqualEps(vout.currentTime, test.duration,
+    "checking vout.currentTime at first 'ended' event");
+  ok(vout.ended, "checking vout has actually ended");
+  ok(vout_untilended.ended, "checking vout_untilended has actually ended");
+
+  vout_untilended.srcObject.onaddtrack = unexpected;
+  vout_untilended.onplaying = unexpected;
+  vout_untilended.onended = unexpected;
 
-    vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
-    vout_untilended.play();
+  const voutPreSeekCurrentTime = vout.currentTime;
+  v.currentTime = seekTime;
+  await new Promise(r => v.onseeked = r);
+
+  is(v.currentTime, seekTime, "Finished seeking");
+  is(vout.currentTime, voutPreSeekCurrentTime,
+    "checking vout.currentTime has not changed after seeking");
+
+  v.play();
+  vout.play();
+
+  await new Promise(r => v.onended = r);
+  isGreaterThanOrEqualEps(v.currentTime, test.duration,
+    "checking v.currentTime at second 'ended' event");
 
-    v.play();
-  };
+  await new Promise(r => vout.onended = r);
+  isGreaterThanOrEqualEps(vout.currentTime,
+    (test.duration - seekTime) + test.duration,
+    "checking vout.currentTime after seeking and playing through again");
+
+  v.src = test.name + "?1";
+  v.play();
+  vout.play();
 
-  v.addEventListener("loadedmetadata", loadedmetadata, {once: true});
+  await new Promise(r => v.onended = r);
+  isGreaterThanOrEqualEps(v.currentTime, test.duration,
+    "checking v.currentTime at third 'ended' event");
+
+  await new Promise(r => vout.onended = r);
+  isGreaterThanOrEqualEps(vout.currentTime,
+    (test.duration - seekTime) + test.duration*2,
+    "checking vout.currentTime after seeking, playing through and reloading");
 }
 
-var testVideo = getPlayableVideo(gSmallTests);
-if (testVideo) {
-  startTest(testVideo);
-} else {
-  todo(false, "No playable video");
-}
+(async () => {
+  SimpleTest.waitForExplicitFinish();
+  try {
+    const testVideo = getPlayableVideo(gSmallTests);
+    if (testVideo) {
+      await startTest(testVideo);
+    } else {
+      todo(false, "No playable video");
+    }
+  } catch(e) {
+    ok(false, `Error: ${e}`);
+  } finally {
+    SimpleTest.finish();
+  }
+})();
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webaudio/MediaElementAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaElementAudioSourceNode.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaElementAudioSourceNode.h"
 #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
 #include "AudioDestinationNode.h"
 #include "nsIScriptError.h"
 #include "AudioNodeTrack.h"
+#include "MediaStreamTrack.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(MediaElementAudioSourceNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaElementAudioSourceNode)
   tmp->Destroy();
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -71,18 +71,18 @@ void MediaStreamAudioSourceNode::Init(DO
   }
 
   mInputStream = aMediaStream;
   AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
   mTrack = AudioNodeExternalInputTrack::Create(Context()->Graph(), engine);
   mInputStream->AddConsumerToKeepAlive(ToSupports(this));
 
   mInputStream->RegisterTrackListener(this);
-  if (mInputStream->Active()) {
-    NotifyActive();
+  if (mInputStream->Audible()) {
+    NotifyAudible();
   }
   AttachToRightTrack(mInputStream, aRv);
 }
 
 void MediaStreamAudioSourceNode::Destroy() {
   if (mInputStream) {
     mInputStream->UnregisterTrackListener(this);
     mInputStream = nullptr;
@@ -114,16 +114,17 @@ void MediaStreamAudioSourceNode::AttachT
   }
 
   mInputTrack = aTrack;
   ProcessedMediaTrack* outputTrack =
       static_cast<ProcessedMediaTrack*>(mTrack.get());
   mInputPort = mInputTrack->ForwardTrackContentsTo(outputTrack);
   PrincipalChanged(mInputTrack);  // trigger enabling/disabling of the connector
   mInputTrack->AddPrincipalChangeObserver(this);
+  MarkActive();
 }
 
 void MediaStreamAudioSourceNode::DetachFromTrack() {
   if (mInputTrack) {
     mInputTrack->RemovePrincipalChangeObserver(this);
     mInputTrack = nullptr;
   }
   if (mInputPort) {
@@ -160,17 +161,16 @@ void MediaStreamAudioSourceNode::AttachT
     if (mBehavior == FollowChanges) {
       if (track->Ended()) {
         continue;
       }
     }
 
     if (!track->Ended()) {
       AttachToTrack(track, aRv);
-      MarkActive();
     }
     return;
   }
 
   // There was no track available. We'll allow the node to be garbage collected.
   MarkInactive();
 }
 
@@ -197,17 +197,17 @@ void MediaStreamAudioSourceNode::NotifyT
       return;
     }
 
     DetachFromTrack();
     AttachToRightTrack(mInputStream, IgnoreErrors());
   }
 }
 
-void MediaStreamAudioSourceNode::NotifyActive() {
+void MediaStreamAudioSourceNode::NotifyAudible() {
   MOZ_ASSERT(mInputStream);
   Context()->StartBlockedAudioContextIfAllowed();
 }
 
 /**
  * Changes the principal. Note that this will be called on the main thread, but
  * changes will be enacted on the MediaTrackGraph thread. If the principal
  * change results in the document principal losing access to the stream, then
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -86,17 +86,17 @@ class MediaStreamAudioSourceNode
   // Attaches to the first audio track in the MediaStream, when the tracks are
   // ordered by id.
   void AttachToRightTrack(const RefPtr<DOMMediaStream>& aMediaStream,
                           ErrorResult& aRv);
 
   // From DOMMediaStream::TrackListener.
   void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
-  void NotifyActive() override;
+  void NotifyAudible() override;
 
   // From PrincipalChangeObserver<MediaStreamTrack>.
   void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
 
   // This allows implementing the correct behaviour for both
   // MediaElementAudioSourceNode and MediaStreamAudioSourceNode, that have most
   // of their behaviour shared.
   enum TrackChangeBehavior {
--- a/dom/messagechannel/moz.build
+++ b/dom/messagechannel/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: postMessage")
 
 TEST_DIRS += ['tests']
 
 EXPORTS.mozilla.dom += [
     'MessageChannel.h',
     'MessagePort.h',
     'MessagePortChild.h',
     'MessagePortParent.h',
--- a/dom/webidl/moz.build
+++ b/dom/webidl/moz.build
@@ -32,16 +32,22 @@ with Files("BaseKeyframeTypes.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Animation")
 
 with Files("BatteryManager.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Device Interfaces")
 
 with Files("BiquadFilterNode.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
+with Files("Blob*"):
+    BUG_COMPONENT = ("Core", "DOM: File")
+
+with Files("BroadcastChannel.webidl"):
+    BUG_COMPONENT = ("Core", "DOM: postMessage")
+
 with Files("BrowserElement*"):
     BUG_COMPONENT = ("Core", "DOM: Core & HTML")
 
 with Files("CSP*"):
     BUG_COMPONENT = ("Core", "DOM: Security")
 
 with Files("CSS*"):
     BUG_COMPONENT = ("Core", "CSS Parsing and Computation")
@@ -54,20 +60,20 @@ with Files("Caret*"):
 
 with Files("Channel*"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("Client*"):
     BUG_COMPONENT = ("Core", "DOM: Service Workers")
 
 with Files("Clipboard.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("ClipboardEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("ConstantSourceNode.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("ConvolverNode.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("GeolocationCoordinates.webidl"):
@@ -98,24 +104,27 @@ with Files("DynamicsCompressorNode.webid
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("FakePluginTagInit.webidl"):
     BUG_COMPONENT = ("Core", "Plug-ins")
 
 with Files("FeaturePolicy.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Security")
 
+with Files("File*"):
+    BUG_COMPONENT = ("Core", "DOM: File")
+
 with Files("FocusEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("Font*"):
     BUG_COMPONENT = ("Core", "CSS Parsing and Computation")
 
 with Files("FormData.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: Forms")
 
 with Files("Geolocation.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Geolocation")
 
 with Files("GainNode.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("Gamepad*"):
@@ -129,44 +138,47 @@ with Files("GetUserMediaRequest.webidl")
 
 with Files("Grid.webidl"):
     BUG_COMPONENT = ("Core", "CSS Parsing and Computation")
 
 with Files("HTML*"):
     BUG_COMPONENT = ("Core", "DOM: Core & HTML")
 
 with Files("HashChangeEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: Navigation")
 
 with Files("HiddenPluginEvent.webidl"):
     BUG_COMPONENT = ("Core", "Plug-ins")
 
 with Files("IDB*"):
     BUG_COMPONENT = ("Core", "Storage: IndexedDB")
 
 with Files("IIRFilterNode.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("Image*"):
     BUG_COMPONENT = ("Core", "DOM: Core & HTML")
 
+with Files("ImageBitmap*"):
+    BUG_COMPONENT = ("Core", "Canvas: 2D")
+
 with Files("ImageCapture*"):
     BUG_COMPONENT = ("Core", "Audio/Video")
 
 with Files("InputEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("InstallTrigger.webidl"):
     BUG_COMPONENT = ("Toolkit", "Add-ons Manager")
 
 with Files("KeyAlgorithm.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Security")
 
 with Files("Key*Event*"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("KeyIdsInitData.webidl"):
     BUG_COMPONENT = ("Core", "Audio/Video: Playback")
 
 with Files("Keyframe*"):
     BUG_COMPONENT = ("Core", "DOM: Animation")
 
 with Files("MathML*"):
@@ -191,27 +203,30 @@ with Files("MediaEncryptedEvent.webidl")
     BUG_COMPONENT = ("Core", "Audio/Video")
 
 with Files("MediaKey*"):
     BUG_COMPONENT = ("Core", "Audio/Video: Playback")
 
 with Files("Media*List*"):
     BUG_COMPONENT = ("Core", "CSS Parsing and Computation")
 
+with Files("Message*"):
+    BUG_COMPONENT = ("Core", "DOM: postMessage")
+
 with Files("*Record*"):
     BUG_COMPONENT = ("Core", "Audio/Video: Recording")
 
 with Files("Media*Track*"):
     BUG_COMPONENT = ("Core", "WebRTC: Audio/Video")
 
 with Files("MIDI*"):
     BUG_COMPONENT = ("Core", "DOM: Device Interfaces")
 
 with Files("Mouse*"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("MutationEvent.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Events")
 
 with Files("NativeOSFileInternals.webidl"):
     BUG_COMPONENT = ("Toolkit", "OS.File")
 
 with Files("Net*"):
@@ -231,20 +246,20 @@ with Files("PannerNode.webidl"):
 
 with Files("Peer*"):
     BUG_COMPONENT = ("Core", "WebRTC")
 
 with Files("PeriodicWave.webidl"):
     BUG_COMPONENT = ("Core", "Web Audio")
 
 with Files("PointerEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("PopStateEvent.webidl*"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: Navigation")
 
 with Files("GeolocationPosition*"):
     BUG_COMPONENT = ("Core", "DOM: Geolocation")
 
 with Files("ProfileTimelineMarker.webidl"):
     BUG_COMPONENT = ("DevTools", "Performance Tools (Profiler/Timeline)")
 
 with Files("ProgressEvent.webidl"):
@@ -264,17 +279,17 @@ with Files("ScriptProcessorNode.webidl")
 
 with Files("Selection.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Selection")
 
 with Files("ServiceWorker*"):
     BUG_COMPONENT = ("Core", "DOM: Service Workers")
 
 with Files("SimpleGestureEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("SocketCommon.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Device Interfaces")
 
 with Files("SourceBuffer*"):
     BUG_COMPONENT = ("Core", "Audio/Video")
 
 with Files("StereoPannerNode.webidl"):
@@ -300,23 +315,23 @@ with Files("TrackEvent.webidl"):
 
 with Files("U2F.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Device Interfaces")
 
 with Files("UDP*"):
     BUG_COMPONENT = ("Core", "DOM: Device Interfaces")
 
 with Files("UIEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("URL.webidl"):
     BUG_COMPONENT = ("Core", "Audio/Video")
 
 with Files("UserProximityEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("VTT*"):
     BUG_COMPONENT = ("Core", "Audio/Video")
 
 with Files("VRDisplay.webidl"):
     BUG_COMPONENT = ("Core", "Graphics")
 
 with Files("Video*"):
@@ -333,17 +348,17 @@ with Files("WebGL*"):
 
 with Files("WebGPU*"):
     BUG_COMPONENT = ("Core", "Canvas: WebGL")
 
 with Files("Webrtc*"):
     BUG_COMPONENT = ("Core", "WebRTC")
 
 with Files("WheelEvent.webidl"):
-    BUG_COMPONENT = ("Core", "DOM: Events")
+    BUG_COMPONENT = ("Core", "DOM: UI Events & Focus Handling")
 
 with Files("WidevineCDMManifest.webidl"):
     BUG_COMPONENT = ("Core", "Audio/Video: Playback")
 
 with Files("WindowOrWorkerGlobalScope.webidl"):
     BUG_COMPONENT = ("Core", "DOM: Workers")
 
 with Files("Worker*"):
--- a/dom/websocket/moz.build
+++ b/dom/websocket/moz.build
@@ -1,16 +1,16 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 with Files("**"):
-    BUG_COMPONENT = ("Core", "DOM: Core & HTML")
+    BUG_COMPONENT = ("Core", "DOM: Networking")
 
 EXPORTS.mozilla.dom += [
     'WebSocket.h',
 ]
 
 UNIFIED_SOURCES += [
     'WebSocket.cpp',
 ]
--- a/dom/workers/remoteworkers/RemoteWorkerManager.cpp
+++ b/dom/workers/remoteworkers/RemoteWorkerManager.cpp
@@ -234,17 +234,22 @@ RemoteWorkerManager::SelectTargetActorFo
     MOZ_ASSERT(bgParent);
 
     RefPtr<ContentParent> contentParent =
         BackgroundParent::GetContentParent(bgParent);
 
     auto scopeExit = MakeScopeExit(
         [&] { contentParents.AppendElement(std::move(contentParent)); });
 
-    if (IsWebRemoteType(contentParent->GetRemoteType())) {
+    const nsAString& remoteType = contentParent->GetRemoteType();
+    MOZ_DIAGNOSTIC_ASSERT(
+        !IsWebCoopCoepRemoteType(remoteType),
+        "COOP+COEP processes don't support remote workers right now");
+
+    if (IsWebRemoteType(remoteType)) {
       auto lock = contentParent->mRemoteWorkerActorData.Lock();
 
       if (lock->mCount || !lock->mShutdownStarted) {
         ++lock->mCount;
 
         // This won't cause any race conditions because the content process
         // should wait for the permissions to be received before executing the
         // Service Worker.
--- a/dom/xml/nsXMLPrettyPrinter.cpp
+++ b/dom/xml/nsXMLPrettyPrinter.cpp
@@ -108,18 +108,18 @@ nsresult nsXMLPrettyPrinter::PrettyPrint
   return NS_OK;
 }
 
 void nsXMLPrettyPrinter::MaybeUnhook(nsIContent* aContent) {
   // If aContent is null, the document-node was modified.
   // If it is not null but in the shadow tree or the <scrollbar> NACs,
   // the change was in the generated content, and it should be ignored.
   bool isGeneratedContent =
-      !aContent ? false
-                : aContent->GetBindingParent() || aContent->IsInShadowTree();
+      aContent &&
+      (aContent->IsInNativeAnonymousSubtree() || aContent->IsInShadowTree());
 
   if (!isGeneratedContent && !mUnhookPending) {
     // Can't blindly to mUnhookPending after AddScriptRunner,
     // since AddScriptRunner _could_ in theory run us
     // synchronously
     mUnhookPending = true;
     nsContentUtils::AddScriptRunner(NewRunnableMethod(
         "nsXMLPrettyPrinter::Unhook", this, &nsXMLPrettyPrinter::Unhook));
deleted file mode 100644
--- a/intl/hyphenation/glue/hnjalloc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-/*
- * To enable us to load hyphenation dictionaries from arbitrary resource URIs,
- * not just through file paths using stdio, we override the (few) stdio APIs
- * that hyphen.c uses and provide our own reimplementation that calls Gecko
- * i/o methods.
- */
-
-#include <stdio.h> /* ensure stdio.h is loaded before our macros */
-
-#undef FILE
-#define FILE hnjFile
-
-#define fopen(path, mode) hnjFopen(path, mode)
-#define fclose(file) hnjFclose(file)
-#define fgets(buf, count, file) hnjFgets(buf, count, file)
-#define feof(file) hnjFeof(file)
-#define fgetc(file) hnjFgetc(file)
-
-typedef struct hnjFile_ hnjFile;
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void* hnj_malloc(size_t size);
-void* hnj_realloc(void* ptr, size_t size);
-void hnj_free(void* ptr);
-
-hnjFile* hnjFopen(const char* aURISpec, const char* aMode);
-
-int hnjFclose(hnjFile* f);
-
-char* hnjFgets(char* s, int n, hnjFile* f);
-
-int hnjFeof(hnjFile* f);
-
-int hnjFgetc(hnjFile* f);
-
-#ifdef __cplusplus
-}
-#endif
deleted file mode 100644
--- a/intl/hyphenation/glue/hnjstdio.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-// This file provides substitutes for the basic stdio routines used by hyphen.c
-// to read its dictionary files. We #define the stdio names to these versions
-// in hnjalloc.h, so that we can use nsIURI and nsIInputStream to specify and
-// access the dictionary resources.
-
-#include "hnjalloc.h"
-
-#undef FILE  // Undo #defines from hnjalloc.h before #including other headers
-#undef fopen
-#undef fclose
-#undef fgets
-#undef feof
-#undef fgetc
-
-#include "nsNetUtil.h"
-#include "nsIInputStream.h"
-#include "nsIURI.h"
-#include "nsContentUtils.h"
-
-#define BUFSIZE 1024
-
-struct hnjFile_ {
-  nsCOMPtr<nsIInputStream> mStream;
-  char mBuffer[BUFSIZE];
-  uint32_t mCurPos;
-  uint32_t mLimit;
-  bool mEOF;
-};
-
-// replacement for fopen()
-// (not a full substitute: only supports read access)
-hnjFile* hnjFopen(const char* aURISpec, const char* aMode) {
-  // this override only needs to support "r"
-  NS_ASSERTION(!strcmp(aMode, "r"), "unsupported fopen() mode in hnjFopen");
-
-  nsCOMPtr<nsIURI> uri;
-  nsresult rv = NS_NewURI(getter_AddRefs(uri), aURISpec);
-  if (NS_FAILED(rv)) {
-    return nullptr;
-  }
-
-  nsCOMPtr<nsIChannel> channel;
-  rv = NS_NewChannel(getter_AddRefs(channel), uri,
-                     nsContentUtils::GetSystemPrincipal(),
-                     nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_DATA_IS_NULL,
-                     nsIContentPolicy::TYPE_OTHER);
-  if (NS_FAILED(rv)) {
-    return nullptr;
-  }
-
-  nsCOMPtr<nsIInputStream> instream;
-  rv = channel->Open(getter_AddRefs(instream));
-  if (NS_FAILED(rv)) {
-    return nullptr;
-  }
-
-  hnjFile* f = new hnjFile;
-  f->mStream = instream;
-  f->mCurPos = 0;
-  f->mLimit = 0;
-  f->mEOF = false;
-
-  return f;
-}
-
-// replacement for fclose()
-int hnjFclose(hnjFile* f) {
-  NS_ASSERTION(f && f->mStream, "bad argument to hnjFclose");
-
-  int result = 0;
-  nsresult rv = f->mStream->Close();
-  if (NS_FAILED(rv)) {
-    result = EOF;
-  }
-  f->mStream = nullptr;
-
-  delete f;
-  return result;
-}
-
-// replacement for fgetc()
-int hnjFgetc(hnjFile* f) {
-  if (f->mCurPos >= f->mLimit) {
-    f->mCurPos = 0;
-
-    nsresult rv = f->mStream->Read(f->mBuffer, BUFSIZE, &f->mLimit);
-    if (NS_FAILED(rv)) {
-      f->mLimit = 0;
-    }
-
-    if (f->mLimit == 0) {
-      f->mEOF = true;
-      return EOF;
-    }
-  }
-
-  return f->mBuffer[f->mCurPos++];
-}
-
-// replacement for fgets()
-// (not a full reimplementation, but sufficient for libhyphen's needs)
-char* hnjFgets(char* s, int n, hnjFile* f) {
-  NS_ASSERTION(s && f, "bad argument to hnjFgets");
-
-  int i = 0;
-  while (i < n - 1) {
-    int c = hnjFgetc(f);
-
-    if (c == EOF) {
-      break;
-    }
-
-    s[i++] = c;
-
-    if (c == '\n' || c == '\r') {
-      break;
-    }
-  }
-
-  if (i == 0) {
-    return nullptr;  // end of file
-  }
-
-  s[i] = '\0';  // null-terminate the returned string
-  return s;
-}
-
-int hnjFeof(hnjFile* f) { return f->mEOF ? EOF : 0; }
--- a/intl/hyphenation/glue/moz.build
+++ b/intl/hyphenation/glue/moz.build
@@ -9,21 +9,23 @@ EXPORTS += [
     'nsHyphenator.h',
 ]
 
 UNIFIED_SOURCES += [
     'nsHyphenationManager.cpp',
     'nsHyphenator.cpp',
 ]
 
-# These files cannot be built in unified mode because they include hnjalloc.h.
-SOURCES += [
-    'hnjstdio.cpp',
-]
-
-LOCAL_INCLUDES += [
-    '../hyphen',
-]
-
 FINAL_LIBRARY = 'xul'
 
 if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
     CXXFLAGS += ['-Wno-error=shadow']
+
+if CONFIG['COMPILE_ENVIRONMENT']:
+    GENERATED_FILES += [
+        'mapped_hyph.h'
+    ]
+
+    generated = GENERATED_FILES['mapped_hyph.h']
+    generated.script = '/layout/style/RunCbindgen.py:generate'
+    generated.inputs = [
+        '/third_party/rust/mapped_hyph'
+    ]
--- a/intl/hyphenation/glue/nsHyphenationManager.cpp
+++ b/intl/hyphenation/glue/nsHyphenationManager.cpp
@@ -32,68 +32,48 @@ static const char kMemoryPressureNotific
 
 // To report memory usage via telemetry, we observe a notification when the
 // process is about to be shut down; unfortunately, parent and child processes
 // receive different notifications, so we have to account for that in order to
 // report usage from both process types.
 static const char kParentShuttingDownNotification[] = "profile-before-change";
 static const char kChildShuttingDownNotification[] = "content-child-shutdown";
 
-class HyphenReporter final : public nsIMemoryReporter,
-                             public CountingAllocatorBase<HyphenReporter> {
+class HyphenReporter final : public nsIMemoryReporter {
  private:
   ~HyphenReporter() = default;
 
  public:
   NS_DECL_ISUPPORTS
 
   // For telemetry, we report the memory rounded up to the nearest KB.
   static uint32_t MemoryAllocatedInKB() {
-    return (MemoryAllocated() + 1023) / 1024;
+    size_t total = 0;
+    if (nsHyphenationManager::Instance()) {
+      total = nsHyphenationManager::Instance()->SizeOfIncludingThis(
+          moz_malloc_size_of);
+    }
+    return (total + 1023) / 1024;
   }
 
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) override {
-    size_t total = MemoryAllocated();
+    size_t total = 0;
     if (nsHyphenationManager::Instance()) {
-      total += nsHyphenationManager::Instance()->SizeOfIncludingThis(
+      total = nsHyphenationManager::Instance()->SizeOfIncludingThis(
           moz_malloc_size_of);
     }
     MOZ_COLLECT_REPORT("explicit/hyphenation", KIND_HEAP, UNITS_BYTES, total,
                        "Memory used by hyphenation data.");
     return NS_OK;
   }
 };
 
 NS_IMPL_ISUPPORTS(HyphenReporter, nsIMemoryReporter)
 
-template <>
-CountingAllocatorBase<HyphenReporter>::AmountType
-    CountingAllocatorBase<HyphenReporter>::sAmount(0);
-
-/**
- * Allocation wrappers to track the amount of memory allocated by libhyphen.
- * Note that libhyphen assumes its malloc/realloc functions are infallible!
- */
-extern "C" {
-void* hnj_malloc(size_t aSize);
-void* hnj_realloc(void* aPtr, size_t aSize);
-void hnj_free(void* aPtr);
-};
-
-void* hnj_malloc(size_t aSize) {
-  return HyphenReporter::InfallibleCountingMalloc(aSize);
-}
-
-void* hnj_realloc(void* aPtr, size_t aSize) {
-  return HyphenReporter::InfallibleCountingRealloc(aPtr, aSize);
-}
-
-void hnj_free(void* aPtr) { HyphenReporter::CountingFree(aPtr); }
-
 nsHyphenationManager* nsHyphenationManager::sInstance = nullptr;
 
 NS_IMPL_ISUPPORTS(nsHyphenationManager, nsIObserver)
 
 NS_IMETHODIMP
 nsHyphenationManager::Observe(nsISupports* aSubject, const char* aTopic,
                               const char16_t* aData) {
   if (!nsCRT::strcmp(aTopic, kMemoryPressureNotification)) {
@@ -252,17 +232,17 @@ void nsHyphenationManager::LoadPatternLi
   }
 
   RefPtr<nsZipArchive> zip = Omnijar::GetReader(aType);
   if (!zip) {
     return;
   }
 
   nsZipFind* find;
-  zip->FindInit("hyphenation/hyph_*.dic", &find);
+  zip->FindInit("hyphenation/hyph_*.hyf", &find);
   if (!find) {
     return;
   }
 
   const char* result;
   uint16_t len;
   while (NS_SUCCEEDED(find->FindNext(&result, &len))) {
     nsCString uriString(base);
@@ -273,17 +253,17 @@ void nsHyphenationManager::LoadPatternLi
       continue;
     }
     nsCString locale;
     rv = uri->GetPathQueryRef(locale);
     if (NS_FAILED(rv)) {
       continue;
     }
     ToLowerCase(locale);
-    locale.SetLength(locale.Length() - 4);     // strip ".dic"
+    locale.SetLength(locale.Length() - 4);     // strip ".hyf"
     locale.Cut(0, locale.RFindChar('/') + 1);  // strip directory
     if (StringBeginsWith(locale, NS_LITERAL_CSTRING("hyph_"))) {
       locale.Cut(0, 5);
     }
     for (uint32_t i = 0; i < locale.Length(); ++i) {
       if (locale[i] == '_') {
         locale.Replace(i, 1, '-');
       }
@@ -318,23 +298,23 @@ void nsHyphenationManager::LoadPatternLi
   }
 
   nsCOMPtr<nsIFile> file;
   while (NS_SUCCEEDED(files->GetNextFile(getter_AddRefs(file))) && file) {
     nsAutoString dictName;
     file->GetLeafName(dictName);
     NS_ConvertUTF16toUTF8 locale(dictName);
     ToLowerCase(locale);
-    if (!StringEndsWith(locale, NS_LITERAL_CSTRING(".dic"))) {
+    if (!StringEndsWith(locale, NS_LITERAL_CSTRING(".hyf"))) {
       continue;
     }
     if (StringBeginsWith(locale, NS_LITERAL_CSTRING("hyph_"))) {
       locale.Cut(0, 5);
     }
-    locale.SetLength(locale.Length() - 4);  // strip ".dic"
+    locale.SetLength(locale.Length() - 4);  // strip ".hyf"
     for (uint32_t i = 0; i < locale.Length(); ++i) {
       if (locale[i] == '_') {
         locale.Replace(i, 1, '-');
       }
     }
 #ifdef DEBUG_hyph
     printf("adding hyphenation patterns for %s: %s\n", locale.get(),
            NS_ConvertUTF16toUTF8(dictName).get());
@@ -378,14 +358,11 @@ size_t nsHyphenationManager::SizeOfInclu
 
   result += mHyphAliases.ShallowSizeOfExcludingThis(aMallocSizeOf);
 
   result += mPatternFiles.ShallowSizeOfExcludingThis(aMallocSizeOf);
   // Measurement of the URIs stored in mPatternFiles may be added later if DMD
   // finds it is worthwhile.
 
   result += mHyphenators.ShallowSizeOfExcludingThis(aMallocSizeOf);
-  for (auto i = mHyphenators.ConstIter(); !i.Done(); i.Next()) {
-    result += aMallocSizeOf(i.Data().get());
-  }
 
   return result;
 }
--- a/intl/hyphenation/glue/nsHyphenator.cpp
+++ b/intl/hyphenation/glue/nsHyphenator.cpp
@@ -1,42 +1,155 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsHyphenator.h"
+
+#include "mozilla/Telemetry.h"
+#include "nsContentUtils.h"
+#include "nsIChannel.h"
 #include "nsIFile.h"
-#include "nsUTF8Utils.h"
-#include "nsUnicodeProperties.h"
+#include "nsIFileURL.h"
+#include "nsIInputStream.h"
+#include "nsIJARURI.h"
 #include "nsIURI.h"
-#include "mozilla/Telemetry.h"
+#include "nsNetUtil.h"
+#include "nsUnicodeProperties.h"
+#include "nsUTF8Utils.h"
+
+#include "mapped_hyph.h"
 
-#include "hyphen.h"
+static const void* GetItemPtrFromJarURI(nsIJARURI* aJAR, uint32_t* aLength) {
+  // Try to get the jarfile's nsZipArchive, find the relevant item, and return
+  // a pointer to its data provided it is stored uncompressed.
+  nsCOMPtr<nsIURI> jarFile;
+  if (NS_FAILED(aJAR->GetJARFile(getter_AddRefs(jarFile)))) {
+    return nullptr;
+  }
+  nsCOMPtr<nsIFileURL> fileUrl = do_QueryInterface(jarFile);
+  if (!fileUrl) {
+    return nullptr;
+  }
+  nsCOMPtr<nsIFile> file;
+  fileUrl->GetFile(getter_AddRefs(file));
+  if (!file) {
+    return nullptr;
+  }
+  RefPtr<nsZipArchive> archive = mozilla::Omnijar::GetReader(file);
+  if (archive) {
+    nsCString path;
+    aJAR->GetJAREntry(path);
+    nsZipItem* item = archive->GetItem(path.get());
+    if (item && item->Compression() == 0 && item->Size() > 0) {
+      // We do NOT own this data, but it won't go away until the omnijar
+      // file is closed during shutdown.
+      const uint8_t* data = archive->GetData(item);
+      if (data) {
+        *aLength = item->Size();
+        return data;
+      }
+    }
+  }
+  return nullptr;
+}
+
+static const void* LoadResourceFromURI(nsIURI* aURI, uint32_t* aLength) {
+  nsCOMPtr<nsIChannel> channel;
+  if (NS_FAILED(NS_NewChannel(getter_AddRefs(channel), aURI,
+                              nsContentUtils::GetSystemPrincipal(),
+                              nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_DATA_IS_NULL,
+                              nsIContentPolicy::TYPE_OTHER))) {
+    return nullptr;
+  }
+  nsCOMPtr<nsIInputStream> instream;
+  if (NS_FAILED(channel->Open(getter_AddRefs(instream)))) {
+    return nullptr;
+  }
+  // Check size, bail out if it is excessively large (the largest of the
+  // hyphenation files currently shipped with Firefox is around 1MB
+  // uncompressed).
+  uint64_t available;
+  if (NS_FAILED(instream->Available(&available)) || !available ||
+      available > 16 * 1024 * 1024) {
+    return nullptr;
+  }
+  char* buffer = static_cast<char*>(malloc(available));
+  if (!buffer) {
+    return nullptr;
+  }
+  uint32_t bytesRead = 0;
+  if (NS_FAILED(instream->Read(buffer, available, &bytesRead)) ||
+      bytesRead != available) {
+    free(buffer);
+    return nullptr;
+  }
+  *aLength = bytesRead;
+  return buffer;
+}
 
 nsHyphenator::nsHyphenator(nsIURI* aURI, bool aHyphenateCapitalized)
-    : mDict(nullptr), mHyphenateCapitalized(aHyphenateCapitalized) {
-  nsCString uriSpec;
-  nsresult rv = aURI->GetSpec(uriSpec);
-  if (NS_FAILED(rv)) {
-    return;
-  }
+    : mDict(nullptr),
+      mDictSize(0),
+      mOwnsDict(false),
+      mHyphenateCapitalized(aHyphenateCapitalized) {
   Telemetry::AutoTimer<Telemetry::HYPHENATION_LOAD_TIME> telemetry;
-  mDict = hnj_hyphen_load(uriSpec.get());
-#ifdef DEBUG
-  if (mDict) {
-    printf("loaded hyphenation patterns from %s\n", uriSpec.get());
+
+  nsCOMPtr<nsIJARURI> jar = do_QueryInterface(aURI);
+  if (jar) {
+    // This gives us a raw pointer into the omnijar's data (if uncompressed);
+    // we do not own it and must not attempt to free it!
+    mDict = GetItemPtrFromJarURI(jar, &mDictSize);
+    if (!mDict) {
+      // Omnijar must be compressed: we need to decompress the item into our
+      // own buffer. (Currently this is the case on Android.)
+      // TODO: Allocate in shared memory for all content processes to use.
+      mDict = LoadResourceFromURI(aURI, &mDictSize);
+      mOwnsDict = true;
+    }
+    if (mDict) {
+      // Reject the resource from omnijar if it fails to validate. (If this
+      // happens, we will hit the MOZ_ASSERT_UNREACHABLE at the end of the
+      // constructor, indicating the build is broken in some way.)
+      if (!mapped_hyph_is_valid_hyphenator(static_cast<const uint8_t*>(mDict),
+                                           mDictSize)) {
+        if (mOwnsDict) {
+          free(const_cast<void*>(mDict));
+        }
+        mDict = nullptr;
+        mDictSize = 0;
+      }
+    }
+  } else if (mozilla::net::SchemeIsFile(aURI)) {
+    // Ask the Rust lib to mmap the file. In this case our mDictSize field
+    // remains zero; mDict is not a pointer to the raw data but an opaque
+    // reference to a Rust object, and can only be freed by passing it to
+    // mapped_hyph_free_dictionary().
+    nsAutoCString path;
+    aURI->GetFilePath(path);
+    mDict = mapped_hyph_load_dictionary(path.get());
   }
-#endif
+
+  if (!mDict) {
+    // This should never happen, unless someone has included an invalid
+    // hyphenation file that fails to load.
+    MOZ_ASSERT_UNREACHABLE("invalid hyphenation resource?");
+  }
 }
 
 nsHyphenator::~nsHyphenator() {
-  if (mDict != nullptr) {
-    hnj_hyphen_free((HyphenDict*)mDict);
-    mDict = nullptr;
+  if (mDict) {
+    if (mDictSize) {
+      if (mOwnsDict) {
+        free(const_cast<void*>(mDict));
+      }
+    } else {
+      mapped_hyph_free_dictionary((HyphDic*)mDict);
+    }
   }
 }
 
 bool nsHyphenator::IsValid() { return (mDict != nullptr); }
 
 nsresult nsHyphenator::Hyphenate(const nsAString& aString,
                                  nsTArray<bool>& aHyphens) {
   if (!aHyphens.SetLength(aString.Length(), mozilla::fallible)) {
@@ -78,53 +191,48 @@ nsresult nsHyphenator::Hyphenate(const n
     }
   }
 
   return NS_OK;
 }
 
 void nsHyphenator::HyphenateWord(const nsAString& aString, uint32_t aStart,
                                  uint32_t aLimit, nsTArray<bool>& aHyphens) {
-  // Convert word from aStart and aLimit in aString to utf-8 for libhyphen,
+  // Convert word from aStart and aLimit in aString to utf-8 for mapped_hyph,
   // lowercasing it as we go so that it will match the (lowercased) patterns
   // (bug 1105644).
   nsAutoCString utf8;
-  const char16_t* const begin = aString.BeginReading();
-  const char16_t* cur = begin + aStart;
-  const char16_t* end = begin + aLimit;
+  const char16_t* cur = aString.BeginReading() + aStart;
+  const char16_t* end = aString.BeginReading() + aLimit;
   bool firstLetter = true;
   while (cur < end) {
     uint32_t ch = *cur++;
 
     if (NS_IS_HIGH_SURROGATE(ch)) {
       if (cur < end && NS_IS_LOW_SURROGATE(*cur)) {
         ch = SURROGATE_TO_UCS4(ch, *cur++);
       } else {
-        ch = 0xfffd;  // unpaired surrogate, treat as REPLACEMENT CHAR
+        return;  // unpaired surrogate: bail out, don't hyphenate broken text
       }
     } else if (NS_IS_LOW_SURROGATE(ch)) {
-      ch = 0xfffd;  // unpaired surrogate
+      return;  // unpaired surrogate
     }
 
     // XXX What about language-specific casing? Consider Turkish I/i...
     // In practice, it looks like the current patterns will not be
     // affected by this, as they treat dotted and undotted i similarly.
     uint32_t origCh = ch;
     ch = ToLowerCase(ch);
 
     if (ch != origCh) {
-      if (firstLetter) {
-        // Avoid hyphenating capitalized words (bug 1550532) unless explicitly
-        // allowed by prefs for the language in use.
-        if (!mHyphenateCapitalized) {
-          return;
-        }
-      } else {
-        // Also never auto-hyphenate a word that has internal caps, as it may
-        // well be an all-caps acronym or a quirky name like iTunes.
+      // Avoid hyphenating capitalized words (bug 1550532) unless explicitly
+      // allowed by prefs for the language in use.
+      // Also never auto-hyphenate a word that has internal caps, as it may
+      // well be an all-caps acronym or a quirky name like iTunes.
+      if (!mHyphenateCapitalized || !firstLetter) {
         return;
       }
     }
     firstLetter = false;
 
     if (ch < 0x80) {  // U+0000 - U+007F
       utf8.Append(ch);
     } else if (ch < 0x0800) {  // U+0100 - U+07FF
@@ -137,36 +245,48 @@ void nsHyphenator::HyphenateWord(const n
     } else {
       utf8.Append(0xF0 | (ch >> 18));
       utf8.Append(0x80 | (0x003F & (ch >> 12)));
       utf8.Append(0x80 | (0x003F & (ch >> 6)));
       utf8.Append(0x80 | (0x003F & ch));
     }
   }
 
-  AutoTArray<char, 200> utf8hyphens;
-  utf8hyphens.SetLength(utf8.Length() + 5);
-  char** rep = nullptr;
-  int* pos = nullptr;
-  int* cut = nullptr;
-  int err = hnj_hyphen_hyphenate2((HyphenDict*)mDict, utf8.BeginReading(),
-                                  utf8.Length(), utf8hyphens.Elements(),
-                                  nullptr, &rep, &pos, &cut);
-  if (!err) {
-    // Surprisingly, hnj_hyphen_hyphenate2 converts the 'hyphens' buffer
-    // from utf8 code unit indexing (which would match the utf8 input
-    // string directly) to Unicode character indexing.
-    // We then need to convert this to utf16 code unit offsets for Gecko.
-    const char* hyphPtr = utf8hyphens.Elements();
-    const char16_t* cur = begin + aStart;
-    const char16_t* end = begin + aLimit;
-    while (cur < end) {
-      if (*hyphPtr & 0x01) {
-        aHyphens[cur - begin] = true;
+  AutoTArray<uint8_t, 200> hyphenValues;
+  hyphenValues.SetLength(utf8.Length());
+  int32_t result;
+  if (mDictSize > 0) {
+    result = mapped_hyph_find_hyphen_values_raw(
+        static_cast<const uint8_t*>(mDict), mDictSize, utf8.BeginReading(),
+        utf8.Length(), hyphenValues.Elements(), hyphenValues.Length());
+  } else {
+    result = mapped_hyph_find_hyphen_values_dic(
+        static_cast<const HyphDic*>(mDict), utf8.BeginReading(), utf8.Length(),
+        hyphenValues.Elements(), hyphenValues.Length());
+  }
+  if (result > 0) {
+    // We need to convert UTF-8 indexing as used by the hyphenation lib into
+    // UTF-16 indexing of the aHyphens[] array for Gecko.
+    uint32_t utf16index = 0;
+    for (uint32_t utf8index = 0; utf8index < utf8.Length();) {
+      // We know utf8 is valid, so we only need to look at the first byte of
+      // each character to determine its length and the corresponding UTF-16
+      // length to add to utf16index.
+      const uint8_t leadByte = utf8[utf8index];
+      if (leadByte < 0x80) {
+        utf8index += 1;
+      } else if (leadByte < 0xE0) {
+        utf8index += 2;
+      } else if (leadByte < 0xF0) {
+        utf8index += 3;
+      } else {
+        utf8index += 4;
       }
-      cur++;
-      if (cur < end && NS_IS_SURROGATE_PAIR(*(cur - 1), *cur)) {
-        cur++;
+      // The hyphenation value of interest is the one for the last code unit
+      // of the utf-8 character, and is recorded on the last code unit of the
+      // utf-16 character (in the case of a surrogate pair).
+      utf16index += leadByte >= 0xF0 ? 2 : 1;
+      if (utf16index > 0 && (hyphenValues[utf8index - 1] & 0x01)) {
+        aHyphens[aStart + utf16index - 1] = true;
       }
-      hyphPtr++;
     }
   }
 }
--- a/intl/hyphenation/glue/nsHyphenator.h
+++ b/intl/hyphenation/glue/nsHyphenator.h
@@ -23,13 +23,20 @@ class nsHyphenator {
   nsresult Hyphenate(const nsAString& aText, nsTArray<bool>& aHyphens);
 
  private:
   ~nsHyphenator();
 
   void HyphenateWord(const nsAString& aString, uint32_t aStart, uint32_t aLimit,
                      nsTArray<bool>& aHyphens);
 
-  void* mDict;
+  const void* mDict;  // If mDictSize > 0, this points to a raw byte buffer
+                      // containing the hyphenation dictionary data (in the
+                      // memory-mapped omnijar, or owned by us if mOwnsDict);
+                      // if mDictSize == 0, it's a HyphDic reference created
+                      // by mapped_hyph_load_dictionary() and must be released
+                      // by calling mapped_hyph_free_dictionary().
+  uint32_t mDictSize;
+  bool mOwnsDict;
   bool mHyphenateCapitalized;
 };
 
 #endif  // nsHyphenator_h__
deleted file mode 100644
--- a/intl/hyphenation/hyphen/AUTHORS
+++ /dev/null
@@ -1,17 +0,0 @@
-Libhnj was written by Raph Levien <raph at acm dot org>.
-
-Original Libhnj source with OOo's patches are managed by Rene Engelhard and
-Chris Halls at Debian: http://packages.debian.org/stable/libdevel/libhnj-dev
-and http://packages.debian.org/unstable/source/libhnj
-
-This subset of Libhnj was extended by
-Peter Novodvorsky <nidd at alt-linux dot org> (OOo integration),
-László Németh <nemeth at numbertext dot org> (non-standard and compound
-hyphenation with Unicode support),
-Nanning Buitenhuis <nanning at elvenkind dot com> (substrings.c)
-
-Write bug reports to László Németh or in the bug tracker of hunspell.sf.net.
-
----
-Please contact Raph Levien for information about licensing for
-proprietary applications.
deleted file mode 100644
--- a/intl/hyphenation/hyphen/COPYING
+++ /dev/null
@@ -1,17 +0,0 @@
-GPL 2.0/LGPL 2.1/MPL 1.1 tri-license
-
-The contents of this software may be used under the terms of
-the GNU General Public License Version 2 or later (the "GPL"), or
-the GNU Lesser General Public License Version 2.1 or later (the "LGPL",
-see COPYING.LGPL) or the Mozilla Public License Version 1.1 or later
-(the "MPL", see COPYING.MPL).
-
-The Plain TeX hyphenation tables "hyphen.tex" by Donald E. Knuth
-has a non MPL/LGPL compatible license, but freely redistributable:
-"Unlimited copying and redistribution of this file are permitted as long
-as this file is not modified. Modifications are permitted, but only if
-the resulting file is not named hyphen.tex."
-
-Software distributed under these licenses is distributed on an "AS IS" basis,
-WITHOUT WARRANTY OF ANY KIND, either express or implied. See the licences
-for the specific language governing rights and limitations under the licenses.
deleted file mode 100644
--- a/intl/hyphenation/hyphen/COPYING.LGPL
+++ /dev/null
@@ -1,515 +0,0 @@
-
-                  GNU LESSER GENERAL PUBLIC LICENSE
-                       Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL.  It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
-                            Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
-  This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it.  You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations
-below.
-
-  When we speak of free software, we are referring to freedom of use,
-not price.  Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
-  To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surr