Merge m-c to graphics
authorKartikaya Gupta <kgupta@mozilla.com>
Mon, 06 Feb 2017 11:53:47 -0500
changeset 342201 c3195fb2f038505eb6975789ca7c5f81ae3722c8
parent 342200 4f4ba281b7ab46f48395a2129cd2211004f3c9f8 (current diff)
parent 340908 12c02bf624c48903b155428f7c8a419ba7a333a6 (diff)
child 342202 917d8c381251a7e573a242fb38dc0ec8165f33be
push id31345
push userkwierso@gmail.com
push dateFri, 10 Feb 2017 20:35:09 +0000
treeherdermozilla-central@a288fe35e494 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
milestone54.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge m-c to graphics MozReview-Commit-ID: AGZ8pI8vmAs
config/rules.mk
dom/ipc/TabChild.cpp
dom/media/platforms/gonk/GonkAudioDecoderManager.cpp
dom/media/platforms/gonk/GonkAudioDecoderManager.h
dom/media/platforms/gonk/GonkDecoderModule.cpp
dom/media/platforms/gonk/GonkDecoderModule.h
dom/media/platforms/gonk/GonkMediaDataDecoder.cpp
dom/media/platforms/gonk/GonkMediaDataDecoder.h
dom/media/platforms/gonk/GonkVideoDecoderManager.cpp
dom/media/platforms/gonk/GonkVideoDecoderManager.h
dom/media/platforms/gonk/moz.build
dom/media/platforms/wrappers/FuzzingWrapper.cpp
dom/media/platforms/wrappers/FuzzingWrapper.h
gfx/layers/client/ClientLayerManager.h
gfx/webrender/Cargo.toml
gfx/webrender/build.rs
gfx/webrender/res/clip_shared.glsl
gfx/webrender/res/cs_blur.fs.glsl
gfx/webrender/res/cs_blur.vs.glsl
gfx/webrender/res/cs_box_shadow.vs.glsl
gfx/webrender/res/cs_clip_image.fs.glsl
gfx/webrender/res/cs_clip_image.vs.glsl
gfx/webrender/res/cs_clip_rectangle.fs.glsl
gfx/webrender/res/cs_clip_rectangle.vs.glsl
gfx/webrender/res/cs_text_run.vs.glsl
gfx/webrender/res/debug_color.fs.glsl
gfx/webrender/res/debug_color.vs.glsl
gfx/webrender/res/debug_font.fs.glsl
gfx/webrender/res/debug_font.vs.glsl
gfx/webrender/res/prim_shared.glsl
gfx/webrender/res/ps_angle_gradient.vs.glsl
gfx/webrender/res/ps_blend.vs.glsl
gfx/webrender/res/ps_border.fs.glsl
gfx/webrender/res/ps_border.glsl
gfx/webrender/res/ps_border.vs.glsl
gfx/webrender/res/ps_box_shadow.vs.glsl
gfx/webrender/res/ps_cache_image.vs.glsl
gfx/webrender/res/ps_clear.fs.glsl
gfx/webrender/res/ps_clear.vs.glsl
gfx/webrender/res/ps_composite.fs.glsl
gfx/webrender/res/ps_composite.vs.glsl
gfx/webrender/res/ps_gradient.fs.glsl
gfx/webrender/res/ps_gradient.vs.glsl
gfx/webrender/res/ps_image.fs.glsl
gfx/webrender/res/ps_image.vs.glsl
gfx/webrender/res/ps_rectangle.fs.glsl
gfx/webrender/res/ps_rectangle.vs.glsl
gfx/webrender/res/ps_text_run.fs.glsl
gfx/webrender/res/ps_text_run.vs.glsl
gfx/webrender/res/ps_yuv_image.vs.glsl
gfx/webrender/res/shared.glsl
gfx/webrender/res/shared_other.glsl
gfx/webrender/src/debug_render.rs
gfx/webrender/src/device.rs
gfx/webrender/src/frame.rs
gfx/webrender/src/gpu_store.rs
gfx/webrender/src/internal_types.rs
gfx/webrender/src/layer.rs
gfx/webrender/src/lib.rs
gfx/webrender/src/mask_cache.rs
gfx/webrender/src/platform/macos/font.rs
gfx/webrender/src/platform/unix/font.rs
gfx/webrender/src/platform/windows/font.rs
gfx/webrender/src/prim_store.rs
gfx/webrender/src/profiler.rs
gfx/webrender/src/record.rs
gfx/webrender/src/render_backend.rs
gfx/webrender/src/renderer.rs
gfx/webrender/src/resource_cache.rs
gfx/webrender/src/scene.rs
gfx/webrender/src/spring.rs
gfx/webrender/src/texture_cache.rs
gfx/webrender/src/tiling.rs
gfx/webrender/src/util.rs
gfx/webrender_bindings/Cargo.toml
gfx/webrender_bindings/src/bindings.rs
gfx/webrender_bindings/src/lib.rs
gfx/webrender_traits/Cargo.toml
gfx/webrender_traits/src/api.rs
gfx/webrender_traits/src/channel.rs
gfx/webrender_traits/src/channel_mpsc.rs
gfx/webrender_traits/src/display_item.rs
gfx/webrender_traits/src/display_list.rs
gfx/webrender_traits/src/lib.rs
gfx/webrender_traits/src/stacking_context.rs
gfx/webrender_traits/src/types.rs
gfx/webrender_traits/src/units.rs
gfx/webrender_traits/src/webgl.rs
layout/reftests/bugs/reftest.list
layout/reftests/svg/reftest.list
modules/libpref/init/all.js
taskcluster/ci/build/linux.yml
taskcluster/ci/build/macosx.yml
taskcluster/ci/build/windows.yml
taskcluster/ci/test/test-platforms.yml
taskcluster/ci/test/test-sets.yml
taskcluster/taskgraph/transforms/gecko_v2_whitelist.py
testing/mozharness/mozharness/mozilla/building/buildbase.py
third_party/rust/app_units/.cargo-ok
third_party/rust/app_units/.gitignore
third_party/rust/bincode/.cargo-checksum.json
third_party/rust/bincode/.cargo-ok
third_party/rust/bincode/Cargo.toml
third_party/rust/bincode/src/rustc_serialize/writer.rs
third_party/rust/bincode/src/serde/mod.rs
third_party/rust/bincode/src/serde/writer.rs
third_party/rust/bincode/tests/test.rs
third_party/rust/bit-set/.cargo-ok
third_party/rust/bit-set/.gitignore
third_party/rust/bit-set/LICENSE-APACHE
third_party/rust/bit-set/LICENSE-MIT
third_party/rust/bit-vec/.cargo-ok
third_party/rust/bit-vec/.gitignore
third_party/rust/bit-vec/LICENSE-APACHE
third_party/rust/bit-vec/LICENSE-MIT
third_party/rust/bitflags/.cargo-ok
third_party/rust/bitflags/LICENSE-APACHE
third_party/rust/bitflags/LICENSE-MIT
third_party/rust/cgl/.cargo-ok
third_party/rust/cgl/.gitignore
third_party/rust/cgl/LICENSE-APACHE
third_party/rust/cgl/LICENSE-MIT
third_party/rust/core-foundation-sys/.cargo-checksum.json
third_party/rust/core-foundation-sys/.cargo-ok
third_party/rust/core-foundation-sys/Cargo.toml
third_party/rust/core-foundation-sys/src/array.rs
third_party/rust/core-foundation-sys/src/base.rs
third_party/rust/core-foundation-sys/src/dictionary.rs
third_party/rust/core-foundation-sys/src/lib.rs
third_party/rust/core-foundation-sys/src/string.rs
third_party/rust/core-foundation/.cargo-checksum.json
third_party/rust/core-foundation/.cargo-ok
third_party/rust/core-foundation/Cargo.toml
third_party/rust/core-foundation/src/array.rs
third_party/rust/core-foundation/src/dictionary.rs
third_party/rust/core-foundation/src/lib.rs
third_party/rust/core-graphics/.cargo-checksum.json
third_party/rust/core-graphics/.cargo-ok
third_party/rust/core-graphics/.travis.yml
third_party/rust/core-graphics/Cargo.toml
third_party/rust/core-graphics/LICENSE-APACHE
third_party/rust/core-graphics/LICENSE-MIT
third_party/rust/core-graphics/src/context.rs
third_party/rust/core-graphics/src/lib.rs
third_party/rust/core-text/.cargo-checksum.json
third_party/rust/core-text/.cargo-ok
third_party/rust/core-text/.travis.yml
third_party/rust/core-text/Cargo.toml
third_party/rust/core-text/LICENSE-APACHE
third_party/rust/core-text/LICENSE-MIT
third_party/rust/core-text/src/font_descriptor.rs
third_party/rust/dwrote/.cargo-checksum.json
third_party/rust/dwrote/.cargo-ok
third_party/rust/dwrote/Cargo.toml
third_party/rust/dwrote/build.rs
third_party/rust/dwrote/src/bitmap_render_target.rs
third_party/rust/dwrote/src/comptr.rs
third_party/rust/dwrote/src/font.rs
third_party/rust/dwrote/src/font_face.rs
third_party/rust/dwrote/src/font_file.rs
third_party/rust/dwrote/src/gdi_interop.rs
third_party/rust/dwrote/src/glyph_run_analysis.rs
third_party/rust/dwrote/src/lib.rs
third_party/rust/dwrote/src/rendering_params.rs
third_party/rust/dwrote/src/test.rs
third_party/rust/dwrote/src/types.rs
third_party/rust/euclid/.cargo-checksum.json
third_party/rust/euclid/.cargo-ok
third_party/rust/euclid/.gitignore
third_party/rust/euclid/Cargo.toml
third_party/rust/euclid/LICENSE-APACHE
third_party/rust/euclid/LICENSE-MIT
third_party/rust/euclid/src/length.rs
third_party/rust/euclid/src/matrix2d.rs
third_party/rust/euclid/src/matrix4d.rs
third_party/rust/euclid/src/point.rs
third_party/rust/euclid/src/rect.rs
third_party/rust/euclid/src/size.rs
third_party/rust/fnv/.cargo-ok
third_party/rust/fnv/.gitignore
third_party/rust/freetype/.cargo-checksum.json
third_party/rust/freetype/.cargo-ok
third_party/rust/freetype/Cargo.toml
third_party/rust/freetype/LICENSE-APACHE
third_party/rust/freetype/LICENSE-MIT
third_party/rust/freetype/src/freetype.rs
third_party/rust/freetype/src/lib.rs
third_party/rust/freetype/src/tt_os2.rs
third_party/rust/gdi32-sys/.cargo-ok
third_party/rust/gl_generator/.cargo-ok
third_party/rust/gleam/.cargo-checksum.json
third_party/rust/gleam/.cargo-ok
third_party/rust/gleam/Cargo.toml
third_party/rust/gleam/LICENSE-APACHE
third_party/rust/gleam/LICENSE-MIT
third_party/rust/gleam/build.rs
third_party/rust/gleam/src/gl.rs
third_party/rust/heapsize/.cargo-checksum.json
third_party/rust/heapsize/.cargo-ok
third_party/rust/heapsize/.gitignore
third_party/rust/heapsize/.travis.yml
third_party/rust/heapsize/Cargo.toml
third_party/rust/heapsize/src/lib.rs
third_party/rust/kernel32-sys/.cargo-ok
third_party/rust/khronos_api/.cargo-ok
third_party/rust/lazy_static/.cargo-ok
third_party/rust/lazy_static/.gitignore
third_party/rust/log/.cargo-ok
third_party/rust/log/.gitignore
third_party/rust/log/LICENSE-APACHE
third_party/rust/log/LICENSE-MIT
third_party/rust/log/appveyor.yml
third_party/rust/matches/LICENSE
third_party/rust/num-traits/.cargo-ok
third_party/rust/offscreen_gl_context/.cargo-checksum.json
third_party/rust/offscreen_gl_context/.cargo-ok
third_party/rust/offscreen_gl_context/.gitignore
third_party/rust/offscreen_gl_context/Cargo.toml
third_party/rust/offscreen_gl_context/build.rs
third_party/rust/offscreen_gl_context/src/draw_buffer.rs
third_party/rust/offscreen_gl_context/src/gl_context.rs
third_party/rust/offscreen_gl_context/src/lib.rs
third_party/rust/offscreen_gl_context/src/platform/mod.rs
third_party/rust/offscreen_gl_context/src/platform/with_wgl/native_gl_context.rs
third_party/rust/offscreen_gl_context/src/tests.rs
third_party/rust/osmesa-sys/.cargo-ok
third_party/rust/osmesa-sys/.gitignore
third_party/rust/quote/.cargo-checksum.json
third_party/rust/quote/.cargo-ok
third_party/rust/quote/Cargo.toml
third_party/rust/quote/src/lib.rs
third_party/rust/quote/src/to_tokens.rs
third_party/rust/quote/src/tokens.rs
third_party/rust/rustc-serialize/.cargo-checksum.json
third_party/rust/rustc-serialize/.cargo-ok
third_party/rust/rustc-serialize/.travis.yml
third_party/rust/rustc-serialize/Cargo.toml
third_party/rust/rustc-serialize/LICENSE-APACHE
third_party/rust/rustc-serialize/LICENSE-MIT
third_party/rust/rustc-serialize/appveyor.yml
third_party/rust/rustc-serialize/src/json.rs
third_party/rust/rustc-serialize/src/serialize.rs
third_party/rust/serde/.cargo-checksum.json
third_party/rust/serde/.cargo-ok
third_party/rust/serde/Cargo.toml
third_party/rust/serde/src/bytes.rs
third_party/rust/serde/src/de/impls.rs
third_party/rust/serde_codegen/.cargo-checksum.json
third_party/rust/serde_codegen/.cargo-ok
third_party/rust/serde_codegen/Cargo.toml
third_party/rust/serde_codegen/src/de.rs
third_party/rust/serde_codegen/src/lib.rs
third_party/rust/serde_codegen/src/ser.rs
third_party/rust/serde_codegen_internals/.cargo-checksum.json
third_party/rust/serde_codegen_internals/.cargo-ok
third_party/rust/serde_codegen_internals/Cargo.toml
third_party/rust/serde_codegen_internals/src/attr.rs
third_party/rust/shared_library/.cargo-ok
third_party/rust/shared_library/.gitignore
third_party/rust/syn/.cargo-checksum.json
third_party/rust/syn/.cargo-ok
third_party/rust/syn/Cargo.toml
third_party/rust/syn/src/aster/lifetime.rs
third_party/rust/syn/src/aster/qpath.rs
third_party/rust/syn/src/aster/ty_param.rs
third_party/rust/syn/src/attr.rs
third_party/rust/syn/src/constant.rs
third_party/rust/syn/src/data.rs
third_party/rust/syn/src/escape.rs
third_party/rust/syn/src/expr.rs
third_party/rust/syn/src/generics.rs
third_party/rust/syn/src/helper.rs
third_party/rust/syn/src/ident.rs
third_party/rust/syn/src/item.rs
third_party/rust/syn/src/krate.rs
third_party/rust/syn/src/lib.rs
third_party/rust/syn/src/lit.rs
third_party/rust/syn/src/mac.rs
third_party/rust/syn/src/macro_input.rs
third_party/rust/syn/src/nom.rs
third_party/rust/syn/src/op.rs
third_party/rust/syn/src/registry.rs
third_party/rust/syn/src/space.rs
third_party/rust/syn/src/ty.rs
third_party/rust/syn/src/visit.rs
third_party/rust/syntex/.cargo-checksum.json
third_party/rust/syntex/.cargo-ok
third_party/rust/syntex/Cargo.toml
third_party/rust/syntex/src/registry.rs
third_party/rust/syntex/src/resolver.rs
third_party/rust/syntex_errors/.cargo-checksum.json
third_party/rust/syntex_errors/.cargo-ok
third_party/rust/syntex_errors/Cargo.toml
third_party/rust/syntex_errors/src/emitter.rs
third_party/rust/syntex_errors/src/lib.rs
third_party/rust/syntex_errors/src/snippet.rs
third_party/rust/syntex_pos/.cargo-checksum.json
third_party/rust/syntex_pos/.cargo-ok
third_party/rust/syntex_pos/Cargo.toml
third_party/rust/syntex_pos/src/lib.rs
third_party/rust/syntex_syntax/.cargo-checksum.json
third_party/rust/syntex_syntax/.cargo-ok
third_party/rust/syntex_syntax/Cargo.toml
third_party/rust/syntex_syntax/src/abi.rs
third_party/rust/syntex_syntax/src/ast.rs
third_party/rust/syntex_syntax/src/attr.rs
third_party/rust/syntex_syntax/src/codemap.rs
third_party/rust/syntex_syntax/src/config.rs
third_party/rust/syntex_syntax/src/diagnostics/plugin.rs
third_party/rust/syntex_syntax/src/entry.rs
third_party/rust/syntex_syntax/src/ext/base.rs
third_party/rust/syntex_syntax/src/ext/build.rs
third_party/rust/syntex_syntax/src/ext/decorator.rs
third_party/rust/syntex_syntax/src/ext/env.rs
third_party/rust/syntex_syntax/src/ext/expand.rs
third_party/rust/syntex_syntax/src/ext/hygiene.rs
third_party/rust/syntex_syntax/src/ext/placeholders.rs
third_party/rust/syntex_syntax/src/ext/proc_macro_shim.rs
third_party/rust/syntex_syntax/src/ext/quote.rs
third_party/rust/syntex_syntax/src/ext/source_util.rs
third_party/rust/syntex_syntax/src/ext/tt/macro_parser.rs
third_party/rust/syntex_syntax/src/ext/tt/macro_rules.rs
third_party/rust/syntex_syntax/src/ext/tt/transcribe.rs
third_party/rust/syntex_syntax/src/feature_gate.rs
third_party/rust/syntex_syntax/src/fold.rs
third_party/rust/syntex_syntax/src/lib.rs
third_party/rust/syntex_syntax/src/parse/attr.rs
third_party/rust/syntex_syntax/src/parse/lexer/mod.rs
third_party/rust/syntex_syntax/src/parse/mod.rs
third_party/rust/syntex_syntax/src/parse/parser.rs
third_party/rust/syntex_syntax/src/parse/token.rs
third_party/rust/syntex_syntax/src/print/pprust.rs
third_party/rust/syntex_syntax/src/show_span.rs
third_party/rust/syntex_syntax/src/std_inject.rs
third_party/rust/syntex_syntax/src/test.rs
third_party/rust/syntex_syntax/src/tokenstream.rs
third_party/rust/syntex_syntax/src/util/lev_distance.rs
third_party/rust/syntex_syntax/src/util/node_count.rs
third_party/rust/syntex_syntax/src/util/parser.rs
third_party/rust/syntex_syntax/src/util/parser_testing.rs
third_party/rust/syntex_syntax/src/util/small_vector.rs
third_party/rust/syntex_syntax/src/visit.rs
third_party/rust/term/.cargo-ok
third_party/rust/term/LICENSE-APACHE
third_party/rust/term/LICENSE-MIT
third_party/rust/term/appveyor.yml
third_party/rust/threadpool/.gitignore
third_party/rust/threadpool/LICENSE-APACHE
third_party/rust/threadpool/LICENSE-MIT
third_party/rust/time/.cargo-checksum.json
third_party/rust/time/.cargo-ok
third_party/rust/time/.travis.yml
third_party/rust/time/Cargo.toml
third_party/rust/time/LICENSE-APACHE
third_party/rust/time/LICENSE-MIT
third_party/rust/time/README.md
third_party/rust/time/appveyor.yml
third_party/rust/time/src/display.rs
third_party/rust/time/src/lib.rs
third_party/rust/time/src/sys.rs
third_party/rust/unicode-xid/.cargo-checksum.json
third_party/rust/unicode-xid/.cargo-ok
third_party/rust/unicode-xid/.gitignore
third_party/rust/unicode-xid/.travis.yml
third_party/rust/unicode-xid/Cargo.toml
third_party/rust/unicode-xid/LICENSE-APACHE
third_party/rust/unicode-xid/LICENSE-MIT
third_party/rust/unicode-xid/README.md
third_party/rust/unicode-xid/scripts/unicode.py
third_party/rust/unicode-xid/src/lib.rs
third_party/rust/unicode-xid/src/tables.rs
third_party/rust/unicode-xid/src/tests.rs
third_party/rust/user32-sys/.cargo-ok
third_party/rust/winapi-build/.cargo-ok
third_party/rust/winapi/.cargo-ok
third_party/rust/x11/.cargo-checksum.json
third_party/rust/x11/.cargo-ok
third_party/rust/x11/Cargo.toml
third_party/rust/x11/build.rs
third_party/rust/xml-rs/.cargo-checksum.json
third_party/rust/xml-rs/.cargo-ok
third_party/rust/xml-rs/Cargo.toml
third_party/rust/xml-rs/Changelog.md
third_party/rust/xml-rs/Readme.md
third_party/rust/xml-rs/src/common.rs
third_party/rust/xml-rs/src/reader/parser/mod.rs
third_party/rust/xml-rs/tests/documents/sample_1_full.txt
third_party/rust/xml-rs/tests/documents/sample_1_short.txt
toolkit/library/gtest/rust/Cargo.lock
toolkit/library/gtest/rust/Cargo.toml
toolkit/library/moz.build
toolkit/library/rust/Cargo.lock
toolkit/library/rust/Cargo.toml
toolkit/library/rust/shared/Cargo.toml
toolkit/library/rust/shared/lib.rs
toolkit/moz.configure
--- a/browser/base/content/test/general/browser_extension_permissions.js
+++ b/browser/base/content/test/general/browser_extension_permissions.js
@@ -1,16 +1,16 @@
 "use strict";
 
 const BASE = getRootDirectory(gTestPath)
   .replace("chrome://mochitests/content/", "https://example.com/");
 
-const PAGE = `${BASE}/file_install_extensions.html`;
-const PERMS_XPI = `${BASE}/browser_webext_permissions.xpi`;
-const NO_PERMS_XPI = `${BASE}/browser_webext_nopermissions.xpi`;
+const INSTALL_PAGE = `${BASE}/file_install_extensions.html`;
+const PERMS_XPI = "browser_webext_permissions.xpi";
+const NO_PERMS_XPI = "browser_webext_nopermissions.xpi";
 const ID = "permissions@test.mozilla.org";
 
 const DEFAULT_EXTENSION_ICON = "chrome://browser/content/extension.svg";
 
 Services.perms.add(makeURI("https://example.com/"), "install",
                    Services.perms.ALLOW_ACTION);
 
 function promisePopupNotificationShown(name) {
@@ -31,67 +31,97 @@ function promisePopupNotificationShown(n
 }
 
 function promiseGetAddonByID(id) {
   return new Promise(resolve => {
     AddonManager.getAddonByID(id, resolve);
   });
 }
 
-function checkNotification(panel, url) {
+function checkNotification(panel, filename) {
   let icon = panel.getAttribute("icon");
 
   let ul = document.getElementById("addon-webext-perm-list");
   let header = document.getElementById("addon-webext-perm-intro");
 
-  if (url == PERMS_XPI) {
+  if (filename == PERMS_XPI) {
     // The icon should come from the extension, don't bother with the precise
     // path, just make sure we've got a jar url pointing to the right path
     // inside the jar.
     ok(icon.startsWith("jar:file://"), "Icon is a jar url");
     ok(icon.endsWith("/icon.png"), "Icon is icon.png inside a jar");
 
     is(header.getAttribute("hidden"), "", "Permission list header is visible");
     is(ul.childElementCount, 4, "Permissions list has 4 entries");
     // Real checking of the contents here is deferred until bug 1316996 lands
-  } else if (url == NO_PERMS_XPI) {
+  } else if (filename == NO_PERMS_XPI) {
     // This extension has no icon, it should have the default
     is(icon, DEFAULT_EXTENSION_ICON, "Icon is the default extension icon");
 
     is(header.getAttribute("hidden"), "true", "Permission list header is hidden");
     is(ul.childElementCount, 0, "Permissions list has 0 entries");
   }
 }
 
+// Navigate the current tab to the given url and return a Promise
+// that resolves when the page is loaded.
+function load(url) {
+  gBrowser.selectedBrowser.loadURI(url);
+  return BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser);
+}
+
 const INSTALL_FUNCTIONS = [
-  function installMozAM(url) {
-    return ContentTask.spawn(gBrowser.selectedBrowser, url, function*(cUrl) {
-      yield content.wrappedJSObject.installMozAM(cUrl);
+  async function installMozAM(filename) {
+    await load(INSTALL_PAGE);
+
+    await ContentTask.spawn(gBrowser.selectedBrowser, `${BASE}/${filename}`, function*(url) {
+      yield content.wrappedJSObject.installMozAM(url);
     });
   },
 
-  function installTrigger(url) {
-    ContentTask.spawn(gBrowser.selectedBrowser, url, function*(cUrl) {
-      content.wrappedJSObject.installTrigger(cUrl);
+  async function installTrigger(filename) {
+    await load(INSTALL_PAGE);
+
+    ContentTask.spawn(gBrowser.selectedBrowser, `${BASE}/${filename}`, function*(url) {
+      content.wrappedJSObject.installTrigger(url);
     });
-    return Promise.resolve();
+  },
+
+  async function installFile(filename) {
+    const ChromeRegistry = Cc["@mozilla.org/chrome/chrome-registry;1"]
+                                       .getService(Ci.nsIChromeRegistry);
+    let chromeUrl = Services.io.newURI(gTestPath);
+    let fileUrl = ChromeRegistry.convertChromeURL(chromeUrl);
+    let file = fileUrl.QueryInterface(Ci.nsIFileURL).file;
+    file.leafName = filename;
+
+    let MockFilePicker = SpecialPowers.MockFilePicker;
+    MockFilePicker.init(window);
+    MockFilePicker.returnFiles = [file];
+
+    await BrowserOpenAddonsMgr("addons://list/extension");
+    let contentWin = gBrowser.selectedTab.linkedBrowser.contentWindow;
+
+    // Do the install...
+    contentWin.gViewController.doCommand("cmd_installFromFile");
+    MockFilePicker.cleanup();
   },
 ];
 
 add_task(function* () {
   yield SpecialPowers.pushPrefEnv({set: [
     ["extensions.webapi.testing", true],
     ["extensions.install.requireBuiltInCerts", false],
 
     // XXX remove this when prompts are enabled by default
     ["extensions.webextPermissionPrompts", true],
   ]});
 
-  function* runOnce(installFn, url, cancel) {
-    let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, PAGE);
+  function* runOnce(installFn, filename, cancel) {
+    let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser);
 
     let installPromise = new Promise(resolve => {
       let listener = {
         onDownloadCancelled() {
           AddonManager.removeInstallListener(listener);
           resolve(false);
         },
 
@@ -113,20 +143,20 @@ add_task(function* () {
         onInstallFailed() {
           AddonManager.removeInstallListener(listener);
           resolve(false);
         },
       };
       AddonManager.addInstallListener(listener);
     });
 
-    let installMethodPromise = installFn(url);
+    let installMethodPromise = installFn(filename);
 
     let panel = yield promisePopupNotificationShown("addon-webext-permissions");
-    checkNotification(panel, url);
+    checkNotification(panel, filename);
 
     if (cancel) {
       panel.secondaryButton.click();
       try {
         yield installMethodPromise;
       } catch (err) {}
     } else {
       // Look for post-install notification
@@ -135,17 +165,16 @@ add_task(function* () {
 
       // Press OK on the post-install notification
       panel = yield postInstallPromise;
       panel.button.click();
 
       yield installMethodPromise;
     }
 
-
     let result = yield installPromise;
     let addon = yield promiseGetAddonByID(ID);
     if (cancel) {
       ok(!result, "Installation was cancelled");
       is(addon, null, "Extension is not installed");
     } else {
       ok(result, "Installation completed");
       isnot(addon, null, "Extension is installed");
--- a/browser/locales/en-US/chrome/browser/browser.properties
+++ b/browser/locales/en-US/chrome/browser/browser.properties
@@ -810,17 +810,17 @@ userContext.aboutPage.accesskey = O
 
 userContextOpenLink.label = Open Link in New %S Tab
 
 muteTab.label = Mute Tab
 muteTab.accesskey = M
 unmuteTab.label = Unmute Tab
 unmuteTab.accesskey = M
 playTab.label = Play Tab
-playTab.accesskey = P
+playTab.accesskey = l
 
 # LOCALIZATION NOTE (weakCryptoOverriding.message): %S is brandShortName
 weakCryptoOverriding.message = %S recommends that you don’t enter your password, credit card and other personal information on this website.
 revokeOverride.label = Don’t Trust This Website
 revokeOverride.accesskey = D
 
 # LOCALIZATION NOTE (certErrorDetails*.label): These are text strings that
 # appear in the about:certerror page, so that the user can copy and send them to
--- a/config/rules.mk
+++ b/config/rules.mk
@@ -914,19 +914,16 @@ cargo_target_flag := --target=$(RUST_TAR
 # Permit users to pass flags to cargo from their mozconfigs (e.g. --color=always).
 cargo_build_flags = $(CARGOFLAGS)
 ifndef MOZ_DEBUG
 cargo_build_flags = --release
 endif
 ifdef MOZ_CARGO_SUPPORTS_FROZEN
 cargo_build_flags += --frozen
 endif
-ifdef MOZ_ENABLE_WEBRENDER
-cargo_build_flags += --features "quantum_render"
-endif
 
 cargo_build_flags += --manifest-path $(CARGO_FILE)
 ifdef BUILD_VERBOSE_LOG
 cargo_build_flags += --verbose
 endif
 
 # Enable color output if original stdout was a TTY and color settings
 # aren't already present. This essentially restores the default behavior
--- a/devtools/client/locales/en-US/netmonitor.properties
+++ b/devtools/client/locales/en-US/netmonitor.properties
@@ -744,8 +744,12 @@ netmonitor.custom.send=Send
 
 # LOCALIZATION NOTE (netmonitor.custom.cancel): This is the label displayed
 # on the button which cancels and closes the custom request form
 netmonitor.custom.cancel=Cancel
 
 # LOCALIZATION NOTE (netmonitor.backButton): This is the label displayed
 # on the button which exists the performance statistics view
 netmonitor.backButton=Back
+
+# LOCALIZATION NOTE (netmonitor.headers.learnMore): This is the label displayed
+# next to a header list item, with a link to external documentation
+netmonitor.headers.learnMore=Learn More
new file mode 100644
--- /dev/null
+++ b/devtools/client/netmonitor/shared/components/headers-mdn.js
@@ -0,0 +1,119 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * A mapping of header names to external documentation. Any header included
+ * here will show a "Learn More" link alongside it.
+ */
+
+"use strict";
+
+var URL_DOMAIN = "https://developer.mozilla.org";
+const URL_PATH = "/en-US/docs/Web/HTTP/Headers/";
+const URL_PARAMS =
+  "?utm_source=mozilla&utm_medium=devtools-netmonitor&utm_campaign=default";
+
+var SUPPORTED_HEADERS = [
+  "Accept",
+  "Accept-Charset",
+  "Accept-Encoding",
+  "Accept-Language",
+  "Accept-Ranges",
+  "Access-Control-Allow-Credentials",
+  "Access-Control-Allow-Headers",
+  "Access-Control-Allow-Methods",
+  "Access-Control-Allow-Origin",
+  "Access-Control-Expose-Headers",
+  "Access-Control-Max-Age",
+  "Access-Control-Request-Headers",
+  "Access-Control-Request-Method",
+  "Age",
+  "Cache-Control",
+  "Connection",
+  "Content-Disposition",
+  "Content-Encoding",
+  "Content-Language",
+  "Content-Length",
+  "Content-Location",
+  "Content-Security-Policy",
+  "Content-Security-Policy-Report-Only",
+  "Content-Type",
+  "Cookie",
+  "Cookie2",
+  "DNT",
+  "Date",
+  "ETag",
+  "Expires",
+  "From",
+  "Host",
+  "If-Match",
+  "If-Modified-Since",
+  "If-None-Match",
+  "If-Range",
+  "If-Unmodified-Since",
+  "Keep-Alive",
+  "Last-Modified",
+  "Location",
+  "Origin",
+  "Pragma",
+  "Public-Key-Pins",
+  "Public-Key-Pins-Report-Only",
+  "Referer",
+  "Referrer-Policy",
+  "Retry-After",
+  "Server",
+  "Set-Cookie",
+  "Set-Cookie2",
+  "Strict-Transport-Security",
+  "TE",
+  "Tk",
+  "Trailer",
+  "Transfer-Encoding",
+  "Upgrade-Insecure-Requests",
+  "User-Agent",
+  "Vary",
+  "Via",
+  "Warning",
+  "X-Content-Type-Options",
+  "X-DNS-Prefetch-Control",
+  "X-Frame-Options",
+  "X-XSS-Protection"
+];
+
+/**
+ * Get the MDN URL for the specified header
+ *
+ * @param {string} header
+ *        The name of the header.
+ *
+ * @return {string}
+ * The MDN URL for the header, or null if not available.
+ */
+exports.getURL = (header) => {
+  if (SUPPORTED_HEADERS.indexOf(header) === -1) {
+    return null;
+  }
+
+  return URL_DOMAIN + URL_PATH + header + URL_PARAMS;
+};
+
+/**
+ * Use a different domain for the URLs. Used only for testing.
+ *
+ * @param {string} domain
+ * The domain to use.
+ */
+exports.setDomain = (domain) => {
+  URL_DOMAIN = domain;
+};
+
+/**
+ * Use a different list of supported headers. Used only for testing.
+ *
+ * @param {array} headers
+ * The supported headers to use.
+ */
+exports.setSupportedHeaders = (headers) => {
+  SUPPORTED_HEADERS = headers;
+};
--- a/devtools/client/netmonitor/shared/components/headers-panel.js
+++ b/devtools/client/netmonitor/shared/components/headers-panel.js
@@ -10,21 +10,26 @@ const {
   createClass,
   createFactory,
   DOM,
   PropTypes,
 } = require("devtools/client/shared/vendor/react");
 const { L10N } = require("../../l10n");
 const { writeHeaderText } = require("../../request-utils");
 const { getFormattedSize } = require("../../utils/format-utils");
+const Services = require("Services");
+const { gDevTools } = require("devtools/client/framework/devtools");
+const HeadersMDN = require("devtools/client/netmonitor/shared/components/headers-mdn");
+const { REPS, MODE } = require("devtools/client/shared/components/reps/load-reps");
+const Rep = createFactory(REPS.Rep);
 
 // Components
 const PropertiesView = createFactory(require("./properties-view"));
 
-const { div, input, textarea } = DOM;
+const { a, div, input, textarea } = DOM;
 const EDIT_AND_RESEND = L10N.getStr("netmonitor.summary.editAndResend");
 const RAW_HEADERS = L10N.getStr("netmonitor.summary.rawHeaders");
 const RAW_HEADERS_REQUEST = L10N.getStr("netmonitor.summary.rawHeaders.requestHeaders");
 const RAW_HEADERS_RESPONSE = L10N.getStr("netmonitor.summary.rawHeaders.responseHeaders");
 const HEADERS_EMPTY_TEXT = L10N.getStr("headersEmptyText");
 const HEADERS_FILTER_TEXT = L10N.getStr("headersFilterText");
 const REQUEST_HEADERS = L10N.getStr("requestHeaders");
 const REQUEST_HEADERS_FROM_UPLOAD = L10N.getStr("requestHeadersFromUpload");
@@ -40,16 +45,17 @@ const SUMMARY_VERSION = L10N.getStr("net
  * Lists basic information about the request
  */
 const HeadersPanel = createClass({
   displayName: "HeadersPanel",
 
   propTypes: {
     cloneSelectedRequest: PropTypes.func.isRequired,
     request: PropTypes.object.isRequired,
+    renderValue: PropTypes.func
   },
 
   getInitialState() {
     return {
       rawHeadersOpened: false,
     };
   },
 
@@ -208,15 +214,54 @@ const HeadersPanel = createClass({
           summaryStatus,
           summaryVersion,
           summaryRawHeaders,
         ),
         PropertiesView({
           object,
           filterPlaceHolder: HEADERS_FILTER_TEXT,
           sectionNames: Object.keys(object),
+          renderValue
         }),
       )
     );
   }
 });
 
+function onLearnMoreClick(e, headerDocURL) {
+  e.stopPropagation();
+  e.preventDefault();
+
+  let win = Services.wm.getMostRecentWindow(gDevTools.chromeWindowType);
+  win.openUILinkIn(headerDocURL, "tab");
+}
+
+function renderValue(props) {
+  const { member, value } = props;
+
+  if (typeof value !== "string") {
+    return null;
+  }
+
+  let headerDocURL = HeadersMDN.getURL(member.name);
+
+  return (
+    div({ className: "treeValueCellDivider" },
+      Rep(Object.assign(props, {
+        // FIXME: A workaround for the issue in StringRep
+        // Force StringRep to crop the text every time
+        member: Object.assign({}, member, { open: false }),
+        mode: MODE.TINY,
+        cropLimit: 60,
+      })),
+      headerDocURL ?
+        a({
+          className: "learn-more-link",
+          title: headerDocURL,
+          onClick: (e) => onLearnMoreClick(e, headerDocURL),
+        }, `[${L10N.getStr("netmonitor.headers.learnMore")}]`)
+        :
+        null
+    )
+  );
+}
+
 module.exports = HeadersPanel;
--- a/devtools/client/netmonitor/shared/components/moz.build
+++ b/devtools/client/netmonitor/shared/components/moz.build
@@ -1,16 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 DevToolsModules(
     'cookies-panel.js',
     'details-panel.js',
     'editor.js',
+    'headers-mdn.js',
     'headers-panel.js',
     'params-panel.js',
     'preview-panel.js',
     'properties-view.js',
     'response-panel.js',
     'security-panel.js',
     'timings-panel.js',
 )
--- a/devtools/client/netmonitor/test/browser.ini
+++ b/devtools/client/netmonitor/test/browser.ini
@@ -93,16 +93,17 @@ subsuite = clipboard
 skip-if = (os == 'linux' && bits == 32 && debug) # bug 1328915, disable linux32 debug devtools for timeouts
 [browser_net_copy_as_curl.js]
 subsuite = clipboard
 skip-if = (os == 'linux' && bits == 32 && debug) # bug 1328915, disable linux32 debug devtools for timeouts
 [browser_net_cors_requests.js]
 [browser_net_cyrillic-01.js]
 [browser_net_cyrillic-02.js]
 [browser_net_frame.js]
+[browser_net_header-docs.js]
 skip-if = (os == 'linux' && debug && bits == 32) # Bug 1321434
 [browser_net_filter-01.js]
 skip-if = (os == 'linux' && debug && bits == 32) # Bug 1303439
 [browser_net_filter-02.js]
 [browser_net_filter-03.js]
 [browser_net_filter-04.js]
 [browser_net_footer-summary.js]
 [browser_net_html-preview.js]
new file mode 100644
--- /dev/null
+++ b/devtools/client/netmonitor/test/browser_net_header-docs.js
@@ -0,0 +1,56 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const HeadersMDN = require("devtools/client/netmonitor/shared/components/headers-mdn");
+
+/**
+ * Tests if "Learn More" links are correctly displayed
+ * next to headers.
+ */
+add_task(function* () {
+  let { tab, monitor } = yield initNetMonitor(POST_DATA_URL);
+  info("Starting test... ");
+
+  let { document, NetMonitorView } = monitor.panelWin;
+  let { RequestsMenu } = NetMonitorView;
+
+  RequestsMenu.lazyUpdate = false;
+
+  let wait = waitForNetworkEvents(monitor, 0, 2);
+  yield ContentTask.spawn(tab.linkedBrowser, {}, function* () {
+    content.wrappedJSObject.performRequests();
+  });
+  yield wait;
+
+  let origItem = RequestsMenu.getItemAtIndex(0);
+  RequestsMenu.selectedItem = origItem;
+
+  EventUtils.sendMouseEvent({ type: "click" },
+    document.querySelectorAll(".request-list-item")[0]);
+
+  testShowLearnMore(origItem);
+
+  return teardown(monitor);
+
+  /*
+   * Tests that a "Learn More" button is shown if
+   * and only if a header is documented in MDN.
+   */
+  function testShowLearnMore(data) {
+    document.querySelectorAll(".properties-view .treeRow.stringRow").forEach((rowEl, index) => {
+      let headerName = rowEl.querySelectorAll(".treeLabelCell .treeLabel")[0].textContent;
+      let headerDocURL = HeadersMDN.getURL(headerName);
+      let learnMoreEl = rowEl.querySelectorAll(".treeValueCell .learn-more-link");
+
+      if (headerDocURL === null) {
+        ok(learnMoreEl.length === 0,
+          "undocumented header does not include a \"Learn More\" button");
+      } else {
+        ok(learnMoreEl[0].getAttribute("title") === headerDocURL,
+          "documented header includes a \"Learn More\" button with a link to MDN");
+      }
+    });
+  }
+});
--- a/devtools/client/shared/components/tree/tree-view.css
+++ b/devtools/client/shared/components/tree/tree-view.css
@@ -72,16 +72,33 @@
   text-decoration: underline;
 }
 
 /* Filtering */
 .treeTable .treeRow.hidden {
   display: none;
 }
 
+.treeTable .treeValueCellDivider {
+  display: flex;
+  flex-wrap: wrap;
+  justify-content: space-between;
+}
+
+/* Learn More link */
+.treeTable .treeValueCell .learn-more-link {
+  color: var(--theme-highlight-blue);
+  cursor: pointer;
+  margin: 0 5px;
+}
+
+.treeTable .treeValueCell .learn-more-link:hover {
+  text-decoration: underline;
+}
+
 /******************************************************************************/
 /* Toggle Icon */
 
 .treeTable .treeRow .treeIcon {
   height: 14px;
   width: 14px;
   font-size: 10px; /* Set the size of loading spinner */
   display: inline-block;
--- a/dom/base/TimeoutManager.cpp
+++ b/dom/base/TimeoutManager.cpp
@@ -62,52 +62,56 @@ static int32_t gTimeoutBucketingStrategy
 #define DOM_MAX_TIMEOUT_VALUE    DELAY_INTERVAL_LIMIT
 
 uint32_t TimeoutManager::sNestingLevel = 0;
 
 namespace {
 
 // The number of queued runnables within the TabGroup ThrottledEventQueue
 // at which to begin applying back pressure to the window.
-const uint32_t kThrottledEventQueueBackPressure = 5000;
+#define DEFAULT_THROTTLED_EVENT_QUEUE_BACK_PRESSURE 5000
+static uint32_t gThrottledEventQueueBackPressure;
 
 // The amount of delay to apply to timers when back pressure is triggered.
 // As the length of the ThrottledEventQueue grows delay is increased.  The
 // delay is scaled such that every kThrottledEventQueueBackPressure runnables
 // in the queue equates to an additional kBackPressureDelayMS.
-const double kBackPressureDelayMS = 500;
+#define DEFAULT_BACK_PRESSURE_DELAY_MS 250
+static uint32_t gBackPressureDelayMS;
 
 // This defines a limit for how much the delay must drop before we actually
 // reduce back pressure throttle amount.  This makes the throttle delay
 // a bit "sticky" once we enter back pressure.
-const double kBackPressureDelayReductionThresholdMS = 400;
+#define DEFAULT_BACK_PRESSURE_DELAY_REDUCTION_THRESHOLD_MS 1000
+static uint32_t gBackPressureDelayReductionThresholdMS;
 
 // The minimum delay we can reduce back pressure to before we just floor
 // the value back to zero.  This allows us to ensure that we can exit
 // back pressure event if there are always a small number of runnables
 // queued up.
-const double kBackPressureDelayMinimumMS = 100;
+#define DEFAULT_BACK_PRESSURE_DELAY_MINIMUM_MS 100
+static uint32_t gBackPressureDelayMinimumMS;
 
 // Convert a ThrottledEventQueue length to a timer delay in milliseconds.
 // This will return a value between 0 and INT32_MAX.
 int32_t
 CalculateNewBackPressureDelayMS(uint32_t aBacklogDepth)
 {
   double multiplier = static_cast<double>(aBacklogDepth) /
-                      static_cast<double>(kThrottledEventQueueBackPressure);
-  double value = kBackPressureDelayMS * multiplier;
+                      static_cast<double>(gThrottledEventQueueBackPressure);
+  double value = static_cast<double>(gBackPressureDelayMS) * multiplier;
   // Avoid overflow
   if (value > INT32_MAX) {
     value = INT32_MAX;
   }
 
   // Once we get close to an empty queue just floor the delay back to zero.
   // We want to ensure we don't get stuck in a condition where there is a
   // small amount of delay remaining due to an active, but reasonable, queue.
-  else if (value < kBackPressureDelayMinimumMS) {
+  else if (value < static_cast<double>(gBackPressureDelayMinimumMS)) {
     value = 0;
   }
   return static_cast<int32_t>(value);
 }
 
 } // anonymous namespace
 
 TimeoutManager::TimeoutManager(nsGlobalWindow& aWindow)
@@ -148,16 +152,29 @@ TimeoutManager::Initialize()
                               "dom.min_tracking_background_timeout_value",
                               DEFAULT_MIN_TRACKING_BACKGROUND_TIMEOUT_VALUE);
   Preferences::AddIntVarCache(&gTimeoutBucketingStrategy,
                               "dom.timeout_bucketing_strategy",
                               TRACKING_SEPARATE_TIMEOUT_BUCKETING_STRATEGY);
   Preferences::AddBoolVarCache(&gAnnotateTrackingChannels,
                                "privacy.trackingprotection.annotate_channels",
                                false);
+
+  Preferences::AddUintVarCache(&gThrottledEventQueueBackPressure,
+                               "dom.timeout.throttled_event_queue_back_pressure",
+                               DEFAULT_THROTTLED_EVENT_QUEUE_BACK_PRESSURE);
+  Preferences::AddUintVarCache(&gBackPressureDelayMS,
+                               "dom.timeout.back_pressure_delay_ms",
+                               DEFAULT_BACK_PRESSURE_DELAY_MS);
+  Preferences::AddUintVarCache(&gBackPressureDelayReductionThresholdMS,
+                               "dom.timeout.back_pressure_delay_reduction_threshold_ms",
+                               DEFAULT_BACK_PRESSURE_DELAY_REDUCTION_THRESHOLD_MS);
+  Preferences::AddUintVarCache(&gBackPressureDelayMinimumMS,
+                               "dom.timeout.back_pressure_delay_minimum_ms",
+                               DEFAULT_BACK_PRESSURE_DELAY_MINIMUM_MS);
 }
 
 uint32_t
 TimeoutManager::GetTimeoutId(Timeout::Reason aReason)
 {
   switch (aReason) {
     case Timeout::Reason::eIdleCallbackTimeout:
       return ++mIdleCallbackTimeoutCounter;
@@ -649,17 +666,17 @@ TimeoutManager::MaybeApplyBackPressure()
     return;
   }
 
   // Only begin back pressure if the window has greatly fallen behind the main
   // thread.  This is a somewhat arbitrary threshold chosen such that it should
   // rarely fire under normaly circumstances.  Its low enough, though,
   // that we should have time to slow new runnables from being added before an
   // OOM occurs.
-  if (queue->Length() < kThrottledEventQueueBackPressure) {
+  if (queue->Length() < gThrottledEventQueueBackPressure) {
     return;
   }
 
   // First attempt to dispatch a runnable to update our back pressure state.  We
   // do this first in order to verify we can dispatch successfully before
   // entering the back pressure state.
   nsCOMPtr<nsIRunnable> r =
     NewNonOwningRunnableMethod<StoreRefPtrPassByPtr<nsGlobalWindow>>(this,
@@ -707,18 +724,18 @@ TimeoutManager::CancelOrUpdateBackPressu
 
   // If the delay has decreased, though, we only apply the new value if it has
   // reduced significantly.  This hysteresis avoids thrashing the back pressure
   // value back and forth rapidly.  This is important because reducing the
   // backpressure delay requires calling ResetTimerForThrottleReduction() which
   // can be quite expensive.  We only want to call that method if the back log
   // is really clearing.
   else if (newBackPressureDelayMS == 0 ||
-           (newBackPressureDelayMS <=
-           (mBackPressureDelayMS - kBackPressureDelayReductionThresholdMS))) {
+           (static_cast<uint32_t>(mBackPressureDelayMS) >
+           (newBackPressureDelayMS + gBackPressureDelayReductionThresholdMS))) {
     int32_t oldBackPressureDelayMS = mBackPressureDelayMS;
     mBackPressureDelayMS = newBackPressureDelayMS;
 
     // If the back pressure delay has gone down we must reset any existing
     // timers to use the new value.  Otherwise we run the risk of executing
     // timer callbacks out-of-order.
     ResetTimersForThrottleReduction(oldBackPressureDelayMS);
   }
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -607,19 +607,21 @@ IdleRequestExecutor::SetDeadline(TimeSta
   }
 
   mDeadline = aDeadline;
 }
 
 void
 IdleRequestExecutor::MaybeDispatch()
 {
-  MOZ_DIAGNOSTIC_ASSERT(mWindow);
-
-  if (mDispatched) {
+  // If we've already dispatched the executor we don't want to do it
+  // again. Also, if we've called IdleRequestExecutor::Cancel mWindow
+  // will be null, which indicates that we shouldn't dispatch this
+  // executor either.
+  if (mDispatched || !mWindow) {
     return;
   }
 
   mDispatched = true;
   RefPtr<IdleRequestExecutor> request = this;
   NS_IdleDispatchToCurrentThread(request.forget());
 }
 
--- a/dom/indexedDB/ActorsParent.cpp
+++ b/dom/indexedDB/ActorsParent.cpp
@@ -5129,17 +5129,17 @@ class MOZ_STACK_CLASS DatabaseConnection
   const TransactionBase* mDEBUGTransaction;
 #endif
 
 public:
   AutoSavepoint();
   ~AutoSavepoint();
 
   nsresult
-  Start(const TransactionBase* aConnection);
+  Start(const TransactionBase* aTransaction);
 
   nsresult
   Commit();
 };
 
 class DatabaseConnection::CachedStatement final
 {
   friend class DatabaseConnection;
--- a/dom/media/AbstractMediaDecoder.h
+++ b/dom/media/AbstractMediaDecoder.h
@@ -12,35 +12,35 @@
 
 #include "FrameStatistics.h"
 #include "MediaEventSource.h"
 #include "MediaInfo.h"
 #include "nsISupports.h"
 #include "nsDataHashtable.h"
 #include "nsThreadUtils.h"
 
-namespace mozilla
-{
+namespace mozilla {
 
-namespace layers
-{
-  class ImageContainer;
-  class KnowsCompositor;
+namespace layers {
+class ImageContainer;
+class KnowsCompositor;
 } // namespace layers
+
 class AbstractThread;
 class MediaResource;
 class ReentrantMonitor;
 class VideoFrameContainer;
 class MediaDecoderOwner;
 class CDMProxy;
 class GMPCrashHelper;
 
 typedef nsDataHashtable<nsCStringHashKey, nsCString> MetadataTags;
 
-static inline bool IsCurrentThread(nsIThread* aThread) {
+static inline bool IsCurrentThread(nsIThread* aThread)
+{
   return NS_GetCurrentThread() == aThread;
 }
 
 /**
  * The AbstractMediaDecoder class describes the public interface for a media decoder
  * and is used by the MediaReader classes.
  */
 class AbstractMediaDecoder : public nsIObserver
@@ -54,95 +54,105 @@ public:
   // by currentSrc. Returns what was passed to Load(), if Load() has been called.
   virtual MediaResource* GetResource() const = 0;
 
   // Increments the parsed, decoded and dropped frame counters by the passed in
   // counts.
   // Can be called on any thread.
   virtual void NotifyDecodedFrames(const FrameStatisticsData& aStats) = 0;
 
-  virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() { return nullptr; };
+  virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull()
+  {
+    return nullptr;
+  };
 
   // Return an event that will be notified when data arrives in MediaResource.
   // MediaDecoderReader will register with this event to receive notifications
   // in order to update buffer ranges.
   // Return null if this decoder doesn't support the event.
   virtual MediaEventSource<void>* DataArrivedEvent()
   {
     return nullptr;
   }
 
   // Returns an event that will be notified when the owning document changes state
   // and we might have a new compositor. If this new compositor requires us to
   // recreate our decoders, then we expect the existing decoderis to return an
   // error independently of this.
-  virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>* CompositorUpdatedEvent()
+  virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>*
+  CompositorUpdatedEvent()
   {
     return nullptr;
   }
 
   // Notify the media decoder that a decryption key is required before emitting
   // further output. This only needs to be overridden for decoders that expect
   // encryption, such as the MediaSource decoder.
-  virtual void NotifyWaitingForKey() {}
+  virtual void NotifyWaitingForKey() { }
 
   // Return an event that will be notified when a decoder is waiting for a
   // decryption key before it can return more output.
   virtual MediaEventSource<void>* WaitingForKeyEvent()
   {
     return nullptr;
   }
 
   // Return an abstract thread on which to run main thread runnables.
   virtual AbstractThread* AbstractMainThread() const = 0;
 
 protected:
-  virtual void UpdateEstimatedMediaDuration(int64_t aDuration) {};
+  virtual void UpdateEstimatedMediaDuration(int64_t aDuration) { };
 public:
   void DispatchUpdateEstimatedMediaDuration(int64_t aDuration)
   {
-    NS_DispatchToMainThread(NewRunnableMethod<int64_t>(this,
-                                                       &AbstractMediaDecoder::UpdateEstimatedMediaDuration,
-                                                       aDuration));
+    NS_DispatchToMainThread(NewRunnableMethod<int64_t>(
+      this, &AbstractMediaDecoder::UpdateEstimatedMediaDuration, aDuration));
   }
 
   virtual VideoFrameContainer* GetVideoFrameContainer() = 0;
   virtual mozilla::layers::ImageContainer* GetImageContainer() = 0;
 
   // Returns the owner of this decoder or null when the decoder is shutting
   // down. The owner should only be used on the main thread.
   virtual MediaDecoderOwner* GetOwner() const = 0;
 
   // Set by Reader if the current audio track can be offloaded
-  virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) {}
+  virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) { }
 
   virtual already_AddRefed<GMPCrashHelper> GetCrashHelper() { return nullptr; }
 
   // Stack based class to assist in notifying the frame statistics of
   // parsed and decoded frames. Use inside video demux & decode functions
   // to ensure all parsed and decoded frames are reported on all return paths.
-  class AutoNotifyDecoded {
+  class AutoNotifyDecoded
+  {
   public:
     explicit AutoNotifyDecoded(AbstractMediaDecoder* aDecoder)
       : mDecoder(aDecoder)
-    {}
-    ~AutoNotifyDecoded() {
+    {
+    }
+    ~AutoNotifyDecoded()
+    {
       if (mDecoder) {
         mDecoder->NotifyDecodedFrames(mStats);
       }
     }
 
     FrameStatisticsData mStats;
 
   private:
     AbstractMediaDecoder* mDecoder;
   };
 
   // Classes directly inheriting from AbstractMediaDecoder do not support
   // Observe and it should never be called directly.
-  NS_IMETHOD Observe(nsISupports *aSubject, const char * aTopic, const char16_t * aData) override
-  { MOZ_CRASH("Forbidden method"); return NS_OK; }
+  NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+                     const char16_t* aData) override
+  {
+    MOZ_CRASH("Forbidden method");
+    return NS_OK;
+  }
 };
 
 } // namespace mozilla
 
 #endif
 
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -313,30 +313,31 @@ struct ToCubebFormat<AUDIO_FORMAT_S16> {
 template <typename Function, typename... Args>
 int AudioStream::InvokeCubeb(Function aFunction, Args&&... aArgs)
 {
   MonitorAutoUnlock mon(mMonitor);
   return aFunction(mCubebStream.get(), Forward<Args>(aArgs)...);
 }
 
 nsresult
-AudioStream::Init(uint32_t aNumChannels, uint32_t aRate,
+AudioStream::Init(uint32_t aNumChannels, uint32_t aChannelMap, uint32_t aRate,
                   const dom::AudioChannel aAudioChannel)
 {
   auto startTime = TimeStamp::Now();
 
   LOG("%s channels: %d, rate: %d", __FUNCTION__, aNumChannels, aRate);
   mChannels = aNumChannels;
   mOutChannels = aNumChannels;
 
   mDumpFile = OpenDumpFile(aNumChannels, aRate);
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = mOutChannels;
+  params.layout = CubebUtils::ConvertChannelMapToCubebLayout(aChannelMap);
 #if defined(__ANDROID__)
 #if defined(MOZ_B2G)
   params.stream_type = CubebUtils::ConvertChannelToCubebType(aAudioChannel);
 #else
   params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
 #endif
 
   if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
@@ -349,20 +350,16 @@ AudioStream::Init(uint32_t aNumChannels,
 
   cubeb* cubebContext = CubebUtils::GetCubebContext();
   if (!cubebContext) {
     NS_WARNING("Can't get cubeb context!");
     CubebUtils::ReportCubebStreamInitFailure(true);
     return NS_ERROR_DOM_MEDIA_CUBEB_INITIALIZATION_ERR;
   }
 
-  // The DecodedAudioDataSink forces mono or stereo for now.
-  params.layout = params.channels == 1 ? CUBEB_LAYOUT_MONO
-                                       : CUBEB_LAYOUT_STEREO;
-
   return OpenCubeb(cubebContext, params, startTime, CubebUtils::GetFirstStream());
 }
 
 nsresult
 AudioStream::OpenCubeb(cubeb* aContext, cubeb_stream_params& aParams,
                        TimeStamp aStartTime, bool aIsFirst)
 {
   MOZ_ASSERT(aContext);
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -183,19 +183,20 @@ public:
     virtual void Drained() = 0;
   protected:
     virtual ~DataSource() {}
   };
 
   explicit AudioStream(DataSource& aSource);
 
   // Initialize the audio stream. aNumChannels is the number of audio
-  // channels (1 for mono, 2 for stereo, etc) and aRate is the sample rate
+  // channels (1 for mono, 2 for stereo, etc), aChannelMap is the indicator for
+  // channel layout (mono, stereo, 5.1 or 7.1) and aRate is the sample rate
   // (22050Hz, 44100Hz, etc).
-  nsresult Init(uint32_t aNumChannels, uint32_t aRate,
+  nsresult Init(uint32_t aNumChannels, uint32_t aChannelMap, uint32_t aRate,
                 const dom::AudioChannel aAudioStreamChannel);
 
   // Closes the stream. All future use of the stream is an error.
   void Shutdown();
 
   void Reset();
 
   // Set the current volume of the audio playback. This is a value from
@@ -219,16 +220,21 @@ public:
   // was opened, of the audio hardware.  Thread-safe.
   int64_t GetPositionInFrames();
 
   static uint32_t GetPreferredRate()
   {
     return CubebUtils::PreferredSampleRate();
   }
 
+  static uint32_t GetPreferredChannelMap(uint32_t aChannels)
+  {
+    return CubebUtils::PreferredChannelMap(aChannels);
+  }
+
   uint32_t GetOutChannels() { return mOutChannels; }
 
   // Set playback rate as a multiple of the intrinsic playback rate. This is to
   // be called only with aPlaybackRate > 0.0.
   nsresult SetPlaybackRate(double aPlaybackRate);
   // Switch between resampling (if false) and time stretching (if true, default).
   nsresult SetPreservesPitch(bool aPreservesPitch);
 
--- a/dom/media/Benchmark.cpp
+++ b/dom/media/Benchmark.cpp
@@ -5,20 +5,22 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "Benchmark.h"
 #include "BufferMediaResource.h"
 #include "MediaData.h"
 #include "MediaPrefs.h"
 #include "PDMFactory.h"
 #include "WebMDemuxer.h"
+#include "gfxPrefs.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/dom/ContentChild.h"
+#include "mozilla/gfx/gfxVars.h"
 
 #ifndef MOZ_WIDGET_ANDROID
 #include "WebMSample.h"
 #endif
 
 namespace mozilla {
 
 // Update this version number to force re-running the benchmark. Such as when
@@ -131,30 +133,32 @@ Benchmark::Dispose()
   mKeepAliveUntilComplete = nullptr;
   mPromise.RejectIfExists(false, __func__);
 }
 
 void
 Benchmark::Init()
 {
   MOZ_ASSERT(NS_IsMainThread());
-
+  gfxVars::Initialize();
+  gfxPrefs::GetSingleton();
   MediaPrefs::GetSingleton();
 }
 
 BenchmarkPlayback::BenchmarkPlayback(Benchmark* aMainThreadState,
                                      MediaDataDemuxer* aDemuxer)
   : QueueObject(new TaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK)))
   , mMainThreadState(aMainThreadState)
   , mDecoderTaskQueue(new TaskQueue(GetMediaThreadPool(
                         MediaThreadType::PLATFORM_DECODER)))
   , mDemuxer(aDemuxer)
   , mSampleIndex(0)
   , mFrameCount(0)
   , mFinished(false)
+  , mDrained(false)
 {
   MOZ_ASSERT(static_cast<Benchmark*>(mMainThreadState)->OnThread());
 }
 
 void
 BenchmarkPlayback::DemuxSamples()
 {
   MOZ_ASSERT(OnThread());
@@ -181,18 +185,18 @@ BenchmarkPlayback::DemuxNextSample()
   MOZ_ASSERT(OnThread());
 
   RefPtr<Benchmark> ref(mMainThreadState);
   RefPtr<MediaTrackDemuxer::SamplesPromise> promise = mTrackDemuxer->GetSamples();
   promise->Then(
     Thread(), __func__,
     [this, ref](RefPtr<MediaTrackDemuxer::SamplesHolder> aHolder) {
       mSamples.AppendElements(Move(aHolder->mSamples));
-      if (ref->mParameters.mStopAtFrame &&
-          mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
+      if (ref->mParameters.mStopAtFrame
+          && mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
         InitDecoder(Move(*mTrackDemuxer->GetInfo()));
       } else {
         Dispatch(NS_NewRunnableFunction([this, ref]() { DemuxNextSample(); }));
       }
     },
     [this, ref](const MediaResult& aError) {
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
@@ -205,131 +209,121 @@ BenchmarkPlayback::DemuxNextSample()
 }
 
 void
 BenchmarkPlayback::InitDecoder(TrackInfo&& aInfo)
 {
   MOZ_ASSERT(OnThread());
 
   RefPtr<PDMFactory> platform = new PDMFactory();
-  mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue, reinterpret_cast<MediaDataDecoderCallback*>(this) });
+  mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue });
   if (!mDecoder) {
     MainThreadShutdown();
     return;
   }
   RefPtr<Benchmark> ref(mMainThreadState);
   mDecoder->Init()->Then(
     Thread(), __func__,
     [this, ref](TrackInfo::TrackType aTrackType) {
       InputExhausted();
     },
-    [this, ref](MediaResult aError) {
+    [this, ref](const MediaResult& aError) {
       MainThreadShutdown();
     });
 }
 
 void
 BenchmarkPlayback::MainThreadShutdown()
 {
   MOZ_ASSERT(OnThread());
 
   if (mFinished) {
     // Nothing more to do.
     return;
   }
   mFinished = true;
 
   if (mDecoder) {
-    mDecoder->Flush();
-    mDecoder->Shutdown();
-    mDecoder = nullptr;
-  }
-
-  mDecoderTaskQueue->BeginShutdown();
-  mDecoderTaskQueue->AwaitShutdownAndIdle();
-  mDecoderTaskQueue = nullptr;
+    RefPtr<Benchmark> ref(mMainThreadState);
+    mDecoder->Flush()->Then(
+      Thread(), __func__,
+      [ref, this]() {
+        mDecoder->Shutdown()->Then(
+          Thread(), __func__,
+          [ref, this]() {
+            mDecoderTaskQueue->BeginShutdown();
+            mDecoderTaskQueue->AwaitShutdownAndIdle();
+            mDecoderTaskQueue = nullptr;
 
-  if (mTrackDemuxer) {
-    mTrackDemuxer->Reset();
-    mTrackDemuxer->BreakCycles();
-    mTrackDemuxer = nullptr;
-  }
+            if (mTrackDemuxer) {
+              mTrackDemuxer->Reset();
+              mTrackDemuxer->BreakCycles();
+              mTrackDemuxer = nullptr;
+            }
 
-  RefPtr<Benchmark> ref(mMainThreadState);
-  Thread()->AsTaskQueue()->BeginShutdown()->Then(
-    ref->Thread(), __func__,
-    [ref]() {  ref->Dispose(); },
-    []() { MOZ_CRASH("not reached"); });
+            Thread()->AsTaskQueue()->BeginShutdown()->Then(
+              ref->Thread(), __func__,
+              [ref]() { ref->Dispose(); },
+              []() { MOZ_CRASH("not reached"); });
+          },
+          []() { MOZ_CRASH("not reached"); });
+        mDecoder = nullptr;
+      },
+      []() { MOZ_CRASH("not reached"); });
+  }
 }
 
 void
-BenchmarkPlayback::Output(MediaData* aData)
+BenchmarkPlayback::Output(const MediaDataDecoder::DecodedData& aResults)
 {
+  MOZ_ASSERT(OnThread());
   RefPtr<Benchmark> ref(mMainThreadState);
-  Dispatch(NS_NewRunnableFunction([this, ref]() {
-    mFrameCount++;
-    if (mFrameCount == ref->mParameters.mStartupFrame) {
-      mDecodeStartTime = TimeStamp::Now();
-    }
-    int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
-    TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
-    if (!mFinished &&
-        (frames == ref->mParameters.mFramesToMeasure ||
-         elapsedTime >= ref->mParameters.mTimeout)) {
-      uint32_t decodeFps = frames / elapsedTime.ToSeconds();
-      MainThreadShutdown();
-      ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
-        ref->ReturnResult(decodeFps);
-      }));
-    }
-  }));
-}
-
-void
-BenchmarkPlayback::Error(const MediaResult& aError)
-{
-  RefPtr<Benchmark> ref(mMainThreadState);
-  Dispatch(NS_NewRunnableFunction([this, ref]() {  MainThreadShutdown(); }));
+  mFrameCount += aResults.Length();
+  if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) {
+    mDecodeStartTime = Some(TimeStamp::Now());
+  }
+  TimeStamp now = TimeStamp::Now();
+  int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
+  TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now);
+  if (!mFinished
+      && (((frames == ref->mParameters.mFramesToMeasure) && frames > 0)
+          || elapsedTime >= ref->mParameters.mTimeout
+          || mDrained)) {
+    uint32_t decodeFps = frames / elapsedTime.ToSeconds();
+    MainThreadShutdown();
+    ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
+      ref->ReturnResult(decodeFps);
+    }));
+  }
 }
 
 void
 BenchmarkPlayback::InputExhausted()
 {
+  MOZ_ASSERT(OnThread());
+  if (mFinished || mSampleIndex >= mSamples.Length()) {
+    return;
+  }
   RefPtr<Benchmark> ref(mMainThreadState);
-  Dispatch(NS_NewRunnableFunction([this, ref]() {
-    MOZ_ASSERT(OnThread());
-    if (mFinished || mSampleIndex >= mSamples.Length()) {
-      return;
+  mDecoder->Decode(mSamples[mSampleIndex])
+    ->Then(Thread(), __func__,
+           [ref, this](const MediaDataDecoder::DecodedData& aResults) {
+             Output(aResults);
+             InputExhausted();
+           },
+           [ref, this](const MediaResult& aError) { MainThreadShutdown(); });
+  mSampleIndex++;
+  if (mSampleIndex == mSamples.Length()) {
+    if (ref->mParameters.mStopAtFrame) {
+      mSampleIndex = 0;
+    } else {
+      mDecoder->Drain()->Then(
+        Thread(), __func__,
+        [ref, this](const MediaDataDecoder::DecodedData& aResults) {
+          mDrained = true;
+          Output(aResults);
+        },
+        [ref, this](const MediaResult& aError) { MainThreadShutdown(); });
     }
-    mDecoder->Input(mSamples[mSampleIndex]);
-    mSampleIndex++;
-    if (mSampleIndex == mSamples.Length()) {
-      if (ref->mParameters.mStopAtFrame) {
-        mSampleIndex = 0;
-      } else {
-        mDecoder->Drain();
-      }
-    }
-  }));
+  }
 }
 
-void
-BenchmarkPlayback::DrainComplete()
-{
-  RefPtr<Benchmark> ref(mMainThreadState);
-  Dispatch(NS_NewRunnableFunction([this, ref]() {
-    int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
-    TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
-    uint32_t decodeFps = frames / elapsedTime.ToSeconds();
-    MainThreadShutdown();
-    ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
-      ref->ReturnResult(decodeFps);
-    }));
-  }));
-}
-
-bool
-BenchmarkPlayback::OnReaderTaskQueue()
-{
-  return OnThread();
-}
-
-}
+} // namespace mozilla
--- a/dom/media/Benchmark.h
+++ b/dom/media/Benchmark.h
@@ -5,90 +5,92 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_BENCHMARK_H
 #define MOZILLA_BENCHMARK_H
 
 #include "MediaDataDemuxer.h"
 #include "QueueObject.h"
 #include "PlatformDecoderModule.h"
+#include "mozilla/Maybe.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/TaskQueue.h"
 #include "mozilla/TimeStamp.h"
 #include "nsCOMPtr.h"
 
 namespace mozilla {
 
 class TaskQueue;
 class Benchmark;
 
-class BenchmarkPlayback : public QueueObject, private MediaDataDecoderCallback
+class BenchmarkPlayback : public QueueObject
 {
   friend class Benchmark;
-  explicit BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
+  BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
   void DemuxSamples();
   void DemuxNextSample();
   void MainThreadShutdown();
   void InitDecoder(TrackInfo&& aInfo);
 
-  // MediaDataDecoderCallback
-  // Those methods are called on the MediaDataDecoder's task queue.
-  void Output(MediaData* aData) override;
-  void Error(const MediaResult& aError) override;
-  void InputExhausted() override;
-  void DrainComplete() override;
-  bool OnReaderTaskQueue() override;
+  void Output(const MediaDataDecoder::DecodedData& aResults);
+  void InputExhausted();
 
   Atomic<Benchmark*> mMainThreadState;
 
   RefPtr<TaskQueue> mDecoderTaskQueue;
   RefPtr<MediaDataDecoder> mDecoder;
 
   // Object only accessed on Thread()
   RefPtr<MediaDataDemuxer> mDemuxer;
   RefPtr<MediaTrackDemuxer> mTrackDemuxer;
   nsTArray<RefPtr<MediaRawData>> mSamples;
   size_t mSampleIndex;
-  TimeStamp mDecodeStartTime;
+  Maybe<TimeStamp> mDecodeStartTime;
   uint32_t mFrameCount;
   bool mFinished;
+  bool mDrained;
 };
 
 // Init() must have been called at least once prior on the
 // main thread.
 class Benchmark : public QueueObject
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Benchmark)
 
   struct Parameters
   {
     Parameters()
       : mFramesToMeasure(-1)
       , mStartupFrame(1)
-      , mTimeout(TimeDuration::Forever()) {}
+      , mTimeout(TimeDuration::Forever())
+    {
+    }
 
     Parameters(int32_t aFramesToMeasure,
                uint32_t aStartupFrame,
                int32_t aStopAtFrame,
                const TimeDuration& aTimeout)
       : mFramesToMeasure(aFramesToMeasure)
       , mStartupFrame(aStartupFrame)
       , mStopAtFrame(Some(aStopAtFrame))
-      , mTimeout(aTimeout) {}
+      , mTimeout(aTimeout)
+    {
+    }
 
     const int32_t mFramesToMeasure;
     const uint32_t mStartupFrame;
     const Maybe<int32_t> mStopAtFrame;
     const TimeDuration mTimeout;
   };
 
   typedef MozPromise<uint32_t, bool, /* IsExclusive = */ true> BenchmarkPromise;
 
-  explicit Benchmark(MediaDataDemuxer* aDemuxer, const Parameters& aParameters = Parameters());
+  explicit Benchmark(MediaDataDemuxer* aDemuxer,
+                     const Parameters& aParameters = Parameters());
   RefPtr<BenchmarkPromise> Run();
 
   static void Init();
 
 private:
   friend class BenchmarkPlayback;
   virtual ~Benchmark();
   void ReturnResult(uint32_t aDecodeFps);
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -19,16 +19,33 @@
 #include "nsAutoRef.h"
 #include "prdtoa.h"
 
 #define PREF_VOLUME_SCALE "media.volume_scale"
 #define PREF_CUBEB_LATENCY_PLAYBACK "media.cubeb_latency_playback_ms"
 #define PREF_CUBEB_LATENCY_MSG "media.cubeb_latency_msg_frames"
 #define PREF_CUBEB_LOG_LEVEL "media.cubeb.log_level"
 
+#define MASK_MONO       (1 << AudioConfig::CHANNEL_MONO)
+#define MASK_MONO_LFE   (MASK_MONO | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_STEREO     ((1 << AudioConfig::CHANNEL_LEFT) | (1 << AudioConfig::CHANNEL_RIGHT))
+#define MASK_STEREO_LFE (MASK_STEREO | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_3F         (MASK_STEREO | (1 << AudioConfig::CHANNEL_CENTER))
+#define MASK_3F_LFE     (MASK_3F | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_2F1        (MASK_STEREO | (1 << AudioConfig::CHANNEL_RCENTER))
+#define MASK_2F1_LFE    (MASK_2F1 | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_3F1        (MASK_3F | (1 << AudioConfig::CHANNEL_RCENTER))
+#define MASK_3F1_LFE    (MASK_3F1 | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_2F2        (MASK_STEREO | (1 << AudioConfig::CHANNEL_LS) | (1 << AudioConfig::CHANNEL_RS))
+#define MASK_2F2_LFE    (MASK_2F2 | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_3F2        (MASK_3F | (1 << AudioConfig::CHANNEL_LS) | (1 << AudioConfig::CHANNEL_RS))
+#define MASK_3F2_LFE    (MASK_3F2 | (1 << AudioConfig::CHANNEL_LFE))
+#define MASK_3F3R_LFE   (MASK_3F2_LFE | (1 << AudioConfig::CHANNEL_RCENTER))
+#define MASK_3F4_LFE    (MASK_3F2_LFE | (1 << AudioConfig::CHANNEL_RLS) | (1 << AudioConfig::CHANNEL_RRS))
+
 namespace mozilla {
 
 namespace {
 
 LazyLogModule gCubebLog("cubeb");
 
 void CubebLogCallback(const char* aFmt, ...)
 {
@@ -89,16 +106,21 @@ const int CUBEB_BACKEND_UNKNOWN = CUBEB_
 // thread before fetching, after which it is safe to fetch without holding the
 // mutex because it is only written once per process execution (by the first
 // initialization to complete).  Since the init must have been called on a
 // given thread before fetching the value, it's guaranteed (via the mutex) that
 // sufficient memory barriers have occurred to ensure the correct value is
 // visible on the querying thread/CPU.
 uint32_t sPreferredSampleRate;
 
+// We only support SMPTE layout in cubeb for now. If the value is
+// CUBEB_LAYOUT_UNDEFINED, then it implies that the preferred layout is
+// non-SMPTE format.
+cubeb_channel_layout sPreferredChannelLayout;
+
 } // namespace
 
 extern LazyLogModule gAudioStreamLog;
 
 static const uint32_t CUBEB_NORMAL_LATENCY_MS = 100;
 // Consevative default that can work on all platforms.
 static const uint32_t CUBEB_NORMAL_LATENCY_FRAMES = 1024;
 
@@ -194,16 +216,71 @@ uint32_t PreferredSampleRate()
 {
   if (!InitPreferredSampleRate()) {
     return 44100;
   }
   MOZ_ASSERT(sPreferredSampleRate);
   return sPreferredSampleRate;
 }
 
+bool InitPreferredChannelLayout()
+{
+  StaticMutexAutoLock lock(sMutex);
+  if (sPreferredChannelLayout != 0) {
+    return true;
+  }
+  cubeb* context = GetCubebContextUnlocked();
+  if (!context) {
+    return false;
+  }
+  return cubeb_get_preferred_channel_layout(context,
+                                            &sPreferredChannelLayout) == CUBEB_OK
+         ? true : false;
+}
+
+uint32_t PreferredChannelMap(uint32_t aChannels)
+{
+  // The first element of each entry in the following mapping table is the
+  // channel count, and the second is its bit mask. The table is used many
+  // times, so we should avoid allocating it on the stack, where it would be
+  // created and destroyed repeatedly. Use static to place this local
+  // variable in the data segment instead of on the stack.
+  static uint32_t layoutInfo[CUBEB_LAYOUT_MAX][2] = {
+    { 0, 0 },               // CUBEB_LAYOUT_UNDEFINED
+    { 2, MASK_STEREO },     // CUBEB_LAYOUT_DUAL_MONO
+    { 3, MASK_STEREO_LFE }, // CUBEB_LAYOUT_DUAL_MONO_LFE
+    { 1, MASK_MONO },       // CUBEB_LAYOUT_MONO
+    { 2, MASK_MONO_LFE },   // CUBEB_LAYOUT_MONO_LFE
+    { 2, MASK_STEREO },     // CUBEB_LAYOUT_STEREO
+    { 3, MASK_STEREO_LFE }, // CUBEB_LAYOUT_STEREO_LFE
+    { 3, MASK_3F },         // CUBEB_LAYOUT_3F
+    { 4, MASK_3F_LFE },     // CUBEB_LAYOUT_3F_LFE
+    { 3, MASK_2F1 },        // CUBEB_LAYOUT_2F1
+    { 4, MASK_2F1_LFE },    // CUBEB_LAYOUT_2F1_LFE
+    { 4, MASK_3F1 },        // CUBEB_LAYOUT_3F1
+    { 5, MASK_3F1_LFE },    // CUBEB_LAYOUT_3F1_LFE
+    { 4, MASK_2F2 },        // CUBEB_LAYOUT_2F2
+    { 5, MASK_2F2_LFE },    // CUBEB_LAYOUT_2F2_LFE
+    { 5, MASK_3F2 },        // CUBEB_LAYOUT_3F2
+    { 6, MASK_3F2_LFE },    // CUBEB_LAYOUT_3F2_LFE
+    { 7, MASK_3F3R_LFE },   // CUBEB_LAYOUT_3F3R_LFE
+    { 8, MASK_3F4_LFE },    // CUBEB_LAYOUT_3F4_LFE
+  };
+
+  // Use the SMPTE default channel map if we can't get the preferred layout,
+  // or if the channel count of the preferred layout differs from the input's.
+  if (!InitPreferredChannelLayout()
+      || layoutInfo[sPreferredChannelLayout][0] != aChannels) {
+    AudioConfig::ChannelLayout smpteLayout(aChannels);
+    return smpteLayout.Map();
+  }
+
+  return layoutInfo[sPreferredChannelLayout][1];
+}
+
 void InitBrandName()
 {
   if (sBrandName) {
     return;
   }
   nsXPIDLString brandName;
   nsCOMPtr<nsIStringBundleService> stringBundleService =
     mozilla::services::GetStringBundleService();
@@ -354,16 +431,41 @@ uint32_t MaxNumberOfChannels()
       cubeb_get_max_channel_count(cubebContext,
                                   &maxNumberOfChannels) == CUBEB_OK) {
     return maxNumberOfChannels;
   }
 
   return 0;
 }
 
+cubeb_channel_layout ConvertChannelMapToCubebLayout(uint32_t aChannelMap)
+{
+  switch(aChannelMap) {
+    case MASK_MONO: return CUBEB_LAYOUT_MONO;
+    case MASK_MONO_LFE: return CUBEB_LAYOUT_MONO_LFE;
+    case MASK_STEREO: return CUBEB_LAYOUT_STEREO;
+    case MASK_STEREO_LFE: return CUBEB_LAYOUT_STEREO_LFE;
+    case MASK_3F: return CUBEB_LAYOUT_3F;
+    case MASK_3F_LFE: return CUBEB_LAYOUT_3F_LFE;
+    case MASK_2F1: return CUBEB_LAYOUT_2F1;
+    case MASK_2F1_LFE: return CUBEB_LAYOUT_2F1_LFE;
+    case MASK_3F1: return CUBEB_LAYOUT_3F1;
+    case MASK_3F1_LFE: return CUBEB_LAYOUT_3F1_LFE;
+    case MASK_2F2: return CUBEB_LAYOUT_2F2;
+    case MASK_2F2_LFE: return CUBEB_LAYOUT_2F2_LFE;
+    case MASK_3F2: return CUBEB_LAYOUT_3F2;
+    case MASK_3F2_LFE: return CUBEB_LAYOUT_3F2_LFE;
+    case MASK_3F3R_LFE: return CUBEB_LAYOUT_3F3R_LFE;
+    case MASK_3F4_LFE: return CUBEB_LAYOUT_3F4_LFE;
+    default:
+      NS_ERROR("The channel map is unsupported");
+      return CUBEB_LAYOUT_UNDEFINED;
+  }
+}
+
 #if defined(__ANDROID__) && defined(MOZ_B2G)
 cubeb_stream_type ConvertChannelToCubebType(dom::AudioChannel aChannel)
 {
   switch(aChannel) {
     case dom::AudioChannel::Normal:
       /* FALLTHROUGH */
     case dom::AudioChannel::Content:
       return CUBEB_STREAM_TYPE_MUSIC;
--- a/dom/media/CubebUtils.h
+++ b/dom/media/CubebUtils.h
@@ -25,26 +25,30 @@ void InitLibrary();
 void ShutdownLibrary();
 
 // Returns the maximum number of channels supported by the audio hardware.
 uint32_t MaxNumberOfChannels();
 
 // Get the sample rate the hardware/mixer runs at. Thread safe.
 uint32_t PreferredSampleRate();
 
+// Get the bit mask of the connected audio device's preferred layout.
+uint32_t PreferredChannelMap(uint32_t aChannels);
+
 void PrefChanged(const char* aPref, void* aClosure);
 double GetVolumeScale();
 bool GetFirstStream();
 cubeb* GetCubebContext();
 cubeb* GetCubebContextUnlocked();
 void ReportCubebStreamInitFailure(bool aIsFirstStream);
 void ReportCubebBackendUsed();
 uint32_t GetCubebPlaybackLatencyInMilliseconds();
 Maybe<uint32_t> GetCubebMSGLatencyInFrames();
 bool CubebLatencyPrefSet();
+cubeb_channel_layout ConvertChannelMapToCubebLayout(uint32_t aChannelMap);
 #if defined(__ANDROID__) && defined(MOZ_B2G)
 cubeb_stream_type ConvertChannelToCubebType(dom::AudioChannel aChannel);
 #endif
 void GetCurrentBackend(nsAString& aBackend);
 
 } // namespace CubebUtils
 } // namespace mozilla
 
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -719,17 +719,17 @@ already_AddRefed<DOMMediaStream>
 DOMMediaStream::CloneInternal(TrackForwardingOption aForwarding)
 {
   RefPtr<DOMMediaStream> newStream =
     new DOMMediaStream(GetParentObject(), new ClonedStreamSourceGetter(this));
 
   LOG(LogLevel::Info, ("DOMMediaStream %p created clone %p, forwarding %s tracks",
                        this, newStream.get(),
                        aForwarding == TrackForwardingOption::ALL
-                         ? "all" : "current"));
+                       ? "all" : "current"));
 
   MOZ_RELEASE_ASSERT(mPlaybackStream);
   MOZ_RELEASE_ASSERT(mPlaybackStream->Graph());
   MediaStreamGraph* graph = mPlaybackStream->Graph();
 
   // We initiate the owned and playback streams first, since we need to create
   // all existing DOM tracks before we add the generic input port from
   // mInputStream to mOwnedStream (see AllocateInputPort wrt. destination
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -62,18 +62,18 @@ void GraphDriver::SetGraphTime(GraphDriv
   mIterationEnd = aLastSwitchNextIterationEnd;
 
   MOZ_ASSERT(!PreviousDriver());
   MOZ_ASSERT(aPreviousDriver);
 
   STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)",
                                aPreviousDriver,
                                aPreviousDriver->AsAudioCallbackDriver()
-                                 ? "AudioCallbackDriver"
-                                 : "SystemClockDriver"));
+                               ? "AudioCallbackDriver"
+                               : "SystemClockDriver"));
   SetPreviousDriver(aPreviousDriver);
 }
 
 void GraphDriver::SwitchAtNextIteration(GraphDriver* aNextDriver)
 {
   GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
   LIFECYCLE_LOG("Switching to new driver: %p (%s)",
       aNextDriver, aNextDriver->AsAudioCallbackDriver() ?
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -1521,18 +1521,18 @@ MediaCache::AllocateAndWriteBlock(MediaC
       if (!bo)
         return;
 
       bo->mStream = stream;
       bo->mStreamBlock = streamBlockIndex;
       bo->mLastUseTime = now;
       stream->mBlocks[streamBlockIndex] = blockIndex;
       if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) {
-        bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK
-          ? PLAYED_BLOCK : METADATA_BLOCK;
+        bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK ? PLAYED_BLOCK
+                                                              : METADATA_BLOCK;
         // This must be the most-recently-used block, since we
         // marked it as used now (which may be slightly bogus, but we'll
         // treat it as used for simplicity).
         GetListForBlock(bo)->AddFirstBlock(blockIndex);
         Verify();
       } else {
         // This may not be the latest readahead block, although it usually
         // will be. We may have to scan for the right place to insert
@@ -1643,17 +1643,18 @@ MediaCache::NoteBlockUsage(MediaCacheStr
   // The following check has to be <= because the stream offset has
   // not yet been updated for the data read from this block
   NS_ASSERTION(bo->mStreamBlock*BLOCK_SIZE <= aStreamOffset,
                "Using a block that's behind the read position?");
 
   GetListForBlock(bo)->RemoveBlock(aBlockIndex);
   bo->mClass =
     (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK)
-    ? METADATA_BLOCK : PLAYED_BLOCK;
+    ? METADATA_BLOCK
+    : PLAYED_BLOCK;
   // Since this is just being used now, it can definitely be at the front
   // of mMetadataBlocks or mPlayedBlocks
   GetListForBlock(bo)->AddFirstBlock(aBlockIndex);
   bo->mLastUseTime = aNow;
   Verify();
 }
 
 void
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -265,22 +265,24 @@ private:
 };
 
 typedef AlignedBuffer<uint8_t> AlignedByteBuffer;
 typedef AlignedBuffer<float> AlignedFloatBuffer;
 typedef AlignedBuffer<int16_t> AlignedShortBuffer;
 typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
 
 // Container that holds media samples.
-class MediaData {
+class MediaData
+{
 public:
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData)
 
-  enum Type {
+  enum Type
+  {
     AUDIO_DATA = 0,
     VIDEO_DATA,
     RAW_DATA,
     NULL_DATA
   };
 
   MediaData(Type aType,
             int64_t aOffset,
@@ -289,17 +291,18 @@ public:
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
     , mTimecode(aTimestamp)
     , mDuration(aDuration)
     , mFrames(aFrames)
     , mKeyframe(false)
-  {}
+  {
+  }
 
   // Type of contained data.
   const Type mType;
 
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
 
   // Start time of sample, in microseconds.
@@ -343,35 +346,38 @@ protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
     , mTime(0)
     , mTimecode(0)
     , mDuration(0)
     , mFrames(aFrames)
     , mKeyframe(false)
-  {}
+  {
+  }
 
   virtual ~MediaData() {}
 
 };
 
 // NullData is for decoder generating a sample which doesn't need to be
 // rendered.
-class NullData : public MediaData {
+class NullData : public MediaData
+{
 public:
   NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
     : MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
   {}
 
   static const Type sType = NULL_DATA;
 };
 
 // Holds chunk a decoded audio frames.
-class AudioData : public MediaData {
+class AudioData : public MediaData
+{
 public:
 
   AudioData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             uint32_t aFrames,
             AlignedAudioBuffer&& aData,
             uint32_t aChannels,
@@ -406,57 +412,61 @@ public:
   const uint32_t mRate;
   // At least one of mAudioBuffer/mAudioData must be non-null.
   // mChannels channels, each with mFrames frames
   RefPtr<SharedBuffer> mAudioBuffer;
   // mFrames frames, each with mChannels values
   AlignedAudioBuffer mAudioData;
 
 protected:
-  ~AudioData() {}
+  ~AudioData() { }
 };
 
 namespace layers {
 class TextureClient;
 class PlanarYCbCrImage;
 } // namespace layers
 
 class VideoInfo;
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.
-class VideoData : public MediaData {
+class VideoData : public MediaData
+{
 public:
   typedef gfx::IntRect IntRect;
   typedef gfx::IntSize IntSize;
   typedef layers::ImageContainer ImageContainer;
   typedef layers::Image Image;
   typedef layers::PlanarYCbCrImage PlanarYCbCrImage;
 
   static const Type sType = VIDEO_DATA;
   static const char* sTypeName;
 
   // YCbCr data obtained from decoding the video. The index's are:
   //   0 = Y
   //   1 = Cb
   //   2 = Cr
-  struct YCbCrBuffer {
-    struct Plane {
+  struct YCbCrBuffer
+  {
+    struct Plane
+    {
       uint8_t* mData;
       uint32_t mWidth;
       uint32_t mHeight;
       uint32_t mStride;
       uint32_t mOffset;
       uint32_t mSkip;
     };
 
     Plane mPlanes[3];
     YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
   };
 
-  class Listener {
+  class Listener
+  {
   public:
     virtual void OnSentToCompositor() = 0;
     virtual ~Listener() {}
   };
 
   // Constructs a VideoData object. If aImage is nullptr, creates a new Image
   // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
   // nullptr, it's stored as the underlying video image and aBuffer is assumed
@@ -464,54 +474,58 @@ public:
   // specific number representing the timestamp of the frame of video data.
   // Returns nsnull if an error occurs. This may indicate that memory couldn't
   // be allocated to create the VideoData object, or it may indicate some
   // problem with the input data (e.g. negative stride).
 
 
   // Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
   // to allocate an Image to hold the copied data.
-  static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
-                                                       ImageContainer* aContainer,
-                                                       int64_t aOffset,
-                                                       int64_t aTime,
-                                                       int64_t aDuration,
-                                                       const YCbCrBuffer &aBuffer,
-                                                       bool aKeyframe,
-                                                       int64_t aTimecode,
-                                                       const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyData(
+    const VideoInfo& aInfo,
+    ImageContainer* aContainer,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const YCbCrBuffer &aBuffer,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
-                                                       ImageContainer* aContainer,
-                                                       int64_t aOffset,
-                                                       int64_t aTime,
-                                                       int64_t aDuration,
-                                                       const YCbCrBuffer &aBuffer,
-                                                       const YCbCrBuffer::Plane &aAlphaPlane,
-                                                       bool aKeyframe,
-                                                       int64_t aTimecode,
-                                                       const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyData(
+    const VideoInfo& aInfo,
+    ImageContainer* aContainer,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const YCbCrBuffer &aBuffer,
+    const YCbCrBuffer::Plane &aAlphaPlane,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
-                                                                    int64_t aOffset,
-                                                                    int64_t aTime,
-                                                                    int64_t aDuration,
-                                                                    layers::TextureClient* aBuffer,
-                                                                    bool aKeyframe,
-                                                                    int64_t aTimecode,
-                                                                    const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
+    const VideoInfo& aInfo,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    layers::TextureClient* aBuffer,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
-                                                     int64_t aOffset,
-                                                     int64_t aTime,
-                                                     int64_t aDuration,
-                                                     const RefPtr<Image>& aImage,
-                                                     bool aKeyframe,
-                                                     int64_t aTimecode,
-                                                     const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateFromImage(
+    const VideoInfo& aInfo,
+    int64_t aOffset,
+    int64_t aTime,
+    int64_t aDuration,
+    const RefPtr<Image>& aImage,
+    bool aKeyframe,
+    int64_t aTimecode,
+    const IntRect& aPicture);
 
   // Initialize PlanarYCbCrImage. Only When aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                   const VideoInfo& aInfo,
                                   const YCbCrBuffer &aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
@@ -548,17 +562,17 @@ protected:
 
   bool mSentToCompositor;
   UniquePtr<Listener> mListener;
 };
 
 class CryptoTrack
 {
 public:
-  CryptoTrack() : mValid(false), mMode(0), mIVSize(0) {}
+  CryptoTrack() : mValid(false), mMode(0), mIVSize(0) { }
   bool mValid;
   int32_t mMode;
   int32_t mIVSize;
   nsTArray<uint8_t> mKeyId;
 };
 
 class CryptoSample : public CryptoTrack
 {
@@ -615,17 +629,18 @@ public:
 
 private:
   friend class MediaRawData;
   explicit MediaRawDataWriter(MediaRawData* aMediaRawData);
   bool EnsureSize(size_t aSize);
   MediaRawData* mTarget;
 };
 
-class MediaRawData : public MediaData {
+class MediaRawData : public MediaData
+{
 public:
   MediaRawData();
   MediaRawData(const uint8_t* aData, size_t aSize);
   MediaRawData(const uint8_t* aData, size_t aSize,
                const uint8_t* aAlphaData, size_t aAlphaSize);
 
   // Pointer to data or null if not-yet allocated
   const uint8_t* Data() const { return mBuffer.Data(); }
@@ -671,17 +686,17 @@ private:
   CryptoSample mCryptoInternal;
   MediaRawData(const MediaRawData&); // Not implemented
 };
 
   // MediaByteBuffer is a ref counted infallible TArray.
 class MediaByteBuffer : public nsTArray<uint8_t> {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
   MediaByteBuffer() = default;
-  explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) {}
+  explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) { }
 
 private:
-  ~MediaByteBuffer() {}
+  ~MediaByteBuffer() { }
 };
 
 } // namespace mozilla
 
 #endif // MediaData_h
--- a/dom/media/MediaDataDemuxer.h
+++ b/dom/media/MediaDataDemuxer.h
@@ -50,18 +50,18 @@ public:
   // 0 indicates that no such type is available.
   virtual uint32_t GetNumberTracks(TrackInfo::TrackType aType) const = 0;
 
   // Returns the MediaTrackDemuxer associated with aTrackNumber aType track.
   // aTrackNumber is not to be confused with the Track ID.
   // aTrackNumber must be constrained between  0 and  GetNumberTracks(aType) - 1
   // The actual Track ID is to be retrieved by calling
   // MediaTrackDemuxer::TrackInfo.
-  virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(TrackInfo::TrackType aType,
-                                                              uint32_t aTrackNumber) = 0;
+  virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(
+    TrackInfo::TrackType aType, uint32_t aTrackNumber) = 0;
 
   // Returns true if the underlying resource allows seeking.
   virtual bool IsSeekable() const = 0;
 
   // Returns true if the underlying resource can only seek within buffered
   // ranges.
   virtual bool IsSeekableOnlyInBufferedRanges() const { return false; }
 
@@ -96,37 +96,43 @@ protected:
   }
 };
 
 class MediaTrackDemuxer
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTrackDemuxer)
 
-  class SamplesHolder {
+  class SamplesHolder
+  {
   public:
     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder)
     nsTArray<RefPtr<MediaRawData>> mSamples;
   private:
-    ~SamplesHolder() {}
+    ~SamplesHolder() { }
   };
 
-  class SkipFailureHolder {
+  class SkipFailureHolder
+  {
   public:
     SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped)
       : mFailure(aFailure)
       , mSkipped(aSkipped)
     {}
     MediaResult mFailure;
     uint32_t mSkipped;
   };
 
-  typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true> SeekPromise;
-  typedef MozPromise<RefPtr<SamplesHolder>, MediaResult, /* IsExclusive = */ true> SamplesPromise;
-  typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true> SkipAccessPointPromise;
+  typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true>
+    SeekPromise;
+  typedef MozPromise<RefPtr<SamplesHolder>, MediaResult,
+                     /* IsExclusive = */ true>
+    SamplesPromise;
+  typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true>
+    SkipAccessPointPromise;
 
   // Returns the TrackInfo (a.k.a Track Description) for this track.
   // The TrackInfo returned will be:
   // TrackInfo::kVideoTrack -> VideoInfo.
   // TrackInfo::kAudioTrack -> AudioInfo.
   // respectively.
   virtual UniquePtr<TrackInfo> GetInfo() const = 0;
 
@@ -202,14 +208,14 @@ public:
 
   // If the MediaTrackDemuxer and MediaDataDemuxer hold cross references.
   // BreakCycles must be overridden.
   virtual void BreakCycles()
   {
   }
 
 protected:
-  virtual ~MediaTrackDemuxer() {}
+  virtual ~MediaTrackDemuxer() { }
 };
 
 } // namespace mozilla
 
 #endif // MediaDataDemuxer_h
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -87,26 +87,28 @@ class MediaMemoryTracker : public nsIMem
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
 
   MediaMemoryTracker();
   void InitMemoryReporter();
 
   static StaticRefPtr<MediaMemoryTracker> sUniqueInstance;
 
-  static MediaMemoryTracker* UniqueInstance() {
+  static MediaMemoryTracker* UniqueInstance()
+  {
     if (!sUniqueInstance) {
       sUniqueInstance = new MediaMemoryTracker();
       sUniqueInstance->InitMemoryReporter();
     }
     return sUniqueInstance;
   }
 
   typedef nsTArray<MediaDecoder*> DecodersArray;
-  static DecodersArray& Decoders() {
+  static DecodersArray& Decoders()
+  {
     return UniqueInstance()->mDecoders;
   }
 
   DecodersArray mDecoders;
 
 public:
   static void AddMediaDecoder(MediaDecoder* aDecoder)
   {
@@ -436,34 +438,36 @@ MediaDecoder::MediaDecoder(MediaDecoderO
   //
   // Initialize watchers.
   //
 
   // mDuration
   mWatchManager.Watch(mStateMachineDuration, &MediaDecoder::DurationChanged);
 
   // mStateMachineIsShutdown
-  mWatchManager.Watch(mStateMachineIsShutdown, &MediaDecoder::ShutdownBitChanged);
+  mWatchManager.Watch(mStateMachineIsShutdown,
+                      &MediaDecoder::ShutdownBitChanged);
 
   // readyState
   mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateReadyState);
   mWatchManager.Watch(mNextFrameStatus, &MediaDecoder::UpdateReadyState);
   // ReadyState computation depends on MediaDecoder::CanPlayThrough, which
   // depends on the download rate.
   mWatchManager.Watch(mBuffered, &MediaDecoder::UpdateReadyState);
 
   // mLogicalPosition
   mWatchManager.Watch(mCurrentPosition, &MediaDecoder::UpdateLogicalPosition);
   mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateLogicalPosition);
   mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::UpdateLogicalPosition);
 
   // mIgnoreProgressData
   mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::SeekingChanged);
 
-  mWatchManager.Watch(mIsAudioDataAudible, &MediaDecoder::NotifyAudibleStateChanged);
+  mWatchManager.Watch(mIsAudioDataAudible,
+                      &MediaDecoder::NotifyAudibleStateChanged);
 
   MediaShutdownManager::Instance().Register(this);
 }
 
 #undef INIT_MIRROR
 #undef INIT_CANONICAL
 
 void
@@ -643,18 +647,19 @@ MediaDecoder::SetStateMachineParameters(
   MOZ_ASSERT(NS_IsMainThread());
   if (mPlaybackRate != 1 && mPlaybackRate != 0) {
     mDecoderStateMachine->DispatchSetPlaybackRate(mPlaybackRate);
   }
   mTimedMetadataListener = mDecoderStateMachine->TimedMetadataEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnMetadataUpdate);
   mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::MetadataLoaded);
-  mFirstFrameLoadedListener = mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
-    mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
+  mFirstFrameLoadedListener =
+    mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
+      mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
 
   mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnPlaybackEvent);
   mOnPlaybackErrorEvent = mDecoderStateMachine->OnPlaybackErrorEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnPlaybackErrorEvent);
   mOnDecoderDoctorEvent = mDecoderStateMachine->OnDecoderDoctorEvent().Connect(
     mAbstractMainThread, this, &MediaDecoder::OnDecoderDoctorEvent);
   mOnMediaNotSeekable = mDecoderStateMachine->OnMediaNotSeekable().Connect(
@@ -690,17 +695,18 @@ MediaDecoder::Play()
     return NS_OK;
   }
 
   ChangeState(PLAY_STATE_PLAYING);
   return NS_OK;
 }
 
 nsresult
-MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType, dom::Promise* aPromise /*=nullptr*/)
+MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType,
+                   dom::Promise* aPromise /*=nullptr*/)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
   MOZ_ASSERT(aTime >= 0.0, "Cannot seek to a negative value.");
 
   int64_t timeUsecs = TimeUnit::FromSeconds(aTime).ToMicroseconds();
 
@@ -808,17 +814,18 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<M
   mMediaSeekableOnlyInBufferedRanges = aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.forget();
   ConstructMediaTracks();
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
-    GetOwner()->MetadataLoaded(mInfo, nsAutoPtr<const MetadataTags>(aTags.forget()));
+    GetOwner()->MetadataLoaded(mInfo,
+                               nsAutoPtr<const MetadataTags>(aTags.forget()));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
   // dimensions retrieved from the video frame container. The video frame
   // container contains more up to date dimensions than aInfo.
   // So we call Invalidate() after calling GetOwner()->MetadataLoaded to ensure
   // the media element has the latest dimensions.
   Invalidate();
 
@@ -833,25 +840,27 @@ MediaDecoder::EnsureTelemetryReported()
   if (mTelemetryReported || !mInfo) {
     // Note: sometimes we get multiple MetadataLoaded calls (for example
     // for chained ogg). So we ensure we don't report duplicate results for
     // these resources.
     return;
   }
 
   nsTArray<nsCString> codecs;
-  if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasAudio()
+      && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
   }
-  if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
+  if (mInfo->HasVideo()
+      && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
     codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
   }
   if (codecs.IsEmpty()) {
-    codecs.AppendElement(nsPrintfCString("resource; %s",
-                                         mResource->GetContentType().OriginalString().Data()));
+    codecs.AppendElement(nsPrintfCString(
+      "resource; %s", mResource->GetContentType().OriginalString().Data()));
   }
   for (const nsCString& codec : codecs) {
     DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
     Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec);
   }
 
   mTelemetryReported = true;
 }
@@ -865,19 +874,20 @@ MediaDecoder::PlayStateStr()
 
 void
 MediaDecoder::FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
                                MediaDecoderEventVisibility aEventVisibility)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
-  DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s",
-              aInfo->mAudio.mChannels, aInfo->mAudio.mRate,
-              aInfo->HasAudio(), aInfo->HasVideo(), PlayStateStr());
+  DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d "
+              "mPlayState=%s",
+              aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
+              aInfo->HasVideo(), PlayStateStr());
 
   mInfo = aInfo.forget();
 
   Invalidate();
 
   // This can run cache callbacks.
   mResource->EnsureCacheUpToDate();
 
@@ -957,18 +967,20 @@ public:
 private:
   WeakPtr<HTMLMediaElement> mElement;
 };
 
 already_AddRefed<GMPCrashHelper>
 MediaDecoder::GetCrashHelper()
 {
   MOZ_ASSERT(NS_IsMainThread());
-  return GetOwner()->GetMediaElement() ?
-    MakeAndAddRef<MediaElementGMPCrashHelper>(GetOwner()->GetMediaElement()) : nullptr;
+  return GetOwner()->GetMediaElement()
+         ? MakeAndAddRef<MediaElementGMPCrashHelper>(
+             GetOwner()->GetMediaElement())
+         : nullptr;
 }
 
 bool
 MediaDecoder::IsEnded() const
 {
   MOZ_ASSERT(NS_IsMainThread());
   return mPlayState == PLAY_STATE_ENDED;
 }
@@ -994,48 +1006,50 @@ MediaDecoder::PlaybackEnded()
   }
 
   DECODER_LOG("MediaDecoder::PlaybackEnded");
 
   ChangeState(PLAY_STATE_ENDED);
   InvalidateWithFlags(VideoFrameContainer::INVALIDATE_FORCE);
   GetOwner()->PlaybackEnded();
 
-  // This must be called after |GetOwner()->PlaybackEnded()| call above, in order
-  // to fire the required durationchange.
+  // This must be called after |GetOwner()->PlaybackEnded()| call above, in
+  // order to fire the required durationchange.
   if (IsInfinite()) {
     SetInfinite(false);
   }
 }
 
 MediaStatistics
 MediaDecoder::GetStatistics()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mResource);
 
   MediaStatistics result;
-  result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
+  result.mDownloadRate =
+    mResource->GetDownloadRate(&result.mDownloadRateReliable);
   result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
   result.mTotalBytes = mResource->GetLength();
   result.mPlaybackRate = mPlaybackBytesPerSecond;
   result.mPlaybackRateReliable = mPlaybackRateReliable;
   result.mDecoderPosition = mDecoderPosition;
   result.mPlaybackPosition = mPlaybackPosition;
   return result;
 }
 
 void
 MediaDecoder::ComputePlaybackRate()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mResource);
 
   int64_t length = mResource->GetLength();
-  if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration) && length >= 0) {
+  if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration)
+      && length >= 0) {
     mPlaybackRateReliable = true;
     mPlaybackBytesPerSecond = length / mDuration;
     return;
   }
 
   bool reliable = false;
   mPlaybackBytesPerSecond = mPlaybackStatistics->GetRateAtLastStop(&reliable);
   mPlaybackRateReliable = reliable;
@@ -1194,17 +1208,18 @@ MediaDecoder::ChangeState(PlayState aSta
 }
 
 void
 MediaDecoder::UpdateLogicalPositionInternal()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
 
-  double currentPosition = static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
+  double currentPosition =
+    static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
   if (mPlayState == PLAY_STATE_ENDED) {
     currentPosition = std::max(currentPosition, mDuration);
   }
   bool logicalPositionChanged = mLogicalPosition != currentPosition;
   mLogicalPosition = currentPosition;
 
   // Invalidate the frame so any video data is displayed.
   // Do this before the timeupdate event so that if that
@@ -1238,18 +1253,19 @@ MediaDecoder::DurationChanged()
 
   DECODER_LOG("Duration changed to %f", mDuration);
 
   // Duration has changed so we should recompute playback rate
   UpdatePlaybackRate();
 
   // See https://www.w3.org/Bugs/Public/show_bug.cgi?id=28822 for a discussion
   // of whether we should fire durationchange on explicit infinity.
-  if (mFiredMetadataLoaded &&
-      (!mozilla::IsInfinite<double>(mDuration) || mExplicitDuration.Ref().isSome())) {
+  if (mFiredMetadataLoaded
+      && (!mozilla::IsInfinite<double>(mDuration)
+          || mExplicitDuration.Ref().isSome())) {
     GetOwner()->DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   }
 
   if (CurrentPosition() > TimeUnit::FromSeconds(mDuration).ToMicroseconds()) {
     Seek(mDuration, SeekTarget::Accurate);
   }
 }
 
@@ -1277,18 +1293,20 @@ MediaDecoder::UpdateEstimatedMediaDurati
   if (mPlayState <= PLAY_STATE_LOADING) {
     return;
   }
 
   // The duration is only changed if its significantly different than the
   // the current estimate, as the incoming duration is an estimate and so
   // often is unstable as more data is read and the estimate is updated.
   // Can result in a durationchangeevent. aDuration is in microseconds.
-  if (mEstimatedDuration.Ref().isSome() &&
-      mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds() - aDuration) < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
+  if (mEstimatedDuration.Ref().isSome()
+      && mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds()
+                      - aDuration)
+         < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
     return;
   }
 
   mEstimatedDuration = Some(TimeUnit::FromMicroseconds(aDuration));
 }
 
 bool
 MediaDecoder::IsTransportSeekable()
@@ -1322,28 +1340,29 @@ MediaDecoder::GetSeekable()
     return GetBuffered();
   } else if (!IsMediaSeekable()) {
     return media::TimeIntervals();
   } else if (!IsTransportSeekable()) {
     return GetBuffered();
   } else {
     return media::TimeIntervals(
       media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
-                          IsInfinite() ?
-                            media::TimeUnit::FromInfinity() :
-                            media::TimeUnit::FromSeconds(GetDuration())));
+                          IsInfinite()
+                          ? media::TimeUnit::FromInfinity()
+                          : media::TimeUnit::FromSeconds(GetDuration())));
   }
 }
 
 void
 MediaDecoder::SetFragmentEndTime(double aTime)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
-    mDecoderStateMachine->DispatchSetFragmentEndTime(static_cast<int64_t>(aTime * USECS_PER_S));
+    mDecoderStateMachine->DispatchSetFragmentEndTime(
+      static_cast<int64_t>(aTime * USECS_PER_S));
   }
 }
 
 void
 MediaDecoder::Suspend()
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (mResource) {
@@ -1438,17 +1457,18 @@ MediaDecoder::SetStateMachine(MediaDecod
   } else {
     DisconnectMirrors();
   }
 }
 
 ImageContainer*
 MediaDecoder::GetImageContainer()
 {
-  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() : nullptr;
+  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
+                              : nullptr;
 }
 
 void
 MediaDecoder::InvalidateWithFlags(uint32_t aFlags)
 {
   if (mVideoFrameContainer) {
     mVideoFrameContainer->InvalidateWithFlags(aFlags);
   }
@@ -1460,56 +1480,63 @@ MediaDecoder::Invalidate()
   if (mVideoFrameContainer) {
     mVideoFrameContainer->Invalidate();
   }
 }
 
 // Constructs the time ranges representing what segments of the media
 // are buffered and playable.
 media::TimeIntervals
-MediaDecoder::GetBuffered() {
+MediaDecoder::GetBuffered()
+{
   MOZ_ASSERT(NS_IsMainThread());
   return mBuffered.Ref();
 }
 
 size_t
-MediaDecoder::SizeOfVideoQueue() {
+MediaDecoder::SizeOfVideoQueue()
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfVideoQueue();
   }
   return 0;
 }
 
 size_t
-MediaDecoder::SizeOfAudioQueue() {
+MediaDecoder::SizeOfAudioQueue()
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (mDecoderStateMachine) {
     return mDecoderStateMachine->SizeOfAudioQueue();
   }
   return 0;
 }
 
-void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes) {
+void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes)
+{
   MOZ_ASSERT(NS_IsMainThread());
   if (GetResource()) {
-    aSizes->mByteSize += GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
+    aSizes->mByteSize +=
+      GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
   }
 }
 
 void
-MediaDecoder::NotifyDataArrived() {
+MediaDecoder::NotifyDataArrived()
+{
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
   mDataArrivedEvent.Notify();
 }
 
 // Provide access to the state machine object
 MediaDecoderStateMachine*
-MediaDecoder::GetStateMachine() const {
+MediaDecoder::GetStateMachine() const
+{
   MOZ_ASSERT(NS_IsMainThread());
   return mDecoderStateMachine;
 }
 
 void
 MediaDecoder::FireTimeUpdate()
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -1587,38 +1614,37 @@ MediaDecoder::IsWebMEnabled()
 {
   return Preferences::GetBool("media.webm.enabled");
 }
 
 #ifdef MOZ_ANDROID_OMX
 bool
 MediaDecoder::IsAndroidMediaPluginEnabled()
 {
-  return AndroidBridge::Bridge() &&
-         AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
-         Preferences::GetBool("media.plugins.enabled");
+  return AndroidBridge::Bridge()
+         && AndroidBridge::Bridge()->GetAPIVersion() < 16
+         && Preferences::GetBool("media.plugins.enabled");
 }
 #endif
 
 NS_IMETHODIMP
 MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
                                    nsISupports* aData, bool aAnonymize)
 {
-  int64_t video = 0, audio = 0;
-
   // NB: When resourceSizes' ref count goes to 0 the promise will report the
   //     resources memory and finish the asynchronous memory report.
   RefPtr<MediaDecoder::ResourceSizes> resourceSizes =
       new MediaDecoder::ResourceSizes(MediaMemoryTracker::MallocSizeOf);
 
   nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
   nsCOMPtr<nsISupports> data = aData;
 
   resourceSizes->Promise()->Then(
-      // Non-DocGroup version of AbstractThread::MainThread is fine for memory report.
+      // Non-DocGroup version of AbstractThread::MainThread is fine for memory
+      // report.
       AbstractThread::MainThread(),
       __func__,
       [handleReport, data] (size_t size) {
         handleReport->Callback(
             EmptyCString(), NS_LITERAL_CSTRING("explicit/media/resources"),
             KIND_HEAP, UNITS_BYTES, size,
             NS_LITERAL_CSTRING("Memory used by media resources including "
                                "streaming buffers, caches, etc."),
@@ -1628,16 +1654,18 @@ MediaMemoryTracker::CollectReports(nsIHa
           do_GetService("@mozilla.org/memory-reporter-manager;1");
 
         if (imgr) {
           imgr->EndReport();
         }
       },
       [] (size_t) { /* unused reject function */ });
 
+  int64_t video = 0;
+  int64_t audio = 0;
   DecodersArray& decoders = Decoders();
   for (size_t i = 0; i < decoders.Length(); ++i) {
     MediaDecoder* decoder = decoders[i];
     video += decoder->SizeOfVideoQueue();
     audio += decoder->SizeOfAudioQueue();
     decoder->AddSizeOfResources(resourceSizes);
   }
 
@@ -1726,21 +1754,23 @@ MediaDecoder::RemoveMediaTracks()
 MediaDecoderOwner::NextFrameStatus
 MediaDecoder::NextFrameBufferedStatus()
 {
   MOZ_ASSERT(NS_IsMainThread());
   // Next frame hasn't been decoded yet.
   // Use the buffered range to consider if we have the next frame available.
   media::TimeUnit currentPosition =
     media::TimeUnit::FromMicroseconds(CurrentPosition());
-  media::TimeInterval interval(currentPosition,
-                               currentPosition + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
+  media::TimeInterval interval(
+    currentPosition,
+    currentPosition
+    + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
   return GetBuffered().Contains(interval)
-    ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
-    : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
+         ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
+         : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
 }
 
 nsCString
 MediaDecoder::GetDebugInfo()
 {
   return nsPrintfCString(
     "channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s mdsm=%p",
     mInfo ? mInfo->mAudio.mChannels : 0, mInfo ? mInfo->mAudio.mRate : 0,
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -57,17 +57,18 @@ enum class MediaEventType : int8_t;
 #undef GetCurrentTime
 #endif
 
 class MediaDecoder : public AbstractMediaDecoder
 {
 public:
   // Used to register with MediaResource to receive notifications which will
   // be forwarded to MediaDecoder.
-  class ResourceCallback : public MediaResourceCallback {
+  class ResourceCallback : public MediaResourceCallback
+  {
     // Throttle calls to MediaDecoder::NotifyDataArrived()
     // to be at most once per 500ms.
     static const uint32_t sDelay = 500;
 
   public:
     explicit ResourceCallback(AbstractThread* aMainThread);
     // Start to receive notifications from ResourceCallback.
     void Connect(MediaDecoder* aDecoder);
@@ -91,22 +92,25 @@ public:
 
     // The decoder to send notifications. Main-thread only.
     MediaDecoder* mDecoder = nullptr;
     nsCOMPtr<nsITimer> mTimer;
     bool mTimerArmed = false;
     const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
-  typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */, /* IsExclusive = */ true> SeekPromise;
+  typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */,
+                     /* IsExclusive = */ true>
+    SeekPromise;
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   // Enumeration for the valid play states (see mPlayState)
-  enum PlayState {
+  enum PlayState
+  {
     PLAY_STATE_START,
     PLAY_STATE_LOADING,
     PLAY_STATE_PAUSED,
     PLAY_STATE_PLAYING,
     PLAY_STATE_ENDED,
     PLAY_STATE_SHUTDOWN
   };
 
@@ -202,17 +206,18 @@ public:
   // is used as the input for each ProcessedMediaStream created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
   // Add an output stream. All decoder output will be sent to the stream.
   // The stream is initially blocked. The decoder is responsible for unblocking
   // it while it is playing back.
-  virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
+  virtual void AddOutputStream(ProcessedMediaStream* aStream,
+                               bool aFinishWhenEnded);
   // Remove an output stream added with AddOutputStream.
   virtual void RemoveOutputStream(MediaStream* aStream);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
 
   // Return true if the stream is infinite (see SetInfinite).
   bool IsInfinite() const;
@@ -425,17 +430,19 @@ private:
 
   MediaDecoderOwner* GetOwner() const override;
 
   AbstractThread* AbstractMainThread() const final override
   {
     return mAbstractMainThread;
   }
 
-  typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */, /* IsExclusive = */ true> CDMProxyPromise;
+  typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */,
+                     /* IsExclusive = */ true>
+    CDMProxyPromise;
 
   // Resolved when a CDMProxy is available and the capabilities are known or
   // rejected when this decoder is about to shut down.
   RefPtr<CDMProxyPromise> RequestCDMProxy() const;
 
   void SetCDMProxy(CDMProxy* aProxy);
 
   void EnsureTelemetryReported();
@@ -471,22 +478,25 @@ private:
 
   void UpdateReadyState()
   {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
     GetOwner()->UpdateReadyState();
   }
 
-  virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus() { return mNextFrameStatus; }
+  virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus()
+  {
+    return mNextFrameStatus;
+  }
   virtual MediaDecoderOwner::NextFrameStatus NextFrameBufferedStatus();
 
   // Returns a string describing the state of the media player internal
   // data. Used for debugging purposes.
-  virtual void GetMozDebugReaderData(nsACString& aString) {}
+  virtual void GetMozDebugReaderData(nsACString& aString) { }
 
   virtual void DumpDebugInfo();
 
   using DebugInfoPromise = MozPromise<nsCString, bool, true>;
   RefPtr<DebugInfoPromise> RequestDebugInfo();
 
 protected:
   virtual ~MediaDecoder();
@@ -782,55 +792,56 @@ protected:
   // back again.
   Canonical<int64_t> mDecoderPosition;
 
   // True if the decoder is visible.
   Canonical<bool> mIsVisible;
 
 public:
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() override;
-  AbstractCanonical<double>* CanonicalVolume() {
-    return &mVolume;
-  }
-  AbstractCanonical<bool>* CanonicalPreservesPitch() {
+  AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
+  AbstractCanonical<bool>* CanonicalPreservesPitch()
+  {
     return &mPreservesPitch;
   }
-  AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration() {
+  AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration()
+  {
     return &mEstimatedDuration;
   }
-  AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration() {
+  AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration()
+  {
     return &mExplicitDuration;
   }
-  AbstractCanonical<PlayState>* CanonicalPlayState() {
-    return &mPlayState;
-  }
-  AbstractCanonical<PlayState>* CanonicalNextPlayState() {
-    return &mNextState;
-  }
-  AbstractCanonical<bool>* CanonicalLogicallySeeking() {
+  AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
+  AbstractCanonical<PlayState>* CanonicalNextPlayState() { return &mNextState; }
+  AbstractCanonical<bool>* CanonicalLogicallySeeking()
+  {
     return &mLogicallySeeking;
   }
-  AbstractCanonical<bool>* CanonicalSameOriginMedia() {
+  AbstractCanonical<bool>* CanonicalSameOriginMedia()
+  {
     return &mSameOriginMedia;
   }
-  AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle() {
+  AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle()
+  {
     return &mMediaPrincipalHandle;
   }
-  AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond() {
+  AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond()
+  {
     return &mPlaybackBytesPerSecond;
   }
-  AbstractCanonical<bool>* CanonicalPlaybackRateReliable() {
+  AbstractCanonical<bool>* CanonicalPlaybackRateReliable()
+  {
     return &mPlaybackRateReliable;
   }
-  AbstractCanonical<int64_t>* CanonicalDecoderPosition() {
+  AbstractCanonical<int64_t>* CanonicalDecoderPosition()
+  {
     return &mDecoderPosition;
   }
-  AbstractCanonical<bool>* CanonicalIsVisible() {
-    return &mIsVisible;
-  }
+  AbstractCanonical<bool>* CanonicalIsVisible() { return &mIsVisible; }
 
 private:
   // Notify owner when the audible state changed
   void NotifyAudibleStateChanged();
 
   /* Functions called by ResourceCallback */
 
   // A media stream is assumed to be infinite if the metadata doesn't
--- a/dom/media/MediaDecoderReader.h
+++ b/dom/media/MediaDecoderReader.h
@@ -24,57 +24,61 @@
 
 namespace mozilla {
 
 class CDMProxy;
 class MediaDecoderReader;
 
 struct WaitForDataRejectValue
 {
-  enum Reason {
+  enum Reason
+  {
     SHUTDOWN,
     CANCELED
   };
 
   WaitForDataRejectValue(MediaData::Type aType, Reason aReason)
-    :mType(aType), mReason(aReason) {}
+    : mType(aType), mReason(aReason)
+  {
+  }
   MediaData::Type mType;
   Reason mReason;
 };
 
 struct SeekRejectValue
 {
   MOZ_IMPLICIT SeekRejectValue(const MediaResult& aError)
-    : mType(MediaData::NULL_DATA), mError(aError) {}
+    : mType(MediaData::NULL_DATA), mError(aError) { }
   MOZ_IMPLICIT SeekRejectValue(nsresult aResult)
-    : mType(MediaData::NULL_DATA), mError(aResult) {}
+    : mType(MediaData::NULL_DATA), mError(aResult) { }
   SeekRejectValue(MediaData::Type aType, const MediaResult& aError)
-    : mType(aType), mError(aError) {}
+    : mType(aType), mError(aError) { }
   MediaData::Type mType;
   MediaResult mError;
 };
 
 class MetadataHolder
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MetadataHolder)
   MediaInfo mInfo;
   nsAutoPtr<MetadataTags> mTags;
 
 private:
-  virtual ~MetadataHolder() {}
+  virtual ~MetadataHolder() { }
 };
 
 // Encapsulates the decoding and reading of media data. Reading can either
 // synchronous and done on the calling "decode" thread, or asynchronous and
 // performed on a background thread, with the result being returned by
 // callback.
 // Unless otherwise specified, methods and fields of this class can only
 // be accessed on the decode task queue.
-class MediaDecoderReader {
+class MediaDecoderReader
+{
   friend class ReRequestVideoWithSkipTask;
   friend class ReRequestAudioTask;
 
   static const bool IsExclusive = true;
 
 public:
   using TrackSet = EnumSet<TrackInfo::TrackType>;
 
@@ -99,17 +103,17 @@ public:
 
   // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
   // on failure.
   nsresult Init();
 
   // Called by MDSM in dormant state to release resources allocated by this
   // reader. The reader can resume decoding by calling Seek() to a specific
   // position.
-  virtual void ReleaseResources() {}
+  virtual void ReleaseResources() { }
 
   // Destroys the decoding state. The reader cannot be made usable again.
   // This is different from ReleaseMediaResources() as it is irreversable,
   // whereas ReleaseMediaResources() is.  Must be called on the decode
   // thread.
   virtual RefPtr<ShutdownPromise> Shutdown();
 
   virtual bool OnTaskQueue() const
@@ -123,18 +127,19 @@ public:
   // decoder must not call any of the callbacks for outstanding
   // Request*Data() calls after this is called. Calls to Request*Data()
   // made after this should be processed as usual.
   //
   // Normally this call preceedes a Seek() call, or shutdown.
   //
   // aParam is a set of TrackInfo::TrackType enums specifying which
   // queues need to be reset, defaulting to both audio and video tracks.
-  virtual nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
-                                                           TrackInfo::kVideoTrack));
+  virtual nsresult ResetDecode(
+    TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
+                                TrackInfo::kVideoTrack));
 
   // Requests one audio sample from the reader.
   //
   // The decode should be performed asynchronously, and the promise should
   // be resolved when it is complete.
   virtual RefPtr<MediaDataPromise> RequestAudioData();
 
   // Requests one video sample from the reader.
@@ -225,33 +230,45 @@ public:
   // decoding.
   virtual bool VideoIsHardwareAccelerated() const { return false; }
 
   TimedMetadataEventSource& TimedMetadataEvent()
   {
     return mTimedMetadataEvent;
   }
 
-  // Notified by the OggReader during playback when chained ogg is detected.
+  // Notified by the OggDemuxer during playback when chained ogg is detected.
   MediaEventSource<void>& OnMediaNotSeekable() { return mOnMediaNotSeekable; }
 
   TimedMetadataEventProducer& TimedMetadataProducer()
   {
     return mTimedMetadataEvent;
   }
 
   MediaEventProducer<void>& MediaNotSeekableProducer()
   {
     return mOnMediaNotSeekable;
   }
 
+  // Notified if the reader can't decode a sample due to a missing decryption
+  // key.
+  MediaEventSource<TrackInfo::TrackType>& OnTrackWaitingForKey()
+  {
+    return mOnTrackWaitingForKey;
+  }
+
+  MediaEventProducer<TrackInfo::TrackType>& OnTrackWaitingForKeyProducer()
+  {
+    return mOnTrackWaitingForKey;
+  }
+
   // Switch the video decoder to BlankDecoderModule. It might takes effective
   // since a few samples later depends on how much demuxed samples are already
   // queued in the original video decoder.
-  virtual void SetVideoBlankDecode(bool aIsBlankDecode) {}
+  virtual void SetVideoBlankDecode(bool aIsBlankDecode) { }
 
 protected:
   virtual ~MediaDecoderReader();
 
   // Recomputes mBuffered.
   virtual void UpdateBuffered();
 
   RefPtr<MediaDataPromise> DecodeToFirstVideoData();
@@ -301,16 +318,19 @@ protected:
   bool mShutdown;
 
   // Used to send TimedMetadata to the listener.
   TimedMetadataEventProducer mTimedMetadataEvent;
 
   // Notify if this media is not seekable.
   MediaEventProducer<void> mOnMediaNotSeekable;
 
+  // Notify if we are waiting for a decryption key.
+  MediaEventProducer<TrackInfo::TrackType> mOnTrackWaitingForKey;
+
 private:
   virtual nsresult InitInternal() { return NS_OK; }
 
   // Does any spinup that needs to happen on this task queue. This runs on a
   // different thread than Init, and there should not be ordering dependencies
   // between the two (even though in practice, Init will always run first right
   // now thanks to the tail dispatcher).
   void InitializationTask();
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -134,34 +134,37 @@ static const int64_t LOW_DATA_THRESHOLD_
 static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
               "LOW_DATA_THRESHOLD_USECS is too small");
 
 } // namespace detail
 
 // Amount of excess usecs of data to add in to the "should we buffer" calculation.
 static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 100000;
 
-static int64_t DurationToUsecs(TimeDuration aDuration) {
+static int64_t DurationToUsecs(TimeDuration aDuration)
+{
   return static_cast<int64_t>(aDuration.ToSeconds() * USECS_PER_S);
 }
 
 static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;
 static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10;
 #ifdef MOZ_APPLEMEDIA
 static const uint32_t HW_VIDEO_QUEUE_SIZE = 10;
 #else
 static const uint32_t HW_VIDEO_QUEUE_SIZE = 3;
 #endif
 static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999;
 
 static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE;
 static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE;
-static uint32_t sVideoQueueSendToCompositorSize = VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;
-
-static void InitVideoQueuePrefs() {
+static uint32_t sVideoQueueSendToCompositorSize =
+  VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;
+
+static void InitVideoQueuePrefs()
+{
   MOZ_ASSERT(NS_IsMainThread());
   static bool sPrefInit = false;
   if (!sPrefInit) {
     sPrefInit = true;
     sVideoQueueDefaultSize = Preferences::GetUint(
       "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE);
     sVideoQueueHWAccelSize = Preferences::GetUint(
       "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE);
@@ -177,24 +180,24 @@ SuspendBackgroundVideoDelay()
 {
   return TimeDuration::FromMilliseconds(
     MediaPrefs::MDSMSuspendBackgroundVideoDelay());
 }
 
 class MediaDecoderStateMachine::StateObject
 {
 public:
-  virtual ~StateObject() {}
-  virtual void Exit() {};  // Exit action.
-  virtual void Step() {}   // Perform a 'cycle' of this state object.
+  virtual ~StateObject() { }
+  virtual void Exit() { }   // Exit action.
+  virtual void Step() { }   // Perform a 'cycle' of this state object.
   virtual State GetState() const = 0;
 
   // Event handlers for various events.
-  virtual void HandleCDMProxyReady() {}
-  virtual void HandleAudioCaptured() {}
+  virtual void HandleCDMProxyReady() { }
+  virtual void HandleAudioCaptured() { }
   virtual void HandleAudioDecoded(MediaData* aAudio)
   {
     Crash("Unexpected event!", __func__);
   }
   virtual void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart)
   {
     Crash("Unexpected event!", __func__);
   }
@@ -245,17 +248,18 @@ public:
 
 private:
   template <class S, typename R, typename... As>
   auto ReturnTypeHelper(R(S::*)(As...)) -> R;
 
   void Crash(const char* aReason, const char* aSite)
   {
     char buf[1024];
-    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason, ToStateStr(GetState()), aSite);
+    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason,
+                   ToStateStr(GetState()), aSite);
     MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__);
     MOZ_CRASH();
   }
 
 protected:
   enum class EventVisibility : int8_t
   {
     Observable,
@@ -267,37 +271,37 @@ protected:
   TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; }
   MediaResource* Resource() const { return mMaster->mResource; }
   MediaDecoderReaderWrapper* Reader() const { return mMaster->mReader; }
   const MediaInfo& Info() const { return mMaster->Info(); }
   bool IsExpectingMoreData() const
   {
     // We are expecting more data if either the resource states so, or if we
     // have a waiting promise pending (such as with non-MSE EME).
-    return Resource()->IsExpectingMoreData() ||
-           mMaster->IsWaitingAudioData() ||
-           mMaster->IsWaitingVideoData();
+    return Resource()->IsExpectingMoreData()
+           || mMaster->IsWaitingAudioData()
+           || mMaster->IsWaitingVideoData();
   }
   MediaQueue<MediaData>& AudioQueue() const { return mMaster->mAudioQueue; }
   MediaQueue<MediaData>& VideoQueue() const { return mMaster->mVideoQueue; }
 
   // Note this function will delete the current state object.
   // Don't access members to avoid UAF after this call.
   template <class S, typename... Ts>
   auto SetState(Ts... aArgs)
     -> decltype(ReturnTypeHelper(&S::Enter))
   {
     // keep mMaster in a local object because mMaster will become invalid after
     // the current state object is deleted.
     auto master = mMaster;
 
     auto s = new S(master);
 
-    MOZ_ASSERT(GetState() != s->GetState() ||
-               GetState() == DECODER_STATE_SEEKING);
+    MOZ_ASSERT(GetState() != s->GetState()
+               || GetState() == DECODER_STATE_SEEKING);
 
     SLOG("change state to: %s", ToStateStr(s->GetState()));
 
     Exit();
 
     master->mStateObj.reset(s);
     return s->Enter(Move(aArgs)...);
   }
@@ -317,17 +321,17 @@ protected:
  *   SHUTDOWN if failing to decode metadata.
  *   WAIT_FOR_CDM if the media is encrypted and CDM is not available.
  *   DECODING_FIRSTFRAME otherwise.
  */
 class MediaDecoderStateMachine::DecodeMetadataState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
     MOZ_ASSERT(!mMetadataRequest.Exists());
     SLOG("Dispatching AsyncReadMetadata");
 
     // Set mode to METADATA since we are about to read metadata.
@@ -392,17 +396,17 @@ private:
  * Transition to other states when CDM is ready:
  *   SEEKING if any pending seek request.
  *   DECODING_FIRSTFRAME otherwise.
  */
 class MediaDecoderStateMachine::WaitForCDMState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit WaitForCDMState(Master* aPtr) : StateObject(aPtr) {}
+  explicit WaitForCDMState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
   }
 
   void Exit() override
   {
@@ -446,32 +450,32 @@ private:
  *
  * Transition to:
  *   SEEKING if any seek request or play state changes to PLAYING.
  */
 class MediaDecoderStateMachine::DormantState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DormantState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DormantState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     if (mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     // Calculate the position to seek to when exiting dormant.
-    auto t = mMaster->mMediaSink->IsStarted()
-      ? mMaster->GetClock()
-      : mMaster->GetMediaTime();
+    auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
+                                              : mMaster->GetMediaTime();
     mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate);
     // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we
     // need to create the promise even it is not used at all.
-    RefPtr<MediaDecoder::SeekPromise> x = mPendingSeek.mPromise.Ensure(__func__);
+    RefPtr<MediaDecoder::SeekPromise> x =
+      mPendingSeek.mPromise.Ensure(__func__);
 
     mMaster->ResetDecode();
     mMaster->StopMediaSink();
     mMaster->mReader->ReleaseResources();
   }
 
   void Exit() override
   {
@@ -508,17 +512,17 @@ private:
  *   SHUTDOWN if any decode error.
  *   SEEKING if any seek request.
  *   DECODING when the 'loadeddata' event is fired.
  */
 class MediaDecoderStateMachine::DecodingFirstFrameState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {}
+  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter();
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING_FIRSTFRAME;
   }
 
@@ -573,17 +577,18 @@ public:
 
   void HandleVideoWaited(MediaData::Type aType) override
   {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   void HandleVideoSuspendTimeout() override
   {
-    // Do nothing for we need to decode the 1st video frame to get the dimensions.
+    // Do nothing because we need to decode the 1st video frame to get the
+    // dimensions.
   }
 
   void HandleResumeVideoDecoding() override
   {
     // We never suspend video decoding in this state.
     MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
   }
 
@@ -623,32 +628,32 @@ public:
     }
     mDormantTimer.Reset();
     mOnAudioPopped.DisconnectIfExists();
     mOnVideoPopped.DisconnectIfExists();
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
-        mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
+        && mMaster->IsPlaying()) {
       // We're playing, but the element/decoder is in paused state. Stop
       // playing!
       mMaster->StopPlayback();
     }
 
     // Start playback if necessary so that the clock can be properly queried.
     if (!mIsPrerolling) {
       mMaster->MaybeStartPlayback();
     }
 
     mMaster->UpdatePlaybackPositionPeriodically();
 
-    MOZ_ASSERT(!mMaster->IsPlaying() ||
-               mMaster->IsStateMachineScheduled(),
+    MOZ_ASSERT(!mMaster->IsPlaying()
+               || mMaster->IsStateMachineScheduled(),
                "Must have timer scheduled");
 
     MaybeStartBuffering();
   }
 
   State GetState() const override
   {
     return DECODER_STATE_DECODING;
@@ -760,18 +765,18 @@ private:
     // since they decode audio and video on different threads so they
     // are unlikely to run out of decoded audio.
     if (Reader()->IsAsync()) {
       return;
     }
 
     TimeDuration decodeTime = TimeStamp::Now() - aDecodeStart;
     int64_t adjustedTime = THRESHOLD_FACTOR * DurationToUsecs(decodeTime);
-    if (adjustedTime > mMaster->mLowAudioThresholdUsecs &&
-        !mMaster->HasLowBufferedData())
+    if (adjustedTime > mMaster->mLowAudioThresholdUsecs
+        && !mMaster->HasLowBufferedData())
     {
       mMaster->mLowAudioThresholdUsecs =
         std::min(adjustedTime, mMaster->mAmpleAudioThresholdUsecs);
 
       mMaster->mAmpleAudioThresholdUsecs =
         std::max(THRESHOLD_FACTOR * mMaster->mLowAudioThresholdUsecs,
                  mMaster->mAmpleAudioThresholdUsecs);
 
@@ -780,33 +785,33 @@ private:
            "mAmpleAudioThresholdUsecs=%lld",
            mMaster->mLowAudioThresholdUsecs,
            mMaster->mAmpleAudioThresholdUsecs);
     }
   }
 
   bool DonePrerollingAudio()
   {
-    return !mMaster->IsAudioDecoding() ||
-           mMaster->GetDecodedAudioDuration() >=
-             mMaster->AudioPrerollUsecs() * mMaster->mPlaybackRate;
+    return !mMaster->IsAudioDecoding()
+           || mMaster->GetDecodedAudioDuration()
+              >= mMaster->AudioPrerollUsecs() * mMaster->mPlaybackRate;
   }
 
   bool DonePrerollingVideo()
   {
-    return !mMaster->IsVideoDecoding() ||
-           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
-             mMaster->VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
+    return !mMaster->IsVideoDecoding()
+           || static_cast<uint32_t>(mMaster->VideoQueue().GetSize())
+              >= mMaster->VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
   }
 
   void MaybeStopPrerolling()
   {
-    if (mIsPrerolling &&
-        (DonePrerollingAudio() || mMaster->IsWaitingAudioData()) &&
-        (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
+    if (mIsPrerolling
+        && (DonePrerollingAudio() || mMaster->IsWaitingAudioData())
+        && (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
       mIsPrerolling = false;
       // Check if we can start playback.
       mMaster->ScheduleStateMachine();
     }
   }
 
   void StartDormantTimer()
   {
@@ -870,17 +875,17 @@ private:
  *   SHUTDOWN if seek failed.
  *   COMPLETED if the new playback position is the end of the media resource.
  *   DECODING otherwise.
  */
 class MediaDecoderStateMachine::SeekingState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit SeekingState(Master* aPtr) : StateObject(aPtr) {}
+  explicit SeekingState(Master* aPtr) : StateObject(aPtr) { }
 
   RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob aSeekJob,
                                           EventVisibility aVisibility)
   {
     mSeekJob = Move(aSeekJob);
 
     // Always switch off the blank decoder otherwise we might become visible
     // in the middle of seeking and won't have a valid video frame to show
@@ -891,39 +896,42 @@ public:
       Reader()->SetVideoBlankDecode(false);
     }
 
     // Don't stop playback for a video-only seek since audio is playing.
     if (!mSeekJob.mTarget->IsVideoOnly()) {
       mMaster->StopPlayback();
     }
 
-    mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime().ToMicroseconds());
+    mMaster->UpdatePlaybackPositionInternal(
+      mSeekJob.mTarget->GetTime().ToMicroseconds());
 
     if (aVisibility == EventVisibility::Observable) {
       mMaster->mOnPlaybackEvent.Notify(MediaEventType::SeekStarted);
       // We want dormant actions to be transparent to the user.
       // So we only notify the change when the seek request is from the user.
-      mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
+      mMaster->UpdateNextFrameStatus(
+        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
     }
 
     DoSeek();
 
     return mSeekJob.mPromise.Ensure(__func__);
   }
 
   virtual void Exit() override = 0;
 
   State GetState() const override
   {
     return DECODER_STATE_SEEKING;
   }
 
   void HandleAudioDecoded(MediaData* aAudio) override = 0;
-  void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override = 0;
+  void HandleVideoDecoded(MediaData* aVideo,
+                          TimeStamp aDecodeStart) override = 0;
   void HandleAudioWaited(MediaData::Type aType) override = 0;
   void HandleVideoWaited(MediaData::Type aType) override = 0;
 
   void HandleVideoSuspendTimeout() override
   {
     // Do nothing since we want a valid video frame to show when seek is done.
   }
 
@@ -951,39 +959,42 @@ public:
   explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr)
   {
   }
 
   RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob aSeekJob,
                                           EventVisibility aVisibility)
   {
     MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast());
-    mCurrentTimeBeforeSeek = TimeUnit::FromMicroseconds(mMaster->GetMediaTime());
+    mCurrentTimeBeforeSeek =
+      TimeUnit::FromMicroseconds(mMaster->GetMediaTime());
     return SeekingState::Enter(Move(aSeekJob), aVisibility);
   }
 
   void Exit() override
   {
     // Disconnect MediaDecoder.
     mSeekJob.RejectIfExists(__func__);
 
     // Disconnect MediaDecoderReaderWrapper.
     mSeekRequest.DisconnectIfExists();
 
     mWaitRequest.DisconnectIfExists();
   }
 
   void HandleAudioDecoded(MediaData* aAudio) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
     MOZ_ASSERT(aAudio);
 
     // Video-only seek doesn't reset audio decoder. There might be pending audio
-    // requests when AccurateSeekTask::Seek() begins. We will just store the data
-    // without checking |mDiscontinuity| or calling DropAudioUpToSeekTarget().
+    // requests when AccurateSeekTask::Seek() begins. We will just store the
+    // data without checking |mDiscontinuity| or calling
+    // DropAudioUpToSeekTarget().
     if (mSeekJob.mTarget->IsVideoOnly()) {
       mMaster->PushAudio(aAudio);
       return;
     }
 
     AdjustFastSeekIfNeeded(aAudio);
 
     if (mSeekJob.mTarget->IsFast()) {
@@ -1002,17 +1013,18 @@ public:
       RequestAudioData();
       return;
     }
     MaybeFinishSeek();
   }
 
   void HandleVideoDecoded(MediaData* aVideo, TimeStamp aDecodeStart) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
     MOZ_ASSERT(aVideo);
 
     AdjustFastSeekIfNeeded(aVideo);
 
     if (mSeekJob.mTarget->IsFast()) {
       // Non-precise seek. We can stop the seek at the first sample.
       mMaster->PushVideo(aVideo);
       mDoneVideoSeeking = true;
@@ -1079,28 +1091,30 @@ public:
     }
     VideoQueue().Finish();
     mDoneVideoSeeking = true;
     MaybeFinishSeek();
   }
 
   void HandleAudioWaited(MediaData::Type aType) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
 
     // Ignore pending requests from video-only seek.
     if (mSeekJob.mTarget->IsVideoOnly()) {
       return;
     }
     RequestAudioData();
   }
 
   void HandleVideoWaited(MediaData::Type aType) override
   {
-    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, "Seek shouldn't be finished");
+    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
+               "Seek shouldn't be finished");
 
     RequestVideoData();
   }
 
 private:
   void DemuxerSeek()
   {
     // Request the demuxer to perform seek.
@@ -1129,26 +1143,26 @@ private:
 
     DemuxerSeek();
   }
 
   int64_t CalculateNewCurrentTime() const override
   {
     const int64_t seekTime = mSeekJob.mTarget->GetTime().ToMicroseconds();
 
-    // For the accurate seek, we always set the newCurrentTime = seekTime so that
-    // the updated HTMLMediaElement.currentTime will always be the seek target;
-    // we rely on the MediaSink to handles the gap between the newCurrentTime and
-    // the real decoded samples' start time.
+    // For the accurate seek, we always set the newCurrentTime = seekTime so
+    // that the updated HTMLMediaElement.currentTime will always be the seek
+    // target; we rely on the MediaSink to handle the gap between the
+    // newCurrentTime and the real decoded samples' start time.
     if (mSeekJob.mTarget->IsAccurate()) {
       return seekTime;
     }
 
-    // For the fast seek, we update the newCurrentTime with the decoded audio and
-    // video samples, set it to be the one which is closet to the seekTime.
+    // For the fast seek, we update the newCurrentTime with the decoded audio
+    // and video samples, set it to the one which is closest to the seekTime.
     if (mSeekJob.mTarget->IsFast()) {
       RefPtr<MediaData> audio = AudioQueue().PeekFront();
       RefPtr<MediaData> video = VideoQueue().PeekFront();
 
       // A situation that both audio and video approaches the end.
       if (!audio && !video) {
         return seekTime;
       }
@@ -1159,56 +1173,62 @@ private:
       const int64_t videoGap = std::abs(videoStart - seekTime);
       return audioGap <= videoGap ? audioStart : videoStart;
     }
 
     MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
     return 0;
   }
 
-  void OnSeekResolved(media::TimeUnit) {
+  void OnSeekResolved(media::TimeUnit)
+  {
     mSeekRequest.Complete();
 
     // We must decode the first samples of active streams, so we can determine
     // the new stream time. So dispatch tasks to do that.
     if (!mDoneVideoSeeking) {
       RequestVideoData();
     }
     if (!mDoneAudioSeeking) {
       RequestAudioData();
     }
   }
 
-  void OnSeekRejected(const SeekRejectValue& aReject) {
+  void OnSeekRejected(const SeekRejectValue& aReject)
+  {
     mSeekRequest.Complete();
 
     if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
       SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%d", aReject.mType);
       MOZ_ASSERT(!mMaster->IsRequestingAudioData());
       MOZ_ASSERT(!mMaster->IsRequestingVideoData());
       MOZ_ASSERT(!mMaster->IsWaitingAudioData());
       MOZ_ASSERT(!mMaster->IsWaitingVideoData());
       // Fire 'waiting' to notify the player that we are waiting for data.
-      mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
-      Reader()->WaitForData(aReject.mType)->Then(
-        OwnerThread(), __func__,
-        [this] (MediaData::Type aType) {
-          SLOG("OnSeekRejected wait promise resolved");
-          mWaitRequest.Complete();
-          DemuxerSeek();
-        },
-        [this] (const WaitForDataRejectValue& aRejection) {
-          SLOG("OnSeekRejected wait promise rejected");
-          mWaitRequest.Complete();
-          mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
-        })->Track(mWaitRequest);
+      mMaster->UpdateNextFrameStatus(
+        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
+      Reader()
+        ->WaitForData(aReject.mType)
+        ->Then(OwnerThread(), __func__,
+               [this](MediaData::Type aType) {
+                 SLOG("OnSeekRejected wait promise resolved");
+                 mWaitRequest.Complete();
+                 DemuxerSeek();
+               },
+               [this](const WaitForDataRejectValue& aRejection) {
+                 SLOG("OnSeekRejected wait promise rejected");
+                 mWaitRequest.Complete();
+                 mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
+               })
+        ->Track(mWaitRequest);
       return;
     }
 
-    MOZ_ASSERT(NS_FAILED(aReject.mError), "Cancels should also disconnect mSeekRequest");
+    MOZ_ASSERT(NS_FAILED(aReject.mError),
+               "Cancels should also disconnect mSeekRequest");
     mMaster->DecodeError(aReject.mError);
   }
 
   void RequestAudioData()
   {
     MOZ_ASSERT(!mDoneAudioSeeking);
     mMaster->RequestAudioData();
   }
@@ -1216,39 +1236,41 @@ private:
   void RequestVideoData()
   {
     MOZ_ASSERT(!mDoneVideoSeeking);
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   void AdjustFastSeekIfNeeded(MediaData* aSample)
   {
-    if (mSeekJob.mTarget->IsFast() &&
-        mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek &&
-        aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
+    if (mSeekJob.mTarget->IsFast()
+        && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
+        && aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
       // spec, fastSeek should always be fast, but until we get the time to
       // change all Readers to seek to the keyframe after the currentTime
       // in this case, we'll just decode forward. Bug 1026330.
       mSeekJob.mTarget->SetType(SeekTarget::Accurate);
     }
   }
 
   nsresult DropAudioUpToSeekTarget(AudioData* aAudio)
   {
     MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate());
 
-    CheckedInt64 sampleDuration = FramesToUsecs(aAudio->mFrames, Info().mAudio.mRate);
+    CheckedInt64 sampleDuration =
+      FramesToUsecs(aAudio->mFrames, Info().mAudio.mRate);
     if (!sampleDuration.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
 
-    if (aAudio->mTime + sampleDuration.value() <= mSeekJob.mTarget->GetTime().ToMicroseconds()) {
+    if (aAudio->mTime + sampleDuration.value()
+        <= mSeekJob.mTarget->GetTime().ToMicroseconds()) {
       // Our seek target lies after the frames in this AudioData. Don't
       // push it onto the audio queue, and keep decoding forwards.
       return NS_OK;
     }
 
     if (aAudio->mTime > mSeekJob.mTarget->GetTime().ToMicroseconds()) {
       // The seek target doesn't lie in the audio block just after the last
       // audio frames we've seen which were before the seek target. This
@@ -1263,52 +1285,52 @@ private:
       return NS_OK;
     }
 
     // The seek target lies somewhere in this AudioData's frames, strip off
     // any frames which lie before the seek target, so we'll begin playback
     // exactly at the seek target.
     NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds() >= aAudio->mTime,
                  "Target must at or be after data start.");
-    NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds() < aAudio->mTime + sampleDuration.value(),
+    NS_ASSERTION(mSeekJob.mTarget->GetTime().ToMicroseconds()
+                 < aAudio->mTime + sampleDuration.value(),
                  "Data must end after target.");
 
-    CheckedInt64 framesToPrune =
-      UsecsToFrames(mSeekJob.mTarget->GetTime().ToMicroseconds() - aAudio->mTime, Info().mAudio.mRate);
+    CheckedInt64 framesToPrune = UsecsToFrames(
+      mSeekJob.mTarget->GetTime().ToMicroseconds() - aAudio->mTime,
+      Info().mAudio.mRate);
     if (!framesToPrune.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
     if (framesToPrune.value() > aAudio->mFrames) {
       // We've messed up somehow. Don't try to trim frames, the |frames|
       // variable below will overflow.
       SWARN("Can't prune more frames that we have!");
       return NS_ERROR_FAILURE;
     }
-    uint32_t frames = aAudio->mFrames - static_cast<uint32_t>(framesToPrune.value());
+    uint32_t frames =
+      aAudio->mFrames - static_cast<uint32_t>(framesToPrune.value());
     uint32_t channels = aAudio->mChannels;
     AlignedAudioBuffer audioData(frames * channels);
     if (!audioData) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
     memcpy(audioData.get(),
            aAudio->mAudioData.get() + (framesToPrune.value() * channels),
            frames * channels * sizeof(AudioDataValue));
     CheckedInt64 duration = FramesToUsecs(frames, Info().mAudio.mRate);
     if (!duration.isValid()) {
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
-    RefPtr<AudioData> data(new AudioData(aAudio->mOffset,
-                                         mSeekJob.mTarget->GetTime().ToMicroseconds(),
-                                         duration.value(),
-                                         frames,
-                                         Move(audioData),
-                                         channels,
-                                         aAudio->mRate));
-    MOZ_ASSERT(AudioQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+    RefPtr<AudioData> data(new AudioData(
+      aAudio->mOffset, mSeekJob.mTarget->GetTime().ToMicroseconds(),
+      duration.value(), frames, Move(audioData), channels, aAudio->mRate));
+    MOZ_ASSERT(AudioQueue().GetSize() == 0,
+               "Should be the 1st sample after seeking");
     mMaster->PushAudio(data);
     mDoneAudioSeeking = true;
 
     return NS_OK;
   }
 
   nsresult DropVideoUpToSeekTarget(MediaData* aSample)
   {
@@ -1321,26 +1343,28 @@ private:
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
     if (target >= video->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
            video->mTime, video->GetEndTime(), target);
       mFirstVideoFrameAfterSeek = video;
     } else {
       if (target >= video->mTime && video->GetEndTime() >= target) {
-        // The seek target lies inside this frame's time slice. Adjust the frame's
-        // start time to match the seek target.
+        // The seek target lies inside this frame's time slice. Adjust the
+        // frame's start time to match the seek target.
         video->UpdateTimestamp(target);
       }
       mFirstVideoFrameAfterSeek = nullptr;
 
-      SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
-                  video->mTime, video->GetEndTime(), target);
-
-      MOZ_ASSERT(VideoQueue().GetSize() == 0, "Should be the 1st sample after seeking");
+      SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] "
+           "containing target=%lld",
+           video->mTime, video->GetEndTime(), target);
+
+      MOZ_ASSERT(VideoQueue().GetSize() == 0,
+                 "Should be the 1st sample after seeking");
       mMaster->PushVideo(video);
       mDoneVideoSeeking = true;
     }
 
     return NS_OK;
   }
 
   void MaybeFinishSeek()
@@ -1418,18 +1442,18 @@ private:
   {
     auto currentTime = mCurrentTime;
     DiscardFrames(VideoQueue(), [currentTime] (int64_t aSampleTime) {
       return aSampleTime <= currentTime;
     });
 
     if (!NeedMoreVideo()) {
       FinishSeek();
-    } else if (!mMaster->IsRequestingVideoData() &&
-               !mMaster->IsWaitingVideoData()) {
+    } else if (!mMaster->IsRequestingVideoData()
+               && !mMaster->IsWaitingVideoData()) {
       RequestVideoData();
     }
   }
 
   class AysncNextFrameSeekTask : public Runnable
   {
   public:
     explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
@@ -1555,22 +1579,23 @@ private:
   void RequestVideoData()
   {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 
   bool NeedMoreVideo() const
   {
     // Need to request video when we have none and video queue is not finished.
-    return VideoQueue().GetSize() == 0 &&
-           !VideoQueue().IsFinished();
+    return VideoQueue().GetSize() == 0
+           && !VideoQueue().IsFinished();
   }
 
   // Update the seek target's time before resolving this seek task, the updated
-  // time will be used in the MDSM::SeekCompleted() to update the MDSM's position.
+  // time will be used in the MDSM::SeekCompleted() to update the MDSM's
+  // position.
   void UpdateSeekTargetTime()
   {
     RefPtr<MediaData> data = VideoQueue().PeekFront();
     if (data) {
       mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
@@ -1604,34 +1629,37 @@ private:
  *   SHUTDOWN if any decode error.
  *   COMPLETED when having decoded all audio/video data.
  *   DECODING when having decoded enough data to continue playback.
  */
 class MediaDecoderStateMachine::BufferingState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit BufferingState(Master* aPtr) : StateObject(aPtr) {}
+  explicit BufferingState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     if (mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     mBufferingStart = TimeStamp::Now();
 
     MediaStatistics stats = mMaster->GetStatistics();
     SLOG("Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
-         stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
-         stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
+         stats.mPlaybackRate / 1024,
+         stats.mPlaybackRateReliable ? "" : " (unreliable)",
+         stats.mDownloadRate / 1024,
+         stats.mDownloadRateReliable ? "" : " (unreliable)");
 
     mMaster->ScheduleStateMachineIn(USECS_PER_S);
 
-    mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
+    mMaster->UpdateNextFrameStatus(
+      MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
   }
 
   void Step() override;
 
   State GetState() const override
   {
     return DECODER_STATE_BUFFERING;
   }
@@ -1709,66 +1737,68 @@ private:
  *
  * Transition to:
  *   SEEKING if any seek request.
  */
 class MediaDecoderStateMachine::CompletedState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit CompletedState(Master* aPtr) : StateObject(aPtr) {}
+  explicit CompletedState(Master* aPtr) : StateObject(aPtr) { }
 
   void Enter()
   {
     // We've decoded all samples. We don't need decoders anymore.
     Reader()->ReleaseResources();
 
     bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted)
-      && (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
-
-    mMaster->UpdateNextFrameStatus(hasNextFrame
-      ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
-      : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
+                        && (!mMaster->HasVideo() || !mMaster->mVideoCompleted);
+
+    mMaster->UpdateNextFrameStatus(
+      hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
+                   : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
 
     Step();
   }
 
   void Exit() override
   {
     mSentPlaybackEndedEvent = false;
   }
 
   void Step() override
   {
-    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
-        mMaster->IsPlaying()) {
+    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING
+        && mMaster->IsPlaying()) {
       mMaster->StopPlayback();
     }
 
     // Play the remaining media. We want to run AdvanceFrame() at least
     // once to ensure the current playback position is advanced to the
     // end of the media, and so that we update the readyState.
-    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
-        (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
+    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted)
+        || (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
       // Start playback if necessary to play the remaining media.
       mMaster->MaybeStartPlayback();
       mMaster->UpdatePlaybackPositionPeriodically();
-      MOZ_ASSERT(!mMaster->IsPlaying() ||
-                 mMaster->IsStateMachineScheduled(),
+      MOZ_ASSERT(!mMaster->IsPlaying()
+                 || mMaster->IsStateMachineScheduled(),
                  "Must have timer scheduled");
       return;
     }
 
     // StopPlayback in order to reset the IsPlaying() state so audio
     // is restarted correctly.
     mMaster->StopPlayback();
 
     if (!mSentPlaybackEndedEvent) {
-      int64_t clockTime = std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
-      clockTime = std::max(int64_t(0), std::max(clockTime, mMaster->Duration().ToMicroseconds()));
+      int64_t clockTime =
+        std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
+      clockTime = std::max(
+        int64_t(0), std::max(clockTime, mMaster->Duration().ToMicroseconds()));
       mMaster->UpdatePlaybackPosition(clockTime);
 
       // Ensure readyState is updated before firing the 'ended' event.
       mMaster->UpdateNextFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
 
       mMaster->mOnPlaybackEvent.Notify(MediaEventType::PlaybackEnded);
 
       mSentPlaybackEndedEvent = true;
@@ -1814,17 +1844,17 @@ private:
  *
  * Transition from:
  *   Any states other than SHUTDOWN.
  */
 class MediaDecoderStateMachine::ShutdownState
   : public MediaDecoderStateMachine::StateObject
 {
 public:
-  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) {}
+  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) { }
 
   RefPtr<ShutdownPromise> Enter();
 
   void Exit() override
   {
     MOZ_DIAGNOSTIC_ASSERT(false, "Shouldn't escape the SHUTDOWN state.");
   }
 
@@ -1933,18 +1963,18 @@ StateObject::HandleResumeVideoDecoding()
   const SeekTarget::Type type = mMaster->HasAudio()
                                 ? SeekTarget::Type::Accurate
                                 : SeekTarget::Type::PrevSyncPoint;
 
   seekJob.mTarget.emplace(mMaster->GetMediaTime(),
                           type,
                           true /* aVideoOnly */);
 
-  // Hold mMaster->mAbstractMainThread here because this->mMaster will be invalid
-  // after the current state object is deleted in SetState();
+  // Hold mMaster->mAbstractMainThread here because this->mMaster will be
+  // invalid after the current state object is deleted in SetState();
   RefPtr<AbstractThread> mainThread = mMaster->mAbstractMainThread;
 
   SetSeekingState(Move(seekJob), EventVisibility::Suppressed)->Then(
     mainThread, __func__,
     [start, info, hw](){ ReportRecoveryTelemetry(start, info, hw); },
     [](){});
 }
 
@@ -1971,17 +2001,18 @@ DecodeMetadataState::OnMetadataRead(Meta
   mMetadataRequest.Complete();
 
   // Set mode to PLAYBACK after reading metadata.
   Resource()->SetReadMode(MediaCacheStream::MODE_PLAYBACK);
 
   mMaster->mInfo.emplace(aMetadata->mInfo);
   mMaster->mMetadataTags = aMetadata->mTags.forget();
   mMaster->mMediaSeekable = Info().mMediaSeekable;
-  mMaster->mMediaSeekableOnlyInBufferedRanges = Info().mMediaSeekableOnlyInBufferedRanges;
+  mMaster->mMediaSeekableOnlyInBufferedRanges =
+    Info().mMediaSeekableOnlyInBufferedRanges;
 
   if (Info().mMetadataDuration.isSome()) {
     mMaster->RecomputeDuration();
   } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
     const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
     const TimeUnit adjustment = Info().mStartTime;
     mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
     mMaster->RecomputeDuration();
@@ -2057,34 +2088,34 @@ DecodingFirstFrameState::Enter()
 }
 
 void
 MediaDecoderStateMachine::
 DecodingFirstFrameState::MaybeFinishDecodeFirstFrame()
 {
   MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);
 
-  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
-      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
+  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0)
+      || (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
     return;
   }
 
   mMaster->FinishDecodeFirstFrame();
   SetState<DecodingState>();
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::Enter()
 {
   MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
 
-  if (!mMaster->mIsVisible &&
-      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
-      !mMaster->mVideoDecodeSuspended) {
+  if (!mMaster->mIsVisible
+      && !mMaster->mVideoDecodeSuspendTimer.IsScheduled()
+      && !mMaster->mVideoDecodeSuspended) {
     // If we are not visible and the timer is not schedule, it means the timer
     // has timed out and we should suspend video decoding now if necessary.
     HandleVideoSuspendTimeout();
   }
 
   if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding()) {
     SetState<CompletedState>();
     return;
@@ -2143,52 +2174,53 @@ DecodingState::HandleEndOfVideo()
     MaybeStopPrerolling();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::DispatchDecodeTasksIfNeeded()
 {
-  if (mMaster->IsAudioDecoding() &&
-      !mMaster->mMinimizePreroll &&
-      !mMaster->HaveEnoughDecodedAudio()) {
+  if (mMaster->IsAudioDecoding()
+      && !mMaster->mMinimizePreroll
+      && !mMaster->HaveEnoughDecodedAudio()) {
     EnsureAudioDecodeTaskQueued();
   }
 
-  if (mMaster->IsVideoDecoding() &&
-      !mMaster->mMinimizePreroll &&
-      !mMaster->HaveEnoughDecodedVideo()) {
+  if (mMaster->IsVideoDecoding()
+      && !mMaster->mMinimizePreroll
+      && !mMaster->HaveEnoughDecodedVideo()) {
     EnsureVideoDecodeTaskQueued();
   }
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureAudioDecodeTaskQueued()
 {
-  if (!mMaster->IsAudioDecoding() ||
-      mMaster->IsRequestingAudioData() ||
-      mMaster->IsWaitingAudioData()) {
+  if (!mMaster->IsAudioDecoding()
+      || mMaster->IsRequestingAudioData()
+      || mMaster->IsWaitingAudioData()) {
     return;
   }
   mMaster->RequestAudioData();
 }
 
 void
 MediaDecoderStateMachine::
 DecodingState::EnsureVideoDecodeTaskQueued()
 {
-  if (!mMaster->IsVideoDecoding() ||
-      mMaster->IsRequestingVideoData() ||
-      mMaster->IsWaitingVideoData()) {
+  if (!mMaster->IsVideoDecoding()
+      || mMaster->IsRequestingVideoData()
+      || mMaster->IsWaitingVideoData()) {
     return;
   }
-  mMaster->RequestVideoData(NeedToSkipToNextKeyframe(),
-                            media::TimeUnit::FromMicroseconds(mMaster->GetMediaTime()));
+  mMaster->RequestVideoData(
+    NeedToSkipToNextKeyframe(),
+    media::TimeUnit::FromMicroseconds(mMaster->GetMediaTime()));
 }
 
 bool
 MediaDecoderStateMachine::
 DecodingState::NeedToSkipToNextKeyframe()
 {
   // Since GetClock() can only be called after starting MediaSink, we return
   // false quickly if it is not started because we won't fall behind playback
@@ -2206,27 +2238,32 @@ DecodingState::NeedToSkipToNextKeyframe(
   // We'll skip the video decode to the next keyframe if we're low on
   // audio, or if we're low on video, provided we're not running low on
   // data to decode. If we're running low on downloaded data to decode,
   // we won't start keyframe skipping, as we'll be pausing playback to buffer
   // soon anyway and we'll want to be able to display frames immediately
   // after buffering finishes. We ignore the low audio calculations for
   // readers that are async, as since their audio decode runs on a different
   // task queue it should never run low and skipping won't help their decode.
-  bool isLowOnDecodedAudio = !Reader()->IsAsync() &&
-                             mMaster->IsAudioDecoding() &&
-                             (mMaster->GetDecodedAudioDuration() <
-                              mMaster->mLowAudioThresholdUsecs * mMaster->mPlaybackRate);
-  bool isLowOnDecodedVideo = (mMaster->GetClock() - mMaster->mDecodedVideoEndTime) * mMaster->mPlaybackRate >
-                             LOW_VIDEO_THRESHOLD_USECS;
+  bool isLowOnDecodedAudio =
+    !Reader()->IsAsync()
+    && mMaster->IsAudioDecoding()
+    && (mMaster->GetDecodedAudioDuration()
+        < mMaster->mLowAudioThresholdUsecs * mMaster->mPlaybackRate);
+  bool isLowOnDecodedVideo =
+    (mMaster->GetClock() - mMaster->mDecodedVideoEndTime)
+    * mMaster->mPlaybackRate
+    > LOW_VIDEO_THRESHOLD_USECS;
   bool lowBuffered = mMaster->HasLowBufferedData();
 
   if ((isLowOnDecodedAudio || isLowOnDecodedVideo) && !lowBuffered) {
-    SLOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d lowUndecoded=%d async=%d",
-         isLowOnDecodedAudio, isLowOnDecodedVideo, lowBuffered, Reader()->IsAsync());
+    SLOG("Skipping video decode to the next keyframe lowAudio=%d lowVideo=%d "
+         "lowUndecoded=%d async=%d",
+         isLowOnDecodedAudio, isLowOnDecodedVideo, lowBuffered,
+         Reader()->IsAsync());
     return true;
   }
 
   return false;
 }
 
 void
 MediaDecoderStateMachine::
@@ -2243,23 +2280,23 @@ DecodingState::MaybeStartBuffering()
   // Don't enter buffering while prerolling so that the decoder has a chance to
   // enqueue some decoded data before we give up and start buffering.
   if (!mMaster->IsPlaying()) {
     return;
   }
 
   bool shouldBuffer;
   if (Reader()->UseBufferingHeuristics()) {
-    shouldBuffer = IsExpectingMoreData() &&
-                   mMaster->HasLowDecodedData() &&
-                   mMaster->HasLowBufferedData();
+    shouldBuffer = IsExpectingMoreData()
+                   && mMaster->HasLowDecodedData()
+                   && mMaster->HasLowBufferedData();
   } else {
     shouldBuffer =
-      (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) ||
-      (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData());
+      (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData())
+      || (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData());
   }
   if (shouldBuffer) {
     SetState<BufferingState>();
   }
 }
 
 void
 MediaDecoderStateMachine::
@@ -2300,41 +2337,42 @@ SeekingState::SeekCompleted()
   if (!target.IsVideoOnly()) {
     // Don't update playback position for video-only seek.
     // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
     // and fail the assertion in GetClock() since we didn't stop MediaSink.
     mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
   }
 
   // Try to decode another frame to detect if we're at the end...
-  SLOG("Seek completed, mCurrentPosition=%lld", mMaster->mCurrentPosition.Ref());
+  SLOG("Seek completed, mCurrentPosition=%lld",
+       mMaster->mCurrentPosition.Ref());
 
   if (mMaster->VideoQueue().PeekFront()) {
     mMaster->mMediaSink->Redraw(Info().mVideo);
     mMaster->mOnPlaybackEvent.Notify(MediaEventType::Invalidate);
   }
 
   SetState<DecodingState>();
 }
 
 void
 MediaDecoderStateMachine::
 BufferingState::DispatchDecodeTasksIfNeeded()
 {
-  if (mMaster->IsAudioDecoding() &&
-      !mMaster->HaveEnoughDecodedAudio() &&
-      !mMaster->IsRequestingAudioData() &&
-      !mMaster->IsWaitingAudioData()) {
+  if (mMaster->IsAudioDecoding()
+      && !mMaster->HaveEnoughDecodedAudio()
+      && !mMaster->IsRequestingAudioData()
+      && !mMaster->IsWaitingAudioData()) {
     mMaster->RequestAudioData();
   }
 
-  if (mMaster->IsVideoDecoding() &&
-      !mMaster->HaveEnoughDecodedVideo() &&
-      !mMaster->IsRequestingVideoData() &&
-      !mMaster->IsWaitingVideoData()) {
+  if (mMaster->IsVideoDecoding()
+      && !mMaster->HaveEnoughDecodedVideo()
+      && !mMaster->IsRequestingVideoData()
+      && !mMaster->IsWaitingVideoData()) {
     mMaster->RequestVideoData(false, media::TimeUnit());
   }
 }
 
 void
 MediaDecoderStateMachine::
 BufferingState::Step()
 {
@@ -2342,34 +2380,35 @@ BufferingState::Step()
   MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time.");
 
   // With buffering heuristics we will remain in the buffering state if
   // we've not decoded enough data to begin playback, or if we've not
   // downloaded a reasonable amount of data inside our buffering time.
   if (Reader()->UseBufferingHeuristics()) {
     TimeDuration elapsed = now - mBufferingStart;
     bool isLiveStream = Resource()->IsLiveStream();
-    if ((isLiveStream || !mMaster->CanPlayThrough()) &&
-        elapsed < TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate) &&
-        mMaster->HasLowBufferedData(mBufferingWait * USECS_PER_S) &&
-        IsExpectingMoreData()) {
+    if ((isLiveStream || !mMaster->CanPlayThrough())
+        && elapsed
+           < TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate)
+        && mMaster->HasLowBufferedData(mBufferingWait * USECS_PER_S)
+        && IsExpectingMoreData()) {
       SLOG("Buffering: wait %ds, timeout in %.3lfs",
            mBufferingWait, mBufferingWait - elapsed.ToSeconds());
       mMaster->ScheduleStateMachineIn(USECS_PER_S);
       DispatchDecodeTasksIfNeeded();
       return;
     }
   } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
     DispatchDecodeTasksIfNeeded();
-    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() ||
-               mMaster->IsRequestingAudioData() ||
-               mMaster->IsWaitingAudioData());
-    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() ||
-               mMaster->IsRequestingVideoData() ||
-               mMaster->IsWaitingVideoData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedAudio()
+               || mMaster->IsRequestingAudioData()
+               || mMaster->IsWaitingAudioData());
+    MOZ_ASSERT(!mMaster->OutOfDecodedVideo()
+               || mMaster->IsRequestingVideoData()
+               || mMaster->IsWaitingVideoData());
     SLOG("In buffering mode, waiting to be notified: outOfAudio: %d, "
          "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
          mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(),
          mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus());
     return;
   }
 
   SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
@@ -2525,19 +2564,19 @@ MediaDecoderStateMachine::MediaDecoderSt
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
 
 #ifdef XP_WIN
   // Ensure high precision timers are enabled on Windows, otherwise the state
-  // machine isn't woken up at reliable intervals to set the next frame,
-  // and we drop frames while painting. Note that multiple calls to this
-  // function per-process is OK, provided each call is matched by a corresponding
+  // machine isn't woken up at reliable intervals to set the next frame, and we
+  // drop frames while painting. Note that multiple calls to this function
+  // per-process is OK, provided each call is matched by a corresponding
   // timeEndPeriod() call.
   timeBeginPeriod(1);
 #endif
 }
 
 #undef INIT_WATCHABLE
 #undef INIT_MIRROR
 #undef INIT_CANONICAL
@@ -2567,27 +2606,33 @@ MediaDecoderStateMachine::Initialization
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mSameOriginMedia.Connect(aDecoder->CanonicalSameOriginMedia());
   mMediaPrincipalHandle.Connect(aDecoder->CanonicalMediaPrincipalHandle());
   mPlaybackBytesPerSecond.Connect(aDecoder->CanonicalPlaybackBytesPerSecond());
   mPlaybackRateReliable.Connect(aDecoder->CanonicalPlaybackRateReliable());
   mDecoderPosition.Connect(aDecoder->CanonicalDecoderPosition());
 
   // Initialize watchers.
-  mWatchManager.Watch(mBuffered, &MediaDecoderStateMachine::BufferedRangeUpdated);
+  mWatchManager.Watch(mBuffered,
+                      &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
-  mWatchManager.Watch(mPreservesPitch, &MediaDecoderStateMachine::PreservesPitchChanged);
-  mWatchManager.Watch(mEstimatedDuration, &MediaDecoderStateMachine::RecomputeDuration);
-  mWatchManager.Watch(mExplicitDuration, &MediaDecoderStateMachine::RecomputeDuration);
-  mWatchManager.Watch(mObservedDuration, &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mPreservesPitch,
+                      &MediaDecoderStateMachine::PreservesPitchChanged);
+  mWatchManager.Watch(mEstimatedDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mExplicitDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
+  mWatchManager.Watch(mObservedDuration,
+                      &MediaDecoderStateMachine::RecomputeDuration);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
 
   if (MediaPrefs::MDSMSuspendBackgroundVideoEnabled()) {
     mIsVisible.Connect(aDecoder->CanonicalIsVisible());
-    mWatchManager.Watch(mIsVisible, &MediaDecoderStateMachine::VisibilityChanged);
+    mWatchManager.Watch(mIsVisible,
+                        &MediaDecoderStateMachine::VisibilityChanged);
   }
 }
 
 void
 MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible)
 {
   mIsAudioDataAudible = aAudible;
 }
@@ -2598,29 +2643,31 @@ MediaDecoderStateMachine::CreateAudioSin
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self] () {
     MOZ_ASSERT(self->OnTaskQueue());
     DecodedAudioDataSink* audioSink = new DecodedAudioDataSink(
       self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
       self->Info().mAudio, self->mAudioChannel);
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
-      self->mTaskQueue, self.get(), &MediaDecoderStateMachine::AudioAudibleChanged);
+      self->mTaskQueue, self.get(),
+      &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
   return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
 }
 
 already_AddRefed<media::MediaSink>
 MediaDecoderStateMachine::CreateMediaSink(bool aAudioCaptured)
 {
-  RefPtr<media::MediaSink> audioSink = aAudioCaptured
-    ? new DecodedStream(mTaskQueue, mAbstractMainThread, mAudioQueue, mVideoQueue,
-                        mOutputStreamManager, mSameOriginMedia.Ref(),
-                        mMediaPrincipalHandle.Ref())
+  RefPtr<media::MediaSink> audioSink =
+    aAudioCaptured
+    ? new DecodedStream(mTaskQueue, mAbstractMainThread, mAudioQueue,
+                        mVideoQueue, mOutputStreamManager,
+                        mSameOriginMedia.Ref(), mMediaPrincipalHandle.Ref())
     : CreateAudioSink();
 
   RefPtr<media::MediaSink> mediaSink =
     new VideoSink(mTaskQueue, audioSink, mVideoQueue,
                   mVideoFrameContainer, *mFrameStats,
                   sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
@@ -2639,18 +2686,18 @@ MediaDecoderStateMachine::GetDecodedAudi
   return AudioQueue().Duration();
 }
 
 bool
 MediaDecoderStateMachine::HaveEnoughDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
   auto ampleAudioUSecs = mAmpleAudioThresholdUsecs * mPlaybackRate;
-  return AudioQueue().GetSize() > 0 &&
-         GetDecodedAudioDuration() >= ampleAudioUSecs;
+  return AudioQueue().GetSize() > 0
+         && GetDecodedAudioDuration() >= ampleAudioUSecs;
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1;
 }
 
@@ -2799,17 +2846,18 @@ void MediaDecoderStateMachine::UpdatePla
                                TimeUnit::FromMicroseconds(mCurrentPosition.Ref()));
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   UpdatePlaybackPositionInternal(aTime);
 
-  bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
+  bool fragmentEnded =
+    mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
   mMetadataManager.DispatchMetadataIfNeeded(TimeUnit::FromMicroseconds(aTime));
 
   if (fragmentEnded) {
     StopPlayback();
   }
 }
 
 /* static */ const char*
@@ -2868,18 +2916,18 @@ void MediaDecoderStateMachine::Recompute
   } else {
     return;
   }
 
   // Only adjust the duration when an explicit duration isn't set (MSE).
   // The duration is always exactly known with MSE and there's no need to adjust
   // it based on what may have been seen in the past; in particular as this data
   // may no longer exist such as when the mediasource duration was reduced.
-  if (mExplicitDuration.Ref().isNothing() &&
-      duration < mObservedDuration.Ref()) {
+  if (mExplicitDuration.Ref().isNothing()
+      && duration < mObservedDuration.Ref()) {
     duration = mObservedDuration;
   }
 
   MOZ_ASSERT(duration.ToMicroseconds() >= 0);
   mDuration = Some(duration);
 }
 
 RefPtr<ShutdownPromise>
@@ -2892,18 +2940,18 @@ MediaDecoderStateMachine::Shutdown()
 void MediaDecoderStateMachine::PlayStateChanged()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
     mVideoDecodeSuspendTimer.Reset();
   } else if (mMinimizePreroll) {
     // Once we start playing, we don't want to minimize our prerolling, as we
-    // assume the user is likely to want to keep playing in future. This needs to
-    // happen before we invoke StartDecoding().
+    // assume the user is likely to want to keep playing in future. This needs
+    // to happen before we invoke StartDecoding().
     mMinimizePreroll = false;
   }
 
   mStateObj->HandlePlayStateChanged(mPlayState);
 }
 
 void MediaDecoderStateMachine::VisibilityChanged()
 {
@@ -2951,28 +2999,33 @@ void MediaDecoderStateMachine::BufferedR
 }
 
 RefPtr<MediaDecoder::SeekPromise>
 MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (IsShutdown()) {
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   // We need to be able to seek in some way
   if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) {
-    DECODER_WARN("Seek() function should not be called on a non-seekable state machine");
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    DECODER_WARN(
+      "Seek() function should not be called on a non-seekable state machine");
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   if (aTarget.IsNextFrame() && !HasVideo()) {
-    DECODER_WARN("Ignore a NextFrameSeekTask on a media file without video track.");
-    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, __func__);
+    DECODER_WARN(
+      "Ignore a NextFrameSeekTask on a media file without video track.");
+    return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
+                                                      __func__);
   }
 
   MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already");
 
   return mStateObj->HandleSeek(aTarget);
 }
 
 RefPtr<MediaDecoder::SeekPromise>
@@ -3007,18 +3060,20 @@ MediaDecoderStateMachine::RequestAudioDa
              AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());
 
   mReader->RequestAudioData()->Then(
     OwnerThread(), __func__,
     [this] (MediaData* aAudio) {
       MOZ_ASSERT(aAudio);
       mAudioDataRequest.Complete();
       // audio->GetEndTime() is not always mono-increasing in chained ogg.
-      mDecodedAudioEndTime = std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
-      SAMPLE_LOG("OnAudioDecoded [%lld,%lld]", aAudio->mTime, aAudio->GetEndTime());
+      mDecodedAudioEndTime =
+        std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
+      SAMPLE_LOG("OnAudioDecoded [%lld,%lld]", aAudio->mTime,
+                 aAudio->GetEndTime());
       mStateObj->HandleAudioDecoded(aAudio);
     },
     [this] (const MediaResult& aError) {
       SAMPLE_LOG("OnAudioNotDecoded aError=%u", aError.Code());
       mAudioDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
           mStateObj->HandleWaitingForAudio();
@@ -3038,29 +3093,32 @@ MediaDecoderStateMachine::RequestAudioDa
 void
 MediaDecoderStateMachine::RequestVideoData(bool aSkipToNextKeyframe,
                                            const media::TimeUnit& aCurrentTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(IsVideoDecoding());
   MOZ_ASSERT(!IsRequestingVideoData());
   MOZ_ASSERT(!IsWaitingVideoData());
-  SAMPLE_LOG("Queueing video task - queued=%i, decoder-queued=%o, skip=%i, time=%lld",
-             VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(), aSkipToNextKeyframe,
-             aCurrentTime.ToMicroseconds());
+  SAMPLE_LOG(
+    "Queueing video task - queued=%i, decoder-queued=%o, skip=%i, time=%lld",
+    VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
+    aSkipToNextKeyframe, aCurrentTime.ToMicroseconds());
 
   TimeStamp videoDecodeStartTime = TimeStamp::Now();
   mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
     OwnerThread(), __func__,
     [this, videoDecodeStartTime] (MediaData* aVideo) {
       MOZ_ASSERT(aVideo);
       mVideoDataRequest.Complete();
       // Handle abnormal or negative timestamps.
-      mDecodedVideoEndTime = std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
-      SAMPLE_LOG("OnVideoDecoded [%lld,%lld]", aVideo->mTime, aVideo->GetEndTime());
+      mDecodedVideoEndTime =
+        std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
+      SAMPLE_LOG("OnVideoDecoded [%lld,%lld]", aVideo->mTime,
+                 aVideo->GetEndTime());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },
     [this] (const MediaResult& aError) {
       SAMPLE_LOG("OnVideoNotDecoded aError=%u", aError.Code());
       mVideoDataRequest.Complete();
       switch (aError.Code()) {
         case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
           mStateObj->HandleWaitingForVideo();
@@ -3136,42 +3194,43 @@ MediaDecoderStateMachine::StartMediaSink
     }
   }
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedAudio()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsAudioDecoding() &&
-         GetDecodedAudioDuration() < EXHAUSTED_DATA_MARGIN_USECS * mPlaybackRate;
+  return IsAudioDecoding()
+         && GetDecodedAudioDuration()
+            < EXHAUSTED_DATA_MARGIN_USECS * mPlaybackRate;
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedVideo()
 {
   MOZ_ASSERT(OnTaskQueue());
-  return IsVideoDecoding() &&
-         VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
+  return IsVideoDecoding()
+         && VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
 }
 
 bool
 MediaDecoderStateMachine::HasLowDecodedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(mReader->UseBufferingHeuristics());
   return HasLowDecodedAudio() || HasLowDecodedVideo();
 }
 
 bool MediaDecoderStateMachine::OutOfDecodedAudio()
 {
     MOZ_ASSERT(OnTaskQueue());
-    return IsAudioDecoding() && !AudioQueue().IsFinished() &&
-           AudioQueue().GetSize() == 0 &&
-           !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
+    return IsAudioDecoding() && !AudioQueue().IsFinished()
+           && AudioQueue().GetSize() == 0
+           && !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
 }
 
 bool MediaDecoderStateMachine::HasLowBufferedData()
 {
   MOZ_ASSERT(OnTaskQueue());
   return HasLowBufferedData(detail::LOW_DATA_THRESHOLD_USECS);
 }
 
@@ -3189,24 +3248,25 @@ bool MediaDecoderStateMachine::HasLowBuf
   if (mBuffered.Ref().IsInvalid()) {
     return false;
   }
 
   // We are never low in decoded data when we don't have audio/video or have
   // decoded all audio/video samples.
   int64_t endOfDecodedVideoData =
     (HasVideo() && !VideoQueue().IsFinished())
-      ? mDecodedVideoEndTime
-      : INT64_MAX;
+    ? mDecodedVideoEndTime
+    : INT64_MAX;
   int64_t endOfDecodedAudioData =
     (HasAudio() && !AudioQueue().IsFinished())
-      ? mDecodedAudioEndTime
-      : INT64_MAX;
-
-  int64_t endOfDecodedData = std::min(endOfDecodedVideoData, endOfDecodedAudioData);
+    ? mDecodedAudioEndTime
+    : INT64_MAX;
+
+  int64_t endOfDecodedData =
+    std::min(endOfDecodedVideoData, endOfDecodedAudioData);
   if (Duration().ToMicroseconds() < endOfDecodedData) {
     // Our duration is not up to date. No point buffering.
     return false;
   }
 
   if (endOfDecodedData == INT64_MAX) {
     // Have decoded all samples. No point buffering.
     return false;
@@ -3266,17 +3326,18 @@ MediaDecoderStateMachine::FinishDecodeFi
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(!mSentFirstFrameLoadedEvent);
   DECODER_LOG("FinishDecodeFirstFrame");
 
   mMediaSink->Redraw(Info().mVideo);
 
   DECODER_LOG("Media duration %lld, "
               "transportSeekable=%d, mediaSeekable=%d",
-              Duration().ToMicroseconds(), mResource->IsTransportSeekable(), mMediaSeekable);
+              Duration().ToMicroseconds(), mResource->IsTransportSeekable(),
+              mMediaSeekable);
 
   // Get potentially updated metadata
   mReader->ReadUpdatedMetadata(mInfo.ptr());
 
   EnqueueFirstFrameLoadedEvent();
 }
 
 RefPtr<ShutdownPromise>
@@ -3359,17 +3420,18 @@ MediaDecoderStateMachine::UpdatePlayback
   if (VideoEndTime() != -1 || AudioEndTime() != -1) {
 
     const int64_t clockTime = GetClock();
     // Skip frames up to the frame at the playback position, and figure out
     // the time remaining until it's time to display the next frame and drop
     // the current frame.
     NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
 
-    // These will be non -1 if we've displayed a video frame, or played an audio frame.
+    // These will be non -1 if we've displayed a video frame, or played an audio
+    // frame.
     int64_t t = std::min(clockTime, std::max(VideoEndTime(), AudioEndTime()));
     // FIXME: Bug 1091422 - chained ogg files hit this assertion.
     //MOZ_ASSERT(t >= GetMediaTime());
     if (t > GetMediaTime()) {
       UpdatePlaybackPosition(t);
     }
   }
   // Note we have to update playback position before releasing the monitor.
@@ -3380,21 +3442,26 @@ MediaDecoderStateMachine::UpdatePlayback
   int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
   ScheduleStateMachineIn(delay);
 }
 
 /* static */ const char*
 MediaDecoderStateMachine::ToStr(NextFrameStatus aStatus)
 {
   switch (aStatus) {
-    case MediaDecoderOwner::NEXT_FRAME_AVAILABLE: return "NEXT_FRAME_AVAILABLE";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE: return "NEXT_FRAME_UNAVAILABLE";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING: return "NEXT_FRAME_UNAVAILABLE_BUFFERING";
-    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING: return "NEXT_FRAME_UNAVAILABLE_SEEKING";
-    case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED: return "NEXT_FRAME_UNINITIALIZED";
+    case MediaDecoderOwner::NEXT_FRAME_AVAILABLE:
+      return "NEXT_FRAME_AVAILABLE";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE:
+      return "NEXT_FRAME_UNAVAILABLE";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING:
+      return "NEXT_FRAME_UNAVAILABLE_BUFFERING";
+    case MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING:
+      return "NEXT_FRAME_UNAVAILABLE_SEEKING";
+    case MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED:
+      return "NEXT_FRAME_UNINITIALIZED";
   }
   return "UNKNOWN";
 }
 
 void
 MediaDecoderStateMachine::UpdateNextFrameStatus(NextFrameStatus aStatus)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -3411,17 +3478,18 @@ MediaDecoderStateMachine::CanPlayThrough
   return GetStatistics().CanPlayThrough();
 }
 
 MediaStatistics
 MediaDecoderStateMachine::GetStatistics()
 {
   MOZ_ASSERT(OnTaskQueue());
   MediaStatistics result;
-  result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
+  result.mDownloadRate =
+    mResource->GetDownloadRate(&result.mDownloadRateReliable);
   result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
   result.mTotalBytes = mResource->GetLength();
   result.mPlaybackRate = mPlaybackBytesPerSecond;
   result.mPlaybackRateReliable = mPlaybackRateReliable;
   result.mDecoderPosition = mDecoderPosition;
   result.mPlaybackPosition = mPlaybackOffset;
   return result;
 }
@@ -3430,25 +3498,26 @@ void
 MediaDecoderStateMachine::ScheduleStateMachine()
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mDispatchedStateMachine) {
     return;
   }
   mDispatchedStateMachine = true;
 
-  OwnerThread()->Dispatch(NewRunnableMethod(this, &MediaDecoderStateMachine::RunStateMachine));
+  OwnerThread()->Dispatch(
+    NewRunnableMethod(this, &MediaDecoderStateMachine::RunStateMachine));
 }
 
 void
 MediaDecoderStateMachine::ScheduleStateMachineIn(int64_t aMicroseconds)
 {
-  MOZ_ASSERT(OnTaskQueue());          // mDelayedScheduler.Ensure() may Disconnect()
-                                      // the promise, which must happen on the state
-                                      // machine task queue.
+  MOZ_ASSERT(OnTaskQueue()); // mDelayedScheduler.Ensure() may Disconnect()
+                             // the promise, which must happen on the state
+                             // machine task queue.
   MOZ_ASSERT(aMicroseconds > 0);
   if (mDispatchedStateMachine) {
     return;
   }
 
   TimeStamp now = TimeStamp::Now();
   TimeStamp target = now + TimeDuration::FromMicroseconds(aMicroseconds);
 
@@ -3630,46 +3699,47 @@ MediaDecoderStateMachine::SetAudioCaptur
 
   // Restore playback parameters.
   mMediaSink->SetPlaybackParams(params);
 
   mAudioCaptured = aCaptured;
 
   // Don't buffer as much when audio is captured because we don't need to worry
   // about high latency audio devices.
-  mAmpleAudioThresholdUsecs = mAudioCaptured ?
-                              detail::AMPLE_AUDIO_USECS / 2 :
-                              detail::AMPLE_AUDIO_USECS;
+  mAmpleAudioThresholdUsecs =
+    mAudioCaptured ? detail::AMPLE_AUDIO_USECS / 2 : detail::AMPLE_AUDIO_USECS;
 
   mStateObj->HandleAudioCaptured();
 }
 
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
   MOZ_ASSERT(OnTaskQueue());
   return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated())
-    ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
-    : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
+         ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
+         : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 nsCString
 MediaDecoderStateMachine::GetDebugInfo()
 {
   MOZ_ASSERT(OnTaskQueue());
   return nsPrintfCString(
-    "GetMediaTime=%lld GetClock=%lld mMediaSink=%p "
-    "state=%s mPlayState=%d mSentFirstFrameLoadedEvent=%d IsPlaying=%d "
-    "mAudioStatus=%s mVideoStatus=%s mDecodedAudioEndTime=%lld mDecodedVideoEndTime=%lld "
-    "mAudioCompleted=%d mVideoCompleted=%d ",
-    GetMediaTime(), mMediaSink->IsStarted() ? GetClock() : -1, mMediaSink.get(),
-    ToStateStr(), mPlayState.Ref(), mSentFirstFrameLoadedEvent, IsPlaying(),
-    AudioRequestStatus(), VideoRequestStatus(), mDecodedAudioEndTime, mDecodedVideoEndTime,
-    mAudioCompleted, mVideoCompleted)
-    + mStateObj->GetDebugInfo() + nsCString("\n")
-    + mMediaSink->GetDebugInfo();
+           "GetMediaTime=%lld GetClock=%lld mMediaSink=%p "
+           "state=%s mPlayState=%d mSentFirstFrameLoadedEvent=%d IsPlaying=%d "
+           "mAudioStatus=%s mVideoStatus=%s mDecodedAudioEndTime=%lld "
+           "mDecodedVideoEndTime=%lld "
+           "mAudioCompleted=%d mVideoCompleted=%d ",
+           GetMediaTime(), mMediaSink->IsStarted() ? GetClock() : -1,
+           mMediaSink.get(), ToStateStr(), mPlayState.Ref(),
+           mSentFirstFrameLoadedEvent, IsPlaying(), AudioRequestStatus(),
+           VideoRequestStatus(), mDecodedAudioEndTime, mDecodedVideoEndTime,
+           mAudioCompleted, mVideoCompleted)
+         + mStateObj->GetDebugInfo() + nsCString("\n")
+         + mMediaSink->GetDebugInfo();
 }
 
 RefPtr<MediaDecoder::DebugInfoPromise>
 MediaDecoderStateMachine::RequestDebugInfo()
 {
   using PromiseType = MediaDecoder::DebugInfoPromise;
   RefPtr<PromiseType::Private> p = new PromiseType::Private(__func__);
   OwnerThread()->Dispatch(NS_NewRunnableFunction([this, p] () {
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -109,17 +109,18 @@ class AudioSegment;
 class DecodedStream;
 class MediaDecoderReaderWrapper;
 class OutputStreamManager;
 class TaskQueue;
 
 extern LazyLogModule gMediaDecoderLog;
 extern LazyLogModule gMediaSampleLog;
 
-enum class MediaEventType : int8_t {
+enum class MediaEventType : int8_t
+{
   PlaybackStarted,
   PlaybackStopped,
   PlaybackEnded,
   SeekStarted,
   Invalidate,
   EnterVideoSuspend,
   ExitVideoSuspend
 };
@@ -145,17 +146,18 @@ public:
   typedef MediaDecoderOwner::NextFrameStatus NextFrameStatus;
   typedef mozilla::layers::ImageContainer::FrameID FrameID;
   MediaDecoderStateMachine(MediaDecoder* aDecoder,
                            MediaDecoderReader* aReader);
 
   nsresult Init(MediaDecoder* aDecoder);
 
   // Enumeration for the valid decoding states
-  enum State {
+  enum State
+  {
     DECODER_STATE_DECODING_METADATA,
     DECODER_STATE_WAIT_FOR_CDM,
     DECODER_STATE_DORMANT,
     DECODER_STATE_DECODING_FIRSTFRAME,
     DECODER_STATE_DECODING,
     DECODER_STATE_SEEKING,
     DECODER_STATE_BUFFERING,
     DECODER_STATE_COMPLETED,
@@ -425,17 +427,18 @@ protected:
   bool IsWaitingAudioData() const { return mAudioWaitRequest.Exists(); }
   bool IsWaitingVideoData() const { return mVideoWaitRequest.Exists(); }
 
   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different than the "current playback position",
   // which is in the range [0,duration].
-  int64_t GetMediaTime() const {
+  int64_t GetMediaTime() const
+  {
     MOZ_ASSERT(OnTaskQueue());
     return mCurrentPosition;
   }
 
   // Returns an upper bound on the number of microseconds of audio that is
   // decoded and playable. This is the sum of the number of usecs of audio which
   // is decoded and in the reader's audio queue, and the usecs of unplayed audio
   // which has been pushed to the audio hardware for playback. Note that after
@@ -489,17 +492,21 @@ private:
   // the audio, decoder, state machine, and main threads.
   MediaQueue<MediaData> mAudioQueue;
   // Queue of video frames. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
   MediaQueue<MediaData> mVideoQueue;
 
   UniquePtr<StateObject> mStateObj;
 
-  media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.Ref().ref(); }
+  media::TimeUnit Duration() const
+  {
+    MOZ_ASSERT(OnTaskQueue());
+    return mDuration.Ref().ref();
+  }
 
   // Recomputes the canonical duration from various sources.
   void RecomputeDuration();
 
 
   // FrameID which increments every time a frame is pushed to our queue.
   FrameID mCurrentFrameID;
 
@@ -510,18 +517,18 @@ private:
   // Returns true if we're logically playing, that is, if the Play() has
   // been called and Pause() has not or we have not yet reached the end
   // of media. This is irrespective of the seeking state; if the owner
   // calls Play() and then Seek(), we still count as logically playing.
   // The decoder monitor must be held.
   bool IsLogicallyPlaying()
   {
     MOZ_ASSERT(OnTaskQueue());
-    return mPlayState == MediaDecoder::PLAY_STATE_PLAYING ||
-           mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
+    return mPlayState == MediaDecoder::PLAY_STATE_PLAYING
+           || mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
   }
 
   // Media Fragment end time in microseconds. Access controlled by decoder monitor.
   int64_t mFragmentEndTime;
 
   // The media sink resource.  Used on the state machine thread.
   RefPtr<media::MediaSink> mMediaSink;
 
@@ -746,31 +753,34 @@ private:
   Canonical<int64_t> mPlaybackOffset;
 
   // Used to distinguish whether the audio is producing sound.
   Canonical<bool> mIsAudioDataAudible;
 
 public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
-  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
+  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration()
+  {
     return &mDuration;
   }
-  AbstractCanonical<bool>* CanonicalIsShutdown() {
-    return &mIsShutdown;
-  }
-  AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus() {
+  AbstractCanonical<bool>* CanonicalIsShutdown() { return &mIsShutdown; }
+  AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus()
+  {
     return &mNextFrameStatus;
   }
-  AbstractCanonical<int64_t>* CanonicalCurrentPosition() {
+  AbstractCanonical<int64_t>* CanonicalCurrentPosition()
+  {
     return &mCurrentPosition;
   }
-  AbstractCanonical<int64_t>* CanonicalPlaybackOffset() {
+  AbstractCanonical<int64_t>* CanonicalPlaybackOffset()
+  {
     return &mPlaybackOffset;
   }
-  AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
+  AbstractCanonical<bool>* CanonicalIsAudioDataAudible()
+  {
     return &mIsAudioDataAudible;
   }
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -17,16 +17,17 @@
 #include "mozilla/layers/ShadowLayers.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/CDMProxy.h"
 #include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/SyncRunnable.h"
+#include "mozilla/Unused.h"
 #include "nsContentUtils.h"
 #include "nsPrintfCString.h"
 #include "nsSize.h"
 
 #include <algorithm>
 #include <queue>
 
 using namespace mozilla::media;
@@ -96,35 +97,34 @@ private:
   std::queue<RefPtr<PromisePrivate>> mPromises;
 };
 
 StaticMutex DecoderAllocPolicy::sMutex;
 
 class DecoderAllocPolicy::AutoDeallocToken : public Token
 {
 public:
-  explicit AutoDeallocToken(TrackType aTrack)
-    : mTrack(aTrack)
-  {}
+  explicit AutoDeallocToken(TrackType aTrack) : mTrack(aTrack) { }
 
 private:
   ~AutoDeallocToken()
   {
     DecoderAllocPolicy::Instance(mTrack).Dealloc();
   }
 
   const TrackType mTrack;
 };
 
 DecoderAllocPolicy::DecoderAllocPolicy(TrackType aTrack)
   : mMonitor("DecoderAllocPolicy::mMonitor")
   , mDecoderLimit(MediaPrefs::MediaDecoderLimit())
   , mTrack(aTrack)
 {
-  // Non DocGroup-version AbstractThread::MainThread is fine for ClearOnShutdown().
+  // Non DocGroup-version AbstractThread::MainThread is fine for
+  // ClearOnShutdown().
   AbstractThread::MainThread()->Dispatch(NS_NewRunnableFunction([this] () {
     ClearOnShutdown(this, ShutdownPhase::ShutdownThreads);
   }));
 }
 
 DecoderAllocPolicy::~DecoderAllocPolicy()
 {
   while (!mPromises.empty()) {
@@ -191,18 +191,39 @@ DecoderAllocPolicy::operator=(std::nullp
 
 class MediaFormatReader::DecoderFactory
 {
   using InitPromise = MediaDataDecoder::InitPromise;
   using TokenPromise = DecoderAllocPolicy::Promise;
   using Token = DecoderAllocPolicy::Token;
 
 public:
-  explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) {}
+  explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) { }
   void CreateDecoder(TrackType aTrack);
+  // Shutdown any decoder pending initialization.
+  RefPtr<ShutdownPromise> ShutdownDecoder(TrackType aTrack)
+  {
+    MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+               || aTrack == TrackInfo::kVideoTrack);
+    auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
+    data.mTokenRequest.DisconnectIfExists();
+    data.mInitRequest.DisconnectIfExists();
+    if (!data.mDecoder) {
+      return ShutdownPromise::CreateAndResolve(true, __func__);
+    }
+    if (data.mShutdownRequest.Exists()) {
+      // A shutdown is already in progress due to a prior initialization error,
+      // return the existing promise.
+      data.mShutdownRequest.Disconnect();
+      RefPtr<ShutdownPromise> p = data.mShutdownPromise.forget();
+      return p;
+    }
+    RefPtr<MediaDataDecoder> decoder = data.mDecoder.forget();
+    return decoder->Shutdown();
+  }
 
 private:
   class Wrapper;
 
   enum class Stage : int8_t
   {
     None,
     WaitForToken,
@@ -210,78 +231,77 @@ private:
     WaitForInit
   };
 
   struct Data
   {
     Stage mStage = Stage::None;
     RefPtr<Token> mToken;
     RefPtr<MediaDataDecoder> mDecoder;
-    MozPromiseRequestHolder<TokenPromise> mTokenPromise;
-    MozPromiseRequestHolder<InitPromise> mInitPromise;
-    ~Data()
-    {
-      mTokenPromise.DisconnectIfExists();
-      mInitPromise.DisconnectIfExists();
-      if (mDecoder) {
-        mDecoder->Flush();
-        mDecoder->Shutdown();
-      }
-    }
+    MozPromiseRequestHolder<TokenPromise> mTokenRequest;
+    MozPromiseRequestHolder<InitPromise> mInitRequest;
+    MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
+    RefPtr<ShutdownPromise> mShutdownPromise;
   } mAudio, mVideo;
 
   void RunStage(TrackType aTrack);
   MediaResult DoCreateDecoder(TrackType aTrack);
   void DoInitDecoder(TrackType aTrack);
 
   MediaFormatReader* const mOwner; // guaranteed to be valid by the owner.
 };
 
 void
 MediaFormatReader::DecoderFactory::CreateDecoder(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
-             aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+             || aTrack == TrackInfo::kVideoTrack);
   RunStage(aTrack);
 }
 
 class MediaFormatReader::DecoderFactory::Wrapper : public MediaDataDecoder
 {
   using Token = DecoderAllocPolicy::Token;
 
 public:
   Wrapper(already_AddRefed<MediaDataDecoder> aDecoder,
           already_AddRefed<Token> aToken)
     : mDecoder(aDecoder), mToken(aToken) {}
 
   RefPtr<InitPromise> Init() override { return mDecoder->Init(); }
-  void Input(MediaRawData* aSample) override { mDecoder->Input(aSample); }
-  void Flush() override { mDecoder->Flush(); }
-  void Drain() override { mDecoder->Drain(); }
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
+  {
+    return mDecoder->Decode(aSample);
+  }
+  RefPtr<DecodePromise> Drain() override { return mDecoder->Drain(); }
+  RefPtr<FlushPromise> Flush() override { return mDecoder->Flush(); }
   bool IsHardwareAccelerated(nsACString& aFailureReason) const override
   {
     return mDecoder->IsHardwareAccelerated(aFailureReason);
   }
   const char* GetDescriptionName() const override
   {
     return mDecoder->GetDescriptionName();
   }
   void SetSeekThreshold(const media::TimeUnit& aTime) override
   {
     mDecoder->SetSeekThreshold(aTime);
   }
   bool SupportDecoderRecycling() const override
   {
     return mDecoder->SupportDecoderRecycling();
   }
-  void Shutdown() override
+  RefPtr<ShutdownPromise> Shutdown() override
   {
-    mDecoder->Shutdown();
-    mDecoder = nullptr;
-    mToken = nullptr;
+    RefPtr<MediaDataDecoder> decoder = mDecoder.forget();
+    RefPtr<Token> token = mToken.forget();
+    return decoder->Shutdown()->Then(
+      AbstractThread::GetCurrent(), __func__,
+      [token]() {},
+      [token]() { MOZ_RELEASE_ASSERT(false, "Can't reach here"); });
   }
 
 private:
   RefPtr<MediaDataDecoder> mDecoder;
   RefPtr<Token> mToken;
 };
 
 void
@@ -290,39 +310,39 @@ MediaFormatReader::DecoderFactory::RunSt
   auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
 
   switch (data.mStage) {
     case Stage::None: {
       MOZ_ASSERT(!data.mToken);
       DecoderAllocPolicy::Instance(aTrack).Alloc()->Then(
         mOwner->OwnerThread(), __func__,
         [this, &data, aTrack] (Token* aToken) {
-          data.mTokenPromise.Complete();
+          data.mTokenRequest.Complete();
           data.mToken = aToken;
           data.mStage = Stage::CreateDecoder;
           RunStage(aTrack);
         },
         [&data] () {
-          data.mTokenPromise.Complete();
+          data.mTokenRequest.Complete();
           data.mStage = Stage::None;
-        })->Track(data.mTokenPromise);
+        })->Track(data.mTokenRequest);
       data.mStage = Stage::WaitForToken;
       break;
     }
 
     case Stage::WaitForToken: {
       MOZ_ASSERT(!data.mToken);
-      MOZ_ASSERT(data.mTokenPromise.Exists());
+      MOZ_ASSERT(data.mTokenRequest.Exists());
       break;
     }
 
     case Stage::CreateDecoder: {
       MOZ_ASSERT(data.mToken);
       MOZ_ASSERT(!data.mDecoder);
-      MOZ_ASSERT(!data.mInitPromise.Exists());
+      MOZ_ASSERT(!data.mInitRequest.Exists());
 
       MediaResult rv = DoCreateDecoder(aTrack);
       if (NS_FAILED(rv)) {
         NS_WARNING("Error constructing decoders");
         data.mToken = nullptr;
         data.mStage = Stage::None;
         mOwner->NotifyError(aTrack, rv);
         return;
@@ -331,142 +351,162 @@ MediaFormatReader::DecoderFactory::RunSt
       data.mDecoder = new Wrapper(data.mDecoder.forget(), data.mToken.forget());
       DoInitDecoder(aTrack);
       data.mStage = Stage::WaitForInit;
       break;
     }
 
     case Stage::WaitForInit: {
       MOZ_ASSERT(data.mDecoder);
-      MOZ_ASSERT(data.mInitPromise.Exists());
+      MOZ_ASSERT(data.mInitRequest.Exists());
       break;
     }
   }
 }
 
 MediaResult
 MediaFormatReader::DecoderFactory::DoCreateDecoder(TrackType aTrack)
 {
   auto& ownerData = mOwner->GetDecoderData(aTrack);
   auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
 
   auto decoderCreatingError = "error creating audio decoder";
-  MediaResult result = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
+  MediaResult result =
+    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
 
   if (!mOwner->mPlatform) {
     mOwner->mPlatform = new PDMFactory();
     if (mOwner->IsEncrypted()) {
       MOZ_ASSERT(mOwner->mCDMProxy);
       mOwner->mPlatform->SetCDMProxy(mOwner->mCDMProxy);
     }
   }
 
   switch (aTrack) {
     case TrackInfo::kAudioTrack: {
       data.mDecoder = mOwner->mPlatform->CreateDecoder({
         ownerData.mInfo
         ? *ownerData.mInfo->GetAsAudioInfo()
         : *ownerData.mOriginalInfo->GetAsAudioInfo(),
         ownerData.mTaskQueue,
-        ownerData.mCallback.get(),
         mOwner->mCrashHelper,
         ownerData.mIsBlankDecode,
-        &result
+        &result,
+        aTrack,
+        &mOwner->OnTrackWaitingForKeyProducer()
       });
       break;
     }
 
     case TrackType::kVideoTrack: {
       // Decoders use the layers backend to decide if they can use hardware decoding,
       // so specify LAYERS_NONE if we want to forcibly disable it.
       data.mDecoder = mOwner->mPlatform->CreateDecoder({
         ownerData.mInfo
         ? *ownerData.mInfo->GetAsVideoInfo()
         : *ownerData.mOriginalInfo->GetAsVideoInfo(),
         ownerData.mTaskQueue,
-        ownerData.mCallback.get(),
         mOwner->mKnowsCompositor,
         mOwner->GetImageContainer(),
         mOwner->mCrashHelper,
         ownerData.mIsBlankDecode,
-        &result
+        &result,
+        aTrack,
+        &mOwner->OnTrackWaitingForKeyProducer()
       });
       break;
     }
 
     default:
       break;
   }
 
   if (data.mDecoder) {
-    result = MediaResult(NS_OK);
-    return result;
+    return NS_OK;
   }
 
   ownerData.mDescription = decoderCreatingError;
   return result;
 }
 
 void
 MediaFormatReader::DecoderFactory::DoInitDecoder(TrackType aTrack)
 {
   auto& ownerData = mOwner->GetDecoderData(aTrack);
   auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
 
-  data.mDecoder->Init()->Then(
-    mOwner->OwnerThread(), __func__,
-    [this, &data, &ownerData] (TrackType aTrack) {
-      data.mInitPromise.Complete();
-      data.mStage = Stage::None;
-      MutexAutoLock lock(ownerData.mMutex);
-      ownerData.mDecoder = data.mDecoder.forget();
-      ownerData.mDescription = ownerData.mDecoder->GetDescriptionName();
-      mOwner->SetVideoDecodeThreshold();
-      mOwner->ScheduleUpdate(aTrack);
-    },
-    [this, &data, aTrack] (MediaResult aError) {
-      data.mInitPromise.Complete();
-      data.mStage = Stage::None;
-      data.mDecoder->Shutdown();
-      data.mDecoder = nullptr;
-      mOwner->NotifyError(aTrack, aError);
-    })->Track(data.mInitPromise);
+  data.mDecoder->Init()
+    ->Then(mOwner->OwnerThread(), __func__,
+           [this, &data, &ownerData](TrackType aTrack) {
+             data.mInitRequest.Complete();
+             data.mStage = Stage::None;
+             MutexAutoLock lock(ownerData.mMutex);
+             ownerData.mDecoder = data.mDecoder.forget();
+             ownerData.mDescription = ownerData.mDecoder->GetDescriptionName();
+             mOwner->SetVideoDecodeThreshold();
+             mOwner->ScheduleUpdate(aTrack);
+           },
+           [this, &data, &ownerData, aTrack](const MediaResult& aError) {
+             data.mInitRequest.Complete();
+             MOZ_RELEASE_ASSERT(!ownerData.mDecoder,
+                                "Can't have a decoder already set");
+             data.mStage = Stage::None;
+             data.mShutdownPromise = data.mDecoder->Shutdown();
+             data.mShutdownPromise
+               ->Then(
+                 mOwner->OwnerThread(), __func__,
+                 [this, &data, aTrack, aError]() {
+                   data.mShutdownRequest.Complete();
+                   data.mShutdownPromise = nullptr;
+                   data.mDecoder = nullptr;
+                   mOwner->NotifyError(aTrack, aError);
+                 },
+                 []() { MOZ_RELEASE_ASSERT(false, "Can't ever get here"); })
+               ->Track(data.mShutdownRequest);
+           })
+    ->Track(data.mInitRequest);
 }
 
 // DemuxerProxy ensures that the original main demuxer is only ever accessed
 // via its own dedicated task queue.
 // This ensure that the reader's taskqueue will never blocked while a demuxer
 // is itself blocked attempting to access the MediaCache or the MediaResource.
 class MediaFormatReader::DemuxerProxy
 {
   using TrackType = TrackInfo::TrackType;
   class Wrapper;
 
 public:
-  explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* mainThread)
+  explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* aMainThread)
     : mTaskQueue(new AutoTaskQueue(
                    GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
-                   mainThread))
+                   aMainThread))
     , mData(new Data(aDemuxer))
   {
     MOZ_COUNT_CTOR(DemuxerProxy);
   }
 
   ~DemuxerProxy()
   {
     MOZ_COUNT_DTOR(DemuxerProxy);
+  }
+
+  RefPtr<ShutdownPromise> Shutdown()
+  {
     mData->mAudioDemuxer = nullptr;
     mData->mVideoDemuxer = nullptr;
     RefPtr<Data> data = mData.forget();
-    mTaskQueue->Dispatch(
+    return InvokeAsync(mTaskQueue, __func__, [data]() {
       // We need to clear our reference to the demuxer now. So that in the event
       // the init promise wasn't resolved, such as what can happen with the
       // mediasource demuxer that is waiting on more data, it will force the
       // init promise to be rejected.
-      NS_NewRunnableFunction([data]() { data->mDemuxer = nullptr; }));
+      data->mDemuxer = nullptr;
+      return ShutdownPromise::CreateAndResolve(true, __func__);
+    });
   }
 
   RefPtr<MediaDataDemuxer::InitPromise> Init();
 
   Wrapper*
   GetTrackDemuxer(TrackType aTrack, uint32_t aTrackNumber)
   {
     MOZ_RELEASE_ASSERT(mData && mData->mInitDone);
@@ -534,17 +574,18 @@ private:
   const RefPtr<AutoTaskQueue> mTaskQueue;
   struct Data
   {
     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Data)
 
     explicit Data(MediaDataDemuxer* aDemuxer)
       : mInitDone(false)
       , mDemuxer(aDemuxer)
-    { }
+    {
+    }
 
     Atomic<bool> mInitDone;
     // Only ever accessed over mTaskQueue once.
     RefPtr<MediaDataDemuxer> mDemuxer;
     // Only accessed once InitPromise has been resolved and immutable after.
     // So we can safely access them without the use of the mutex.
     uint32_t mNumAudioTrack = 0;
     RefPtr<Wrapper> mAudioDemuxer;
@@ -564,17 +605,18 @@ class MediaFormatReader::DemuxerProxy::W
 {
 public:
   Wrapper(MediaTrackDemuxer* aTrackDemuxer, AutoTaskQueue* aTaskQueue)
     : mMutex("TrackDemuxer Mutex")
     , mTaskQueue(aTaskQueue)
     , mGetSamplesMayBlock(aTrackDemuxer->GetSamplesMayBlock())
     , mInfo(aTrackDemuxer->GetInfo())
     , mTrackDemuxer(aTrackDemuxer)
-  { }
+  {
+  }
 
   UniquePtr<TrackInfo> GetInfo() const override
   {
     if (!mInfo) {
       return nullptr;
     }
     return mInfo->Clone();
   }
@@ -806,80 +848,152 @@ MediaFormatReader::MediaFormatReader(Abs
   MOZ_ASSERT(aDemuxer);
   MOZ_COUNT_CTOR(MediaFormatReader);
 
   if (aDecoder && aDecoder->CompositorUpdatedEvent()) {
     mCompositorUpdatedListener =
       aDecoder->CompositorUpdatedEvent()->Connect(
         mTaskQueue, this, &MediaFormatReader::NotifyCompositorUpdated);
   }
+  mOnTrackWaitingForKeyListener = OnTrackWaitingForKey().Connect(
+    mTaskQueue, this, &MediaFormatReader::NotifyWaitingForKey);
 }
 
 MediaFormatReader::~MediaFormatReader()
 {
   MOZ_COUNT_DTOR(MediaFormatReader);
 }
 
 RefPtr<ShutdownPromise>
 MediaFormatReader::Shutdown()
 {
   MOZ_ASSERT(OnTaskQueue());
-
-  mDecoderFactory = nullptr;
+  LOG("");
+
   mDemuxerInitRequest.DisconnectIfExists();
   mNotifyDataArrivedPromise.DisconnectIfExists();
   mMetadataPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
   mSeekPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
   mSkipRequest.DisconnectIfExists();
 
-  if (mAudio.mDecoder) {
-    Reset(TrackInfo::kAudioTrack);
-    if (mAudio.HasPromise()) {
-      mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    }
-    mAudio.ShutdownDecoder();
+  if (mAudio.HasPromise()) {
+    mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
   }
-  if (mAudio.mTrackDemuxer) {
+  if (mVideo.HasPromise()) {
+    mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  }
+
+  nsTArray<RefPtr<ShutdownPromise>> promises;
+
+  if (HasAudio()) {
     mAudio.ResetDemuxer();
     mAudio.mTrackDemuxer->BreakCycles();
     mAudio.mTrackDemuxer = nullptr;
+    mAudio.ResetState();
+    promises.AppendElement(ShutdownDecoderWithPromise(TrackInfo::kAudioTrack));
   }
+
+  if (HasVideo()) {
+    mVideo.ResetDemuxer();
+    mVideo.mTrackDemuxer->BreakCycles();
+    mVideo.mTrackDemuxer = nullptr;
+    mVideo.ResetState();
+    promises.AppendElement(ShutdownDecoderWithPromise(TrackInfo::kVideoTrack));
+  }
+
+  promises.AppendElement(mDemuxer->Shutdown());
+  mDemuxer = nullptr;
+
+  mCompositorUpdatedListener.DisconnectIfExists();
+  mOnTrackWaitingForKeyListener.Disconnect();
+
+  RefPtr<ShutdownPromise> p = mShutdownPromise.Ensure(__func__);
+  ShutdownPromise::All(OwnerThread(), promises)
+    ->Then(OwnerThread(), __func__, this,
+           &MediaFormatReader::TearDownDecoders,
+           &MediaFormatReader::TearDownDecoders);
+
+  mShutdown = true;
+
+  return p;
+}
+
+RefPtr<ShutdownPromise>
+MediaFormatReader::ShutdownDecoderWithPromise(TrackType aTrack)
+{
+  LOGV("%s", TrackTypeToStr(aTrack));
+
+  auto& decoder = GetDecoderData(aTrack);
+  if (!decoder.mFlushed && decoder.mDecoder) {
+    // The decoder has yet to be flushed.
+    // We always flush the decoder prior to a shutdown to ensure that all the
+    // potentially pending operations on the decoder are completed.
+    decoder.Flush();
+    return decoder.mShutdownPromise.Ensure(__func__);
+  }
+
+  if (decoder.mFlushRequest.Exists() || decoder.mShutdownRequest.Exists()) {
+    // Let the current flush or shutdown operation complete, Flush will continue
+    // shutting down the current decoder now that the shutdown promise is set.
+    return decoder.mShutdownPromise.Ensure(__func__);
+  }
+
+  if (!decoder.mDecoder) {
+    // Shutdown any decoders that may be in the process of being initialized
+    // in the Decoder Factory.
+    // This will be a no-op until we're processing the final decoder shutdown
+    // prior to the MediaFormatReader being shutdown.
+    return mDecoderFactory->ShutdownDecoder(aTrack);
+  }
+
+  // Finally, let's just shut down the currently active decoder.
+  decoder.ShutdownDecoder();
+  return decoder.mShutdownPromise.Ensure(__func__);
+}
+
+void
+MediaFormatReader::ShutdownDecoder(TrackType aTrack)
+{
+  LOG("%s", TrackTypeToStr(aTrack));
+  auto& decoder = GetDecoderData(aTrack);
+  if (!decoder.mDecoder) {
+    LOGV("Already shut down");
+    return;
+  }
+  if (!decoder.mShutdownPromise.IsEmpty()) {
+    LOGV("Shutdown already in progress");
+    return;
+  }
+  Unused << ShutdownDecoderWithPromise(aTrack);
+}
+
+void
+MediaFormatReader::TearDownDecoders()
+{
   if (mAudio.mTaskQueue) {
     mAudio.mTaskQueue->BeginShutdown();
     mAudio.mTaskQueue->AwaitShutdownAndIdle();
     mAudio.mTaskQueue = nullptr;
   }
-  MOZ_ASSERT(!mAudio.HasPromise());
-
-  if (mVideo.mDecoder) {
-    Reset(TrackInfo::kVideoTrack);
-    if (mVideo.HasPromise()) {
-      mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    }
-    mVideo.ShutdownDecoder();
-  }
-  if (mVideo.mTrackDemuxer) {
-    mVideo.ResetDemuxer();
-    mVideo.mTrackDemuxer->BreakCycles();
-    mVideo.mTrackDemuxer = nullptr;
-  }
   if (mVideo.mTaskQueue) {
     mVideo.mTaskQueue->BeginShutdown();
     mVideo.mTaskQueue->AwaitShutdownAndIdle();
     mVideo.mTaskQueue = nullptr;
   }
-  MOZ_ASSERT(!mVideo.HasPromise());
-
-  mDemuxer = nullptr;
+
+  mDecoderFactory = nullptr;
   mPlatform = nullptr;
   mVideoFrameContainer = nullptr;
 
-  mCompositorUpdatedListener.DisconnectIfExists();
-
-  return MediaDecoderReader::Shutdown();
+  if (mShutdownPromise.IsEmpty()) {
+    return;
+  }
+
+  MediaDecoderReader::Shutdown();
+  mShutdownPromise.Resolve(true, __func__);
 }
 
 void
 MediaFormatReader::InitLayersBackendType()
 {
   // Extract the layer manager backend type so that platform decoders
   // can determine whether it's worthwhile using hardware accelerated
   // video decoding.
@@ -917,27 +1031,29 @@ MediaFormatReader::InitInternal()
   if (mDecoder) {
     // Note: GMPCrashHelper must be created on main thread, as it may use
     // weak references, which aren't threadsafe.
     mCrashHelper = mDecoder->GetCrashHelper();
   }
   return NS_OK;
 }
 
-class DispatchKeyNeededEvent : public Runnable {
+class DispatchKeyNeededEvent : public Runnable
+{
 public:
   DispatchKeyNeededEvent(AbstractMediaDecoder* aDecoder,
                          nsTArray<uint8_t>& aInitData,
                          const nsString& aInitDataType)
     : mDecoder(aDecoder)
     , mInitData(aInitData)
     , mInitDataType(aInitDataType)
   {
   }
-  NS_IMETHOD Run() override {
+  NS_IMETHOD Run() override
+  {
     // Note: Null check the owner, as the decoder could have been shutdown
     // since this event was dispatched.
     MediaDecoderOwner* owner = mDecoder->GetOwner();
     if (owner) {
       owner->DispatchEncrypted(mInitData, mInitDataType);
     }
     mDecoder = nullptr;
     return NS_OK;
@@ -956,17 +1072,18 @@ MediaFormatReader::SetCDMProxy(CDMProxy*
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
     MOZ_ASSERT(self->OnTaskQueue());
     self->mCDMProxy = proxy;
   });
   OwnerThread()->Dispatch(r.forget());
 }
 
 bool
-MediaFormatReader::IsWaitingOnCDMResource() {
+MediaFormatReader::IsWaitingOnCDMResource()
+{
   MOZ_ASSERT(OnTaskQueue());
   return IsEncrypted() && !mCDMProxy;
 }
 
 RefPtr<MediaDecoderReader::MetadataPromise>
 MediaFormatReader::AsyncReadMetadata()
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1002,41 +1119,41 @@ MediaFormatReader::OnDemuxerInitDone(nsr
   UniquePtr<MetadataTags> tags(MakeUnique<MetadataTags>());
 
   RefPtr<PDMFactory> platform;
   if (!IsWaitingOnCDMResource()) {
     platform = new PDMFactory();
   }
 
   // To decode, we need valid video and a place to put it.
-  bool videoActive = !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) &&
-    GetImageContainer();
+  bool videoActive =
+    !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) && GetImageContainer();
 
   if (videoActive) {
     // We currently only handle the first video track.
     mVideo.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
     if (!mVideo.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> videoInfo = mVideo.mTrackDemuxer->GetInfo();
     videoActive = videoInfo && videoInfo->IsValid();
     if (videoActive) {
-      if (platform && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
+      if (platform
+          && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
         // We have no decoder for this track. Error.
         mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
         return;
       }
       mInfo.mVideo = *videoInfo->GetAsVideoInfo();
       for (const MetadataTag& tag : videoInfo->mTags) {
         tags->Put(tag.mKey, tag.mValue);
       }
       mVideo.mOriginalInfo = Move(videoInfo);
-      mVideo.mCallback = new DecoderCallback(this, TrackInfo::kVideoTrack);
       mTrackDemuxersMayBlock |= mVideo.mTrackDemuxer->GetSamplesMayBlock();
     } else {
       mVideo.mTrackDemuxer->BreakCycles();
       mVideo.mTrackDemuxer = nullptr;
     }
   }
 
   bool audioActive = !!mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack);
@@ -1044,40 +1161,42 @@ MediaFormatReader::OnDemuxerInitDone(nsr
     mAudio.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
     if (!mAudio.mTrackDemuxer) {
       mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
       return;
     }
 
     UniquePtr<TrackInfo> audioInfo = mAudio.mTrackDemuxer->GetInfo();
     // We actively ignore audio tracks that we know we can't play.
-    audioActive = audioInfo && audioInfo->IsValid() &&
-                  (!platform ||
-                   platform->SupportsMimeType(audioInfo->mMimeType, nullptr));
+    audioActive =
+      audioInfo
+      && audioInfo->IsValid()
+      && (!platform || platform->SupportsMimeType(audioInfo->mMimeType,
+                                                  nullptr));
 
     if (audioActive) {
       mInfo.mAudio = *audioInfo->GetAsAudioInfo();
       for (const MetadataTag& tag : audioInfo->mTags) {
         tags->Put(tag.mKey, tag.mValue);
       }
       mAudio.mOriginalInfo = Move(audioInfo);
-      mAudio.mCallback = new DecoderCallback(this, TrackInfo::kAudioTrack);
       mTrackDemuxersMayBlock |= mAudio.mTrackDemuxer->GetSamplesMayBlock();
     } else {
       mAudio.mTrackDemuxer->BreakCycles();
       mAudio.mTrackDemuxer = nullptr;
     }
   }
 
   UniquePtr<EncryptionInfo> crypto = mDemuxer->GetCrypto();
   if (mDecoder && crypto && crypto->IsEncrypted()) {
     // Try and dispatch 'encrypted'. Won't go if ready state still HAVE_NOTHING.
     for (uint32_t i = 0; i < crypto->mInitDatas.Length(); i++) {
       NS_DispatchToMainThread(
-        new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData, crypto->mInitDatas[i].mType));
+        new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData,
+                                   crypto->mInitDatas[i].mType));
     }
     mInfo.mCrypto = *crypto;
   }
 
   int64_t videoDuration = HasVideo() ? mInfo.mVideo.mDuration : 0;
   int64_t audioDuration = HasAudio() ? mInfo.mAudio.mDuration : 0;
 
   int64_t duration = std::max(videoDuration, audioDuration);
@@ -1117,18 +1236,18 @@ MediaFormatReader::OnDemuxerInitDone(nsr
   MaybeResolveMetadataPromise();
 }
 
 void
 MediaFormatReader::MaybeResolveMetadataPromise()
 {
   MOZ_ASSERT(OnTaskQueue());
 
-  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing()) ||
-      (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
+  if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing())
+      || (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
     return;
   }
 
   TimeUnit startTime =
     std::min(mAudio.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()),
              mVideo.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()));
 
   if (!startTime.IsInfinite()) {
@@ -1145,18 +1264,18 @@ MediaFormatReader::MaybeResolveMetadataP
   UpdateBuffered();
 
   mMetadataPromise.Resolve(metadata, __func__);
 }
 
 bool
 MediaFormatReader::IsEncrypted() const
 {
-  return (HasAudio() && mInfo.mAudio.mCrypto.mValid) ||
-         (HasVideo() && mInfo.mVideo.mCrypto.mValid);
+  return (HasAudio() && mInfo.mAudio.mCrypto.mValid)
+         || (HasVideo() && mInfo.mVideo.mCrypto.mValid);
 }
 
 void
 MediaFormatReader::OnDemuxerInitFailed(const MediaResult& aError)
 {
   mDemuxerInitRequest.Complete();
   mMetadataPromise.Reject(aError, __func__);
 }
@@ -1165,71 +1284,78 @@ void
 MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo)
 {
   *aInfo = mInfo;
 }
 
 MediaFormatReader::DecoderData&
 MediaFormatReader::GetDecoderData(TrackType aTrack)
 {
-  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
-             aTrack == TrackInfo::kVideoTrack);
+  MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
+             || aTrack == TrackInfo::kVideoTrack);
   if (aTrack == TrackInfo::kAudioTrack) {
     return mAudio;
   }
   return mVideo;
 }
 
 bool
-MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold)
+MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe,
+                              media::TimeUnit aTimeThreshold)
 {
   MOZ_ASSERT(HasVideo());
   media::TimeUnit nextKeyframe;
   nsresult rv = mVideo.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe);
   if (NS_FAILED(rv)) {
     return aSkipToNextKeyframe;
   }
   return (nextKeyframe < aTimeThreshold ||
-          (mVideo.mTimeThreshold &&
-           mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold)) &&
-         nextKeyframe.ToMicroseconds() >= 0 && !nextKeyframe.IsInfinite();
+          (mVideo.mTimeThreshold
+           && mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold))
+         && nextKeyframe.ToMicroseconds() >= 0
+         && !nextKeyframe.IsInfinite();
 }
 
 RefPtr<MediaDecoderReader::MediaDataPromise>
 MediaFormatReader::RequestVideoData(bool aSkipToNextKeyframe,
                                     int64_t aTimeThreshold)
 {
   MOZ_ASSERT(OnTaskQueue());
-  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
+  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
+                        "No sample requests allowed while seeking");
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests");
-  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() ||
-                        mVideo.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()
+                        || mVideo.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek");
   LOGV("RequestVideoData(%d, %lld)", aSkipToNextKeyframe, aTimeThreshold);
 
   if (!HasVideo()) {
     LOG("called with no video track");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                                             __func__);
   }
 
   if (IsSeeking()) {
     LOG("called mid-seek. Rejecting.");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   if (mShutdown) {
     NS_WARNING("RequestVideoData on shutdown MediaFormatReader!");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
-  media::TimeUnit timeThreshold{media::TimeUnit::FromMicroseconds(aTimeThreshold)};
+  media::TimeUnit timeThreshold{ media::TimeUnit::FromMicroseconds(
+    aTimeThreshold) };
   // Ensure we have no pending seek going as ShouldSkip could return out of date
   // information.
-  if (!mVideo.HasInternalSeekPending() &&
-      ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
+  if (!mVideo.HasInternalSeekPending()
+      && ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
     RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__);
     SkipVideoDemuxToNextKeyFrame(timeThreshold);
     return p;
   }
 
   RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__);
   ScheduleUpdate(TrackInfo::kVideoTrack);
 
@@ -1286,52 +1412,58 @@ MediaFormatReader::DoDemuxVideo()
 
   p->Then(OwnerThread(), __func__, this,
           &MediaFormatReader::OnVideoDemuxCompleted,
           &MediaFormatReader::OnVideoDemuxFailed)
    ->Track(mVideo.mDemuxRequest);
 }
 
 void
-MediaFormatReader::OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnVideoDemuxCompleted(
+  RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   LOGV("%d video samples demuxed (sid:%d)",
        aSamples->mSamples.Length(),
-       aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
+       aSamples->mSamples[0]->mTrackInfo
+       ? aSamples->mSamples[0]->mTrackInfo->GetID()
+       : 0);
   mVideo.mDemuxRequest.Complete();
   mVideo.mQueuedSamples.AppendElements(aSamples->mSamples);
   ScheduleUpdate(TrackInfo::kVideoTrack);
 }
 
 RefPtr<MediaDecoderReader::MediaDataPromise>
 MediaFormatReader::RequestAudioData()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise(), "No duplicate sample requests");
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || mSeekPromise.IsEmpty(),
                         "No sample requests allowed while seeking");
-  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() ||
-                        !mAudio.mSeekRequest.Exists() ||
-                        mAudio.mTimeThreshold.isSome());
+  MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking()
+                        || !mAudio.mSeekRequest.Exists()
+                        || mAudio.mTimeThreshold.isSome());
   MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !IsSeeking(), "called mid-seek");
   LOGV("");
 
   if (!HasAudio()) {
     LOG("called with no audio track");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                                             __func__);
   }
 
   if (IsSeeking()) {
     LOG("called mid-seek. Rejecting.");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   if (mShutdown) {
     NS_WARNING("RequestAudioData on shutdown MediaFormatReader!");
-    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
+                                             __func__);
   }
 
   RefPtr<MediaDataPromise> p = mAudio.EnsurePromise(__func__);
   ScheduleUpdate(TrackInfo::kAudioTrack);
 
   return p;
 }
 
@@ -1353,63 +1485,52 @@ MediaFormatReader::DoDemuxAudio()
 
   p->Then(OwnerThread(), __func__, this,
           &MediaFormatReader::OnAudioDemuxCompleted,
           &MediaFormatReader::OnAudioDemuxFailed)
    ->Track(mAudio.mDemuxRequest);
 }
 
 void
-MediaFormatReader::OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnAudioDemuxCompleted(
+  RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   LOGV("%d audio samples demuxed (sid:%d)",
        aSamples->mSamples.Length(),
-       aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
+       aSamples->mSamples[0]->mTrackInfo
+       ? aSamples->mSamples[0]->mTrackInfo->GetID()
+       : 0);
   mAudio.mDemuxRequest.Complete();
   mAudio.mQueuedSamples.AppendElements(aSamples->mSamples);
   ScheduleUpdate(TrackInfo::kAudioTrack);
 }
 
 void
-MediaFormatReader::NotifyNewOutput(TrackType aTrack, MediaData* aSample)
+MediaFormatReader::NotifyNewOutput(
+  TrackType aTrack, const MediaDataDecoder::DecodedData& aResults)
 {
   MOZ_ASSERT(OnTaskQueue());
-  LOGV("Received new %s sample time:%lld duration:%lld",
-       TrackTypeToStr(aTrack), aSample->mTime, aSample->mDuration);
   auto& decoder = GetDecoderData(aTrack);
-  if (!decoder.mOutputRequested) {
-    LOG("MediaFormatReader produced output while flushing, discarding.");
-    return;
+  for (auto& sample : aResults) {
+    LOGV("Received new %s sample time:%lld duration:%lld",
+        TrackTypeToStr(aTrack), sample->mTime, sample->mDuration);
+    decoder.mOutput.AppendElement(sample);
+    decoder.mNumSamplesOutput++;
+    decoder.mNumOfConsecutiveError = 0;
   }
-  decoder.mOutput.AppendElement(aSample);
-  decoder.mNumSamplesOutput++;
-  decoder.mNumOfConsecutiveError = 0;
-  ScheduleUpdate(aTrack);
-}
-
-void
-MediaFormatReader::NotifyInputExhausted(TrackType aTrack)
-{
-  MOZ_ASSERT(OnTaskQueue());
-  LOGV("Decoder has requested more %s data", TrackTypeToStr(aTrack));
-  auto& decoder = GetDecoderData(aTrack);
-  decoder.mDecodePending = false;
+  LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
   ScheduleUpdate(aTrack);
 }
 
 void
 MediaFormatReader::NotifyDrainComplete(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
   LOG("%s", TrackTypeToStr(aTrack));
-  if (!decoder.mOutputRequested) {
-    LOG("MediaFormatReader called DrainComplete() before flushing, ignoring.");
-    return;
-  }
   decoder.mDrainComplete = true;
   ScheduleUpdate(aTrack);
 }
 
 void
 MediaFormatReader::NotifyError(TrackType aTrack, const MediaResult& aError)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -1435,17 +1556,17 @@ MediaFormatReader::NotifyWaitingForData(
 void
 MediaFormatReader::NotifyWaitingForKey(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
   if (mDecoder) {
     mDecoder->NotifyWaitingForKey();
   }
-  if (!decoder.mDecodePending) {
+  if (!decoder.mDecodeRequest.Exists()) {
     LOGV("WaitingForKey received while no pending decode. Ignoring");
   }
   decoder.mWaitingForKey = true;
   ScheduleUpdate(aTrack);
 }
 
 void
 MediaFormatReader::NotifyEndOfStream(TrackType aTrack)
@@ -1454,28 +1575,26 @@ MediaFormatReader::NotifyEndOfStream(Tra
   auto& decoder = GetDecoderData(aTrack);
   decoder.mDemuxEOS = true;
   ScheduleUpdate(aTrack);
 }
 
 bool
 MediaFormatReader::NeedInput(DecoderData& aDecoder)
 {
-  // To account for H.264 streams which may require a longer
-  // run of input than we input, decoders fire an "input exhausted" callback.
-  // The decoder will not be fed a new raw sample until InputExhausted
-  // has been called.
+  // The decoder will not be fed a new raw sample until the current decoding
+  // requests has completed.
   return
-    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
-    !aDecoder.HasPendingDrain() &&
-    !aDecoder.HasFatalError() &&
-    !aDecoder.mDemuxRequest.Exists() &&
-    !aDecoder.mOutput.Length() &&
-    !aDecoder.HasInternalSeekPending() &&
-    !aDecoder.mDecodePending;
+    (aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome())
+    && !aDecoder.HasPendingDrain()
+    && !aDecoder.HasFatalError()
+    && !aDecoder.mDemuxRequest.Exists()
+    && !aDecoder.mOutput.Length()
+    && !aDecoder.HasInternalSeekPending()
+    && !aDecoder.mDecodeRequest.Exists();
 }
 
 void
 MediaFormatReader::ScheduleUpdate(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   if (mShutdown) {
     return;
@@ -1533,27 +1652,28 @@ MediaFormatReader::UpdateReceivedNewData
     decoder.mTimeThreshold.ref().mWaiting = false;
   }
   decoder.mWaitingForData = false;
 
   if (decoder.HasFatalError()) {
     return false;
   }
 
-  if (!mSeekPromise.IsEmpty() &&
-      (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
+  if (!mSeekPromise.IsEmpty()
+      && (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
     MOZ_ASSERT(!decoder.HasPromise());
-    MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.mTimeThreshold) &&
-                          !mVideo.mTimeThreshold,
-                          "InternalSeek must have been aborted when Seek was first called");
-    MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.HasWaitingPromise()) &&
-                          !mVideo.HasWaitingPromise(),
-                          "Waiting promises must have been rejected when Seek was first called");
-    if (mVideo.mSeekRequest.Exists() ||
-        (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
+    MOZ_DIAGNOSTIC_ASSERT(
+      (IsVideoSeeking() || !mAudio.mTimeThreshold) && !mVideo.mTimeThreshold,
+      "InternalSeek must have been aborted when Seek was first called");
+    MOZ_DIAGNOSTIC_ASSERT(
+      (IsVideoSeeking() || !mAudio.HasWaitingPromise())
+      && !mVideo.HasWaitingPromise(),
+      "Waiting promises must have been rejected when Seek was first called");
+    if (mVideo.mSeekRequest.Exists()
+        || (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
       // Already waiting for a seek to complete. Nothing more to do.
       return true;
     }
     LOG("Attempting Seek");
     ScheduleSeek();
     return true;
   }
   if (decoder.HasInternalSeekPending() || decoder.HasWaitingPromise()) {
@@ -1599,53 +1719,64 @@ MediaFormatReader::RequestDemuxSamples(T
 }
 
 void
 MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
                                         MediaRawData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   auto& decoder = GetDecoderData(aTrack);
-  decoder.mDecoder->Input(aSample);
-  decoder.mDecodePending = true;
+  RefPtr<MediaFormatReader> self = this;
+  decoder.mFlushed = false;
+  decoder.mDecoder->Decode(aSample)
+    ->Then(mTaskQueue, __func__,
+           [self, this, aTrack, &decoder]
+           (const MediaDataDecoder::DecodedData& aResults) {
+             decoder.mDecodeRequest.Complete();
+             NotifyNewOutput(aTrack, aResults);
+           },
+           [self, this, aTrack, &decoder](const MediaResult& aError) {
+             decoder.mDecodeRequest.Complete();
+             NotifyError(aTrack, aError);
+           })
+    ->Track(decoder.mDecodeRequest);
 }
 
 void
-MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
-                                        AbstractMediaDecoder::AutoNotifyDecoded& aA)
+MediaFormatReader::HandleDemuxedSamples(
+  TrackType aTrack, AbstractMediaDecoder::AutoNotifyDecoded& aA)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   auto& decoder = GetDecoderData(aTrack);
 
+  if (decoder.mFlushRequest.Exists() || decoder.mShutdownRequest.Exists()) {
+    LOGV("Decoder operation in progress, let it complete.");
+    return;
+  }
+
   if (decoder.mQueuedSamples.IsEmpty()) {
     return;
   }
 
   if (!decoder.mDecoder) {
     mDecoderFactory->CreateDecoder(aTrack);
     return;
   }
 
   LOGV("Giving %s input to decoder", TrackTypeToStr(aTrack));
 
   // Decode all our demuxed frames.
-  bool samplesPending = false;
   while (decoder.mQueuedSamples.Length()) {
     RefPtr<MediaRawData> sample = decoder.mQueuedSamples[0];
     RefPtr<SharedTrackInfo> info = sample->mTrackInfo;
 
     if (info && decoder.mLastStreamSourceID != info->GetID()) {
-      if (samplesPending) {
-        // Let existing samples complete their decoding. We'll resume later.
-        return;
-      }
-
-      bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling() &&
-                              decoder.mDecoder->SupportDecoderRecycling();
+      bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling()
+                              && decoder.mDecoder->SupportDecoderRecycling();
       if (decoder.mNextStreamSourceID.isNothing() ||
           decoder.mNextStreamSourceID.ref() != info->GetID()) {
         if (!supportRecycling) {
           LOG("%s stream id has changed from:%d to:%d, draining decoder.",
             TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
             info->GetID());
           decoder.mNeedDraining = true;
           decoder.mNextStreamSourceID = Some(info->GetID());
@@ -1654,28 +1785,29 @@ MediaFormatReader::HandleDemuxedSamples(
         }
       }
 
       LOG("%s stream id has changed from:%d to:%d.",
           TrackTypeToStr(aTrack), decoder.mLastStreamSourceID,
           info->GetID());
       decoder.mLastStreamSourceID = info->GetID();
       decoder.mNextStreamSourceID.reset();
+      decoder.mInfo = info;
+
       if (!supportRecycling) {
         LOG("Decoder does not support recycling, recreate decoder.");
-        // Reset will clear our array of queued samples. So make a copy now.
-        nsTArray<RefPtr<MediaRawData>> samples{decoder.mQueuedSamples};
-        Reset(aTrack);
-        decoder.ShutdownDecoder();
+        // If flushing is required, it will clear our array of queued samples.
+        // So make a copy now.
+        nsTArray<RefPtr<MediaRawData>> samples{ Move(decoder.mQueuedSamples) };
+        ShutdownDecoder(aTrack);
         if (sample->mKeyframe) {
           decoder.mQueuedSamples.AppendElements(Move(samples));
         }
       }
 
-      decoder.mInfo = info;
       if (sample->mKeyframe) {
         ScheduleUpdate(aTrack);
       } else {
         TimeInterval time =
           TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
                        TimeUnit::FromMicroseconds(sample->GetEndTime()));
         InternalSeekTarget seekTarget =
           decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
@@ -1683,49 +1815,50 @@ MediaFormatReader::HandleDemuxedSamples(
             sample->mTime);
         InternalSeek(aTrack, seekTarget);
       }
       return;
     }
 
     LOGV("Input:%lld (dts:%lld kf:%d)",
          sample->mTime, sample->mTimecode, sample->mKeyframe);
-    decoder.mOutputRequested = true;
     decoder.mNumSamplesInput++;
     decoder.mSizeOfQueue++;
     if (aTrack == TrackInfo::kVideoTrack) {
       aA.mStats.mParsedFrames++;
     }
 
     DecodeDemuxedSamples(aTrack, sample);
 
     decoder.mQueuedSamples.RemoveElementAt(0);
-    samplesPending = true;
+    break;
   }
 }
 
 void
-MediaFormatReader::InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget)
+MediaFormatReader::InternalSeek(TrackType aTrack,
+                                const InternalSeekTarget& aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("%s internal seek to %f",
       TrackTypeToStr(aTrack), aTarget.Time().ToSeconds());
 
   auto& decoder = GetDecoderData(aTrack);
   decoder.Flush();
   decoder.ResetDemuxer();
   decoder.mTimeThreshold = Some(aTarget);
   RefPtr<MediaFormatReader> self = this;
   decoder.mTrackDemuxer->Seek(decoder.mTimeThreshold.ref().Time())
     ->Then(OwnerThread(), __func__,
            [self, aTrack] (media::TimeUnit aTime) {
              auto& decoder = self->GetDecoderData(aTrack);
              decoder.mSeekRequest.Complete();
-             MOZ_ASSERT(decoder.mTimeThreshold,
-                        "Seek promise must be disconnected when timethreshold is reset");
+             MOZ_ASSERT(
+               decoder.mTimeThreshold,
+               "Seek promise must be disconnected when timethreshold is reset");
              decoder.mTimeThreshold.ref().mHasSeeked = true;
              self->SetVideoDecodeThreshold();
              self->ScheduleUpdate(aTrack);
            },
            [self, aTrack] (const MediaResult& aError) {
              auto& decoder = self->GetDecoderData(aTrack);
              decoder.mSeekRequest.Complete();
              switch (aError.Code()) {
@@ -1753,27 +1886,38 @@ MediaFormatReader::DrainDecoder(TrackTyp
 {
   MOZ_ASSERT(OnTaskQueue());
 
   auto& decoder = GetDecoderData(aTrack);
   if (!decoder.mNeedDraining || decoder.mDraining) {
     return;
   }
   decoder.mNeedDraining = false;
-  // mOutputRequest must be set, otherwise NotifyDrainComplete()
-  // may reject the drain if a Flush recently occurred.
-  decoder.mOutputRequested = true;
   if (!decoder.mDecoder ||
       decoder.mNumSamplesInput == decoder.mNumSamplesOutput) {
     // No frames to drain.
+    LOGV("Draining %s with nothing to drain", TrackTypeToStr(aTrack));
     NotifyDrainComplete(aTrack);
     return;
   }
-  decoder.mDecoder->Drain();
   decoder.mDraining = true;
+  RefPtr<MediaFormatReader> self = this;
+  decoder.mDecoder->Drain()
+    ->Then(mTaskQueue, __func__,
+           [self, this, aTrack, &decoder]
+           (const MediaDataDecoder::DecodedData& aResults) {
+             decoder.mDrainRequest.Complete();
+             NotifyNewOutput(aTrack, aResults);
+             NotifyDrainComplete(aTrack);
+           },
+           [self, this, aTrack, &decoder](const MediaResult& aError) {
+             decoder.mDrainRequest.Complete();
+             NotifyError(aTrack, aError);
+           })
+    ->Track(decoder.mDrainRequest);
   LOG("Requesting %s decoder to drain", TrackTypeToStr(aTrack));
 }
 
 void
 MediaFormatReader::Update(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
 
@@ -1801,20 +1945,20 @@ MediaFormatReader::Update(TrackType aTra
     return;
   }
 
   if (decoder.mSeekRequest.Exists()) {
     LOGV("Seeking hasn't completed, nothing more to do");
     return;
   }
 
-  MOZ_DIAGNOSTIC_ASSERT(!decoder.HasInternalSeekPending() ||
-                        (!decoder.mOutput.Length() &&
-                         !decoder.mQueuedSamples.Length()),
-                        "No frames can be demuxed or decoded while an internal seek is pending");
+  MOZ_DIAGNOSTIC_ASSERT(
+    !decoder.HasInternalSeekPending()
+    || (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()),
+    "No frames can be demuxed or decoded while an internal seek is pending");
 
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
 
   // Drop any frames found prior our internal seek target.
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
     RefPtr<MediaData>& output = decoder.mOutput[0];
@@ -1832,17 +1976,18 @@ MediaFormatReader::Update(TrackType aTra
            media::TimeUnit::FromMicroseconds(output->mTime).ToSeconds(),
            target.Time().ToSeconds(),
            output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
       decoder.mSizeOfQueue -= 1;
     }
   }
 
-  while (decoder.mOutput.Length() && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
+  while (decoder.mOutput.Length()
+         && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
     LOGV("Dropping null data. Time: %lld", decoder.mOutput[0]->mTime);
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
 
   if (decoder.HasPromise()) {
     needOutput = true;
     if (decoder.mOutput.Length()) {
@@ -1858,17 +2003,18 @@ MediaFormatReader::Update(TrackType aTra
       if (aTrack == TrackType::kVideoTrack) {
         uint64_t delta =
           decoder.mNumSamplesOutputTotal - mLastReportedNumDecodedFrames;
         a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
         mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
         if (output->mKeyframe) {
           if (mPreviousDecodedKeyframeTime_us < output->mTime) {
             // There is a previous keyframe -> Record inter-keyframe stats.
-            uint64_t segment_us = output->mTime - mPreviousDecodedKeyframeTime_us;
+            uint64_t segment_us =
+              output->mTime - mPreviousDecodedKeyframeTime_us;
             a.mStats.mInterKeyframeSum_us += segment_us;
             a.mStats.mInterKeyframeCount += 1;
             if (a.mStats.mInterKeyFrameMax_us < segment_us) {
               a.mStats.mInterKeyFrameMax_us = segment_us;
             }
           }
           mPreviousDecodedKeyframeTime_us = output->mTime;
         }
@@ -1890,17 +2036,18 @@ MediaFormatReader::Update(TrackType aTra
       } else if (decoder.mWaitingForData) {
         if (wasDraining && decoder.mLastSampleTime &&
             !decoder.mNextStreamSourceID) {
           // We have completed draining the decoder following WaitingForData.
           // Set up the internal seek machinery to be able to resume from the
           // last sample decoded.
           LOG("Seeking to last sample time: %lld",
               decoder.mLastSampleTime.ref().mStart.ToMicroseconds());
-          InternalSeek(aTrack, InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
+          InternalSeek(aTrack,
+                       InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
         }
         if (!decoder.mReceivedNewData) {
           LOG("Rejecting %s promise: WAITING_FOR_DATA", TrackTypeToStr(aTrack));
           decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__);
         }
       }
       // Now that draining has completed, we check if we have received
       // new data again as the result may now be different from the earlier
@@ -1925,50 +2072,55 @@ MediaFormatReader::Update(TrackType aTra
   }
 
   if (decoder.mNeedDraining) {
     DrainDecoder(aTrack);
     return;
   }
 
   if (decoder.mError && !decoder.HasFatalError()) {
-    decoder.mDecodePending = false;
-    bool needsNewDecoder = decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
-    if (!needsNewDecoder && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
+    bool needsNewDecoder =
+      decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+    if (!needsNewDecoder
+        && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
       NotifyError(aTrack, decoder.mError.ref());
       return;
     }
     decoder.mError.reset();
     LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
                                      decoder.mNumOfConsecutiveError);
     media::TimeUnit nextKeyframe;
-    if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() &&
-        NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
+    if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending()
+        && NS_SUCCEEDED(
+          decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
       if (needsNewDecoder) {
-        decoder.ShutdownDecoder();
+        ShutdownDecoder(aTrack);
       }
-      SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length());
-      return;
+      SkipVideoDemuxToNextKeyFrame(
+        decoder.mLastSampleTime.refOr(TimeInterval()).Length());
     } else if (aTrack == TrackType::kAudioTrack) {
       decoder.Flush();
     }
+    return;
   }
 
   bool needInput = NeedInput(decoder);
 
-  LOGV("Update(%s) ni=%d no=%d ie=%d, in:%llu out:%llu qs=%u pending:%u waiting:%d promise:%d wfk:%d sid:%u",
-       TrackTypeToStr(aTrack), needInput, needOutput, decoder.mDecodePending,
-       decoder.mNumSamplesInput, decoder.mNumSamplesOutput,
-       uint32_t(size_t(decoder.mSizeOfQueue)), uint32_t(decoder.mOutput.Length()),
+  LOGV("Update(%s) ni=%d no=%d in:%llu out:%llu qs=%u decoding:%d flushing:%d "
+       "shutdown:%d pending:%u waiting:%d promise:%d sid:%u",
+       TrackTypeToStr(aTrack), needInput, needOutput, decoder.mNumSamplesInput,
+       decoder.mNumSamplesOutput, uint32_t(size_t(decoder.mSizeOfQueue)),
+       decoder.mDecodeRequest.Exists(), decoder.mFlushRequest.Exists(),
+       decoder.mShutdownRequest.Exists(), uint32_t(decoder.mOutput.Length()),
        decoder.mWaitingForData, decoder.HasPromise(),
-       decoder.mWaitingForKey, decoder.mLastStreamSourceID);
-
-  if ((decoder.mWaitingForData &&
-       (!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting)) ||
-      (decoder.mWaitingForKey && decoder.mDecodePending)) {
+       decoder.mLastStreamSourceID);
+
+  if ((decoder.mWaitingForData
+       && (!decoder.mTimeThreshold || decoder.mTimeThreshold.ref().mWaiting))
+      || (decoder.mWaitingForKey && decoder.mDecodeRequest.Exists())) {
     // Nothing more we can do at present.
     LOGV("Still waiting for data or key.");
     return;
   }
 
   if (decoder.mWaitingForKey) {
     decoder.mWaitingForKey = false;
     if (decoder.HasWaitingPromise() && !decoder.IsWaiting()) {
@@ -2098,71 +2250,16 @@ MediaFormatReader::ResetDecode(TrackSet 
       mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
     }
   }
 
   return MediaDecoderReader::ResetDecode(aTracks);
 }
 
 void
-MediaFormatReader::Output(TrackType aTrack, MediaData* aSample)
-{
-  if (!aSample) {
-    NS_WARNING("MediaFormatReader::Output() passed a null sample");
-    Error(aTrack, MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
-    return;
-  }
-
-  LOGV("Decoded %s sample time=%lld timecode=%lld kf=%d dur=%lld",
-       TrackTypeToStr(aTrack), aSample->mTime, aSample->mTimecode,
-       aSample->mKeyframe, aSample->mDuration);
-
-  RefPtr<nsIRunnable> task =
-    NewRunnableMethod<TrackType, MediaData*>(
-      this, &MediaFormatReader::NotifyNewOutput, aTrack, aSample);
-  OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::DrainComplete(TrackType aTrack)
-{
-  RefPtr<nsIRunnable> task =
-    NewRunnableMethod<TrackType>(
-      this, &MediaFormatReader::NotifyDrainComplete, aTrack);
-  OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::InputExhausted(TrackType aTrack)
-{
-  RefPtr<nsIRunnable> task =
-    NewRunnableMethod<TrackType>(
-      this, &MediaFormatReader::NotifyInputExhausted, aTrack);
-  OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::Error(TrackType aTrack, const MediaResult& aError)
-{
-  RefPtr<nsIRunnable> task =
-    NewRunnableMethod<TrackType, MediaResult>(
-      this, &MediaFormatReader::NotifyError, aTrack, aError);
-  OwnerThread()->Dispatch(task.forget());
-}
-
-void
-MediaFormatReader::WaitingForKey(TrackType aTrack)
-{
-  RefPtr<nsIRunnable> task =
-    NewRunnableMethod<TrackType>(
-      this, &MediaFormatReader::NotifyWaitingForKey, aTrack);
-  OwnerThread()->Dispatch(task.forget());
-}
-
-void
 MediaFormatReader::Reset(TrackType aTrack)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Reset(%s) BEGIN", TrackTypeToStr(aTrack));
 
   auto& decoder = GetDecoderData(aTrack);
 
   decoder.ResetState();
@@ -2196,20 +2293,16 @@ void
 MediaFormatReader::SkipVideoDemuxToNextKeyFrame(media::TimeUnit aTimeThreshold)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Skipping up to %lld", aTimeThreshold.ToMicroseconds());
 
   // We've reached SkipVideoDemuxToNextKeyFrame when our decoding is late.
   // As such we can drop all already decoded samples and discard all pending
   // samples.
-  // TODO: Ideally we should set mOutputRequested to false so that all pending
-  // frames are dropped too. However, we can't do such thing as the code assumes
-  // that the decoder just got flushed. Once bug 1257107 land, we could set the
-  // decoder threshold to the value of currentTime.
   DropDecodedSamples(TrackInfo::kVideoTrack);
 
   mVideo.mTrackDemuxer->SkipToNextRandomAccessPoint(aTimeThreshold)
     ->Then(OwnerThread(), __func__, this,
            &MediaFormatReader::OnVideoSkipCompleted,
            &MediaFormatReader::OnVideoSkipFailed)
     ->Track(mSkipRequest);
   return;
@@ -2247,17 +2340,18 @@ MediaFormatReader::OnVideoSkipCompleted(
   mSkipRequest.Complete();
 
   VideoSkipReset(aSkipped);
 
   ScheduleUpdate(TrackInfo::kVideoTrack);
 }
 
 void
-MediaFormatReader::OnVideoSkipFailed(MediaTrackDemuxer::SkipFailureHolder aFailure)
+MediaFormatReader::OnVideoSkipFailed(
+  MediaTrackDemuxer::SkipFailureHolder aFailure)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Skipping failed, skipped %u frames", aFailure.mSkipped);
   mSkipRequest.Complete();
 
   switch (aFailure.mFailure.Code()) {
     case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
     case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
@@ -2286,17 +2380,18 @@ MediaFormatReader::Seek(const SeekTarget
 
   LOG("aTarget=(%lld)", aTarget.GetTime().ToMicroseconds());
 
   MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty());
   MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || !mAudio.HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(mPendingSeekTime.isNothing());
   MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing());
-  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || mAudio.mTimeThreshold.isNothing());
+  MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly()
+                        || mAudio.mTimeThreshold.isNothing());
 
   if (!mInfo.mMediaSeekable && !mInfo.mMediaSeekableOnlyInBufferedRanges) {
     LOG("Seek() END (Unseekable)");
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
   }
 
   if (mShutdown) {
     return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
@@ -2322,17 +2417,18 @@ MediaFormatReader::SetSeekTarget(const S
 
 void
 MediaFormatReader::ScheduleSeek()
 {
   if (mSeekScheduled) {
     return;
   }
   mSeekScheduled = true;
-  OwnerThread()->Dispatch(NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
+  OwnerThread()->Dispatch(
+    NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
 }
 
 void
 MediaFormatReader::AttemptSeek()
 {
   MOZ_ASSERT(OnTaskQueue());
 
   mSeekScheduled = false;
@@ -2370,19 +2466,20 @@ MediaFormatReader::OnSeekFailed(TrackTyp
   LOGV("%s failure:%u", TrackTypeToStr(aTrack), aError.Code());
   if (aTrack == TrackType::kVideoTrack) {
     mVideo.mSeekRequest.Complete();
   } else {
     mAudio.mSeekRequest.Complete();
   }
 
   if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
-    if (HasVideo() && aTrack == TrackType::kAudioTrack &&
-        mFallbackSeekTime.isSome() &&
-        mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
+    if (HasVideo()
+        && aTrack == TrackType::kAudioTrack
+        && mFallbackSeekTime.isSome()
+        && mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
       // We have failed to seek audio where video seeked to earlier.
       // Attempt to seek instead to the closest point that we know we have in
       // order to limit A/V sync discrepency.
 
       // Ensure we have the most up to date buffered ranges.
       UpdateReceivedNewData(TrackType::kAudioTrack);
       Maybe<media::TimeUnit> nextSeekTime;
       // Find closest buffered time found after video seeked time.
@@ -2403,18 +2500,18 @@ MediaFormatReader::OnSeekFailed(TrackTyp
       DoAudioSeek();
       return;
     }
     NotifyWaitingForData(aTrack);
   }
   MOZ_ASSERT(!mVideo.mSeekRequest.Exists() && !mAudio.mSeekRequest.Exists());
   mPendingSeekTime.reset();
 
-  auto type = aTrack == TrackType::kAudioTrack
-    ? MediaData::AUDIO_DATA : MediaData::VIDEO_DATA;
+  auto type = aTrack == TrackType::kAudioTrack ? MediaData::AUDIO_DATA
+                                               : MediaData::VIDEO_DATA;
   mSeekPromise.Reject(SeekRejectValue(type, aError), __func__);
 }
 
 void
 MediaFormatReader::DoVideoSeek()
 {
   MOZ_ASSERT(mPendingSeekTime.isSome());
   LOGV("Seeking video to %lld", mPendingSeekTime.ref().ToMicroseconds());
@@ -2522,18 +2619,22 @@ MediaFormatReader::OnAudioSeekCompleted(
 void
 MediaFormatReader::OnAudioSeekFailed(const MediaResult& aError)
 {
   OnSeekFailed(TrackType::kAudioTrack, aError);
 }
 
 void MediaFormatReader::ReleaseResources()
 {
-  mVideo.ShutdownDecoder();
-  mAudio.ShutdownDecoder();
+  LOGV("");
+  if (mShutdown) {
+    return;
+  }
+  ShutdownDecoder(TrackInfo::kAudioTrack);
+  ShutdownDecoder(TrackInfo::kVideoTrack);
 }
 
 bool
 MediaFormatReader::VideoIsHardwareAccelerated() const
 {
   return mVideo.mIsHardwareAccelerated;
 }
 
@@ -2558,18 +2659,19 @@ MediaFormatReader::NotifyTrackDemuxers()
   }
 }
 
 void
 MediaFormatReader::NotifyDataArrived()
 {
   MOZ_ASSERT(OnTaskQueue());
 
-  if (mShutdown || !mDemuxer ||
-      (!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
+  if (mShutdown
+      || !mDemuxer
+      || (!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
     return;
   }
 
   if (mNotifyDataArrivedPromise.Exists()) {
     // Already one in progress. Reschedule for later.
     RefPtr<nsIRunnable> task(
         NewRunnableMethod(this, &MediaFormatReader::NotifyDataArrived));
     OwnerThread()->Dispatch(task.forget());
@@ -2648,18 +2750,18 @@ MediaFormatReader::UpdateBuffered()
     mBuffered =
       intervals.Shift(media::TimeUnit() - mInfo.mStartTime);
   }
 }
 
 layers::ImageContainer*
 MediaFormatReader::GetImageContainer()
 {
-  return mVideoFrameContainer
-    ? mVideoFrameContainer->GetImageContainer() : nullptr;
+  return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
+                              : nullptr;
 }
 
 void
 MediaFormatReader::GetMozDebugReaderData(nsACString& aString)
 {
   nsAutoCString result;
   const char* audioName = "unavailable";
   const char* videoName = audioName;
@@ -2672,56 +2774,47 @@ MediaFormatReader::GetMozDebugReaderData
     MutexAutoLock mon(mVideo.mMutex);
     videoName = mVideo.mDescription;
   }
 
   result += nsPrintfCString("audio decoder: %s\n", audioName);
   result += nsPrintfCString("audio frames decoded: %lld\n",
                             mAudio.mNumSamplesOutputTotal);
   if (HasAudio()) {
-    result += nsPrintfCString("audio state: ni=%d no=%d ie=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d wfk:%d sid:%u\n",
-                              NeedInput(mAudio), mAudio.HasPromise(),
-                              mAudio.mDecodePending,
-                              mAudio.mDemuxRequest.Exists(),
-                              int(mAudio.mQueuedSamples.Length()),
-                              mAudio.mTimeThreshold
-                              ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
-                              : -1.0,
-                              mAudio.mTimeThreshold
-                              ? mAudio.mTimeThreshold.ref().mHasSeeked
-                              : -1,
-                              mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
-                              unsigned(size_t(mAudio.mSizeOfQueue)),
-                              unsigned(mAudio.mOutput.Length()),
-                              mAudio.mWaitingForData, mAudio.mWaitingForKey,
-                              mAudio.mLastStreamSourceID);
+    result += nsPrintfCString(
+      "audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
+      "out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+      NeedInput(mAudio), mAudio.HasPromise(), mAudio.mDemuxRequest.Exists(),
+      int(mAudio.mQueuedSamples.Length()),
+      mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
+                            : -1.0,
+      mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().mHasSeeked : -1,
+      mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
+      unsigned(size_t(mAudio.mSizeOfQueue)), unsigned(mAudio.mOutput.Length()),
+      mAudio.mWaitingForData, mAudio.mLastStreamSourceID);
   }
   result += nsPrintfCString("video decoder: %s\n", videoName);
-  result += nsPrintfCString("hardware video decoding: %s\n",
-                            VideoIsHardwareAccelerated() ? "enabled" : "disabled");
+  result +=
+    nsPrintfCString("hardware video decoding: %s\n",
+                    VideoIsHardwareAccelerated() ? "enabled" : "disabled");
   result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n",
                             mVideo.mNumSamplesOutputTotal,
                             mVideo.mNumSamplesSkippedTotal);
   if (HasVideo()) {
-    result += nsPrintfCString("video state: ni=%d no=%d ie=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d wfk:%d, sid:%u\n",
-                              NeedInput(mVideo), mVideo.HasPromise(),
-                              mVideo.mDecodePending,
-                              mVideo.mDemuxRequest.Exists(),
-                              int(mVideo.mQueuedSamples.Length()),
-                              mVideo.mTimeThreshold
-                              ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
-                              : -1.0,
-                              mVideo.mTimeThreshold
-                              ? mVideo.mTimeThreshold.ref().mHasSeeked
-                              : -1,
-                              mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
-                              unsigned(size_t(mVideo.mSizeOfQueue)),
-                              unsigned(mVideo.mOutput.Length()),
-                              mVideo.mWaitingForData, mVideo.mWaitingForKey,
-                              mVideo.mLastStreamSourceID);
+    result += nsPrintfCString(
+      "video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
+      "out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
+      NeedInput(mVideo), mVideo.HasPromise(), mVideo.mDemuxRequest.Exists(),
+      int(mVideo.mQueuedSamples.Length()),
+      mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
+                            : -1.0,
+      mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().mHasSeeked : -1,
+      mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
+      unsigned(size_t(mVideo.mSizeOfQueue)), unsigned(mVideo.mOutput.Length()),
+      mVideo.mWaitingForData, mVideo.mLastStreamSourceID);
   }
   aString += result;
 }
 
 void
 MediaFormatReader::SetVideoBlankDecode(bool aIsBlankDecode)
 {
   MOZ_ASSERT(OnTaskQueue());
@@ -2737,23 +2830,22 @@ MediaFormatReader::SetBlankDecode(TrackT
   if (decoder.mIsBlankDecode == aIsBlankDecode) {
     return;
   }
 
   LOG("%s, decoder.mIsBlankDecode = %d => aIsBlankDecode = %d",
       TrackTypeToStr(aTrack), decoder.mIsBlankDecode, aIsBlankDecode);
 
   decoder.mIsBlankDecode = aIsBlankDecode;
-  decoder.Flush();
-  decoder.ShutdownDecoder();
+  ShutdownDecoder(aTrack);
 }
 
 void
-MediaFormatReader::OnFirstDemuxCompleted(TrackInfo::TrackType aType,
-                                         RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
+MediaFormatReader::OnFirstDemuxCompleted(
+  TrackInfo::TrackType aType, RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
 {
   MOZ_ASSERT(OnTaskQueue());
 
   if (mShutdown) {
     return;
   }
 
   auto& decoder = GetDecoderData(aType);
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -104,23 +104,25 @@ private:
   void RequestDemuxSamples(TrackType aTrack);
   // Handle demuxed samples by the input behavior.
   void HandleDemuxedSamples(TrackType aTrack,
                             AbstractMediaDecoder::AutoNotifyDecoded& aA);
   // Decode any pending already demuxed samples.
   void DecodeDemuxedSamples(TrackType aTrack,
                             MediaRawData* aSample);
 
-  struct InternalSeekTarget {
+  struct InternalSeekTarget
+  {
     InternalSeekTarget(const media::TimeInterval& aTime, bool aDropTarget)
       : mTime(aTime)
       , mDropTarget(aDropTarget)
       , mWaiting(false)
       , mHasSeeked(false)
-    {}
+    {
+    }
 
     media::TimeUnit Time() const { return mTime.mStart; }
     media::TimeUnit EndTime() const { return mTime.mEnd; }
     bool Contains(const media::TimeUnit& aTime) const
     {
       return mTime.Contains(aTime);
     }
 
@@ -131,131 +133,101 @@ private:
   };
 
   // Perform an internal seek to aTime. If aDropTarget is true then
   // the first sample past the target will be dropped.
   void InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget);
 
   // Drain the current decoder.
   void DrainDecoder(TrackType aTrack);
-  void NotifyNewOutput(TrackType aTrack, MediaData* aSample);
-  void NotifyInputExhausted(TrackType aTrack);
+  void NotifyNewOutput(TrackType aTrack,
+                       const MediaDataDecoder::DecodedData& aResults);
   void NotifyDrainComplete(TrackType aTrack);
   void NotifyError(TrackType aTrack, const MediaResult& aError);
   void NotifyWaitingForData(TrackType aTrack);
   void NotifyWaitingForKey(TrackType aTrack);
   void NotifyEndOfStream(TrackType aTrack);
 
   void ExtractCryptoInitData(nsTArray<uint8_t>& aInitData);
 
   // Initializes mLayersBackendType if possible.
   void InitLayersBackendType();
 
-  // DecoderCallback proxies the MediaDataDecoderCallback calls to these
-  // functions.
-  void Output(TrackType aType, MediaData* aSample);
-  void InputExhausted(TrackType aTrack);
-  void Error(TrackType aTrack, const MediaResult& aError);
   void Reset(TrackType aTrack);
-  void DrainComplete(TrackType aTrack);
   void DropDecodedSamples(TrackType aTrack);
-  void WaitingForKey(TrackType aTrack);
 
   bool ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold);
 
   void SetVideoDecodeThreshold();
 
   size_t SizeOfQueue(TrackType aTrack);
 
   RefPtr<PDMFactory> mPlatform;
 
-  class DecoderCallback : public MediaDataDecoderCallback {
-  public:
-    DecoderCallback(MediaFormatReader* aReader, TrackType aType)
-      : mReader(aReader)
-      , mType(aType)
-    {
-    }
-    void Output(MediaData* aSample) override {
-      mReader->Output(mType, aSample);
-    }
-    void InputExhausted() override {
-      mReader->InputExhausted(mType);
-    }
-    void Error(const MediaResult& aError) override {
-      mReader->Error(mType, aError);
-    }
-    void DrainComplete() override {
-      mReader->DrainComplete(mType);
-    }
-    void ReleaseMediaResources() override {
-      mReader->ReleaseResources();
-    }
-    bool OnReaderTaskQueue() override {
-      return mReader->OnTaskQueue();
-    }
-    void WaitingForKey() override {
-      mReader->WaitingForKey(mType);
-    }
-
-  private:
-    MediaFormatReader* mReader;
-    TrackType mType;
-  };
-
-  struct DecoderData {
+  struct DecoderData
+  {
     DecoderData(MediaFormatReader* aOwner,
                 MediaData::Type aType,
                 uint32_t aNumOfMaxError)
       : mOwner(aOwner)
       , mType(aType)
       , mMutex("DecoderData")
       , mDescription("shutdown")
       , mUpdateScheduled(false)
       , mDemuxEOS(false)
       , mWaitingForData(false)
       , mWaitingForKey(false)
       , mReceivedNewData(false)
-      , mOutputRequested(false)
-      , mDecodePending(false)
       , mNeedDraining(false)
       , mDraining(false)
       , mDrainComplete(false)
+      , mFlushed(true)
       , mNumOfConsecutiveError(0)
       , mMaxConsecutiveError(aNumOfMaxError)
       , mNumSamplesInput(0)
       , mNumSamplesOutput(0)
       , mNumSamplesOutputTotal(0)
       , mNumSamplesSkippedTotal(0)
       , mSizeOfQueue(0)
       , mIsHardwareAccelerated(false)
       , mLastStreamSourceID(UINT32_MAX)
       , mIsBlankDecode(false)
-    {}
+    {
+    }
 
     MediaFormatReader* mOwner;
     // Disambiguate Audio vs Video.
     MediaData::Type mType;
     RefPtr<MediaTrackDemuxer> mTrackDemuxer;
     // TaskQueue on which decoder can choose to decode.
     // Only non-null up until the decoder is created.
     RefPtr<TaskQueue> mTaskQueue;
-    // Callback that receives output and error notifications from the decoder.
-    nsAutoPtr<DecoderCallback> mCallback;
 
     // Mutex protecting mDescription and mDecoder.
     Mutex mMutex;
     // The platform decoder.
     RefPtr<MediaDataDecoder> mDecoder;
     const char* mDescription;
     void ShutdownDecoder()
     {
       MutexAutoLock lock(mMutex);
       if (mDecoder) {
-        mDecoder->Shutdown();
+        RefPtr<MediaFormatReader> owner = mOwner;
+        TrackType type = mType == MediaData::AUDIO_DATA
+                         ? TrackType::kAudioTrack
+                         : TrackType::kVideoTrack;
+        mDecoder->Shutdown()
+          ->Then(mOwner->OwnerThread(), __func__,
+                 [owner, this, type]() {
+                   mShutdownRequest.Complete();
+                   mShutdownPromise.ResolveIfExists(true, __func__);
+                   owner->ScheduleUpdate(type);
+                 },
+                 []() { MOZ_RELEASE_ASSERT(false, "Can't ever be here"); })
+          ->Track(mShutdownRequest);
       }
       mDescription = "shutdown";
       mDecoder = nullptr;
     }
 
     // Only accessed from reader's task queue.
     bool mUpdateScheduled;
     bool mDemuxEOS;
@@ -279,26 +251,26 @@ private:
     }
     bool IsWaiting() const
     {
       MOZ_ASSERT(mOwner->OnTaskQueue());
       return mWaitingForData || mWaitingForKey;
     }
 
     // MediaDataDecoder handler's variables.
-    bool mOutputRequested;
-    // Set to true once the MediaDataDecoder has been fed a compressed sample.
-    // No more samples will be passed to the decoder while true.
-    // mDecodePending is reset when:
-    // 1- The decoder calls InputExhausted
-    // 2- The decoder is Flushed or Reset.
-    bool mDecodePending;
+    MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDecodeRequest;
     bool mNeedDraining;
+    MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDrainRequest;
     bool mDraining;
     bool mDrainComplete;
+    MozPromiseRequestHolder<MediaDataDecoder::FlushPromise> mFlushRequest;
+    // Set to true if the last operation run on the decoder was a flush.
+    bool mFlushed;
+    MozPromiseHolder<ShutdownPromise> mShutdownPromise;
+    MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
 
     bool HasPendingDrain() const
     {
       return mDraining || mDrainComplete;
     }
 
     uint32_t mNumOfConsecutiveError;
     uint32_t mMaxConsecutiveError;
@@ -352,47 +324,72 @@ private:
     {
       mDemuxRequest.DisconnectIfExists();
       mSeekRequest.DisconnectIfExists();
       mTrackDemuxer->Reset();
       mQueuedSamples.Clear();
     }
 
     // Flush the decoder if present and reset decoding related data.
-    // Decoding will be suspended until mInputRequested is set again.
     // Following a flush, the decoder is ready to accept any new data.
     void Flush()
     {
-      if (mDecoder) {
-        mDecoder->Flush();
+      if (mFlushRequest.Exists() || mFlushed) {
+        // Flush still pending or already flushed, nothing more to do.
+        return;
       }
-      mOutputRequested = false;
-      mDecodePending = false;
+      mDecodeRequest.DisconnectIfExists();
+      mDrainRequest.DisconnectIfExists();
       mOutput.Clear();
       mNumSamplesInput = 0;
       mNumSamplesOutput = 0;
       mSizeOfQueue = 0;
       mDraining = false;
       mDrainComplete = false;
+      if (mDecoder && !mFlushed) {
+        RefPtr<MediaFormatReader> owner = mOwner;
+        TrackType type = mType == MediaData::AUDIO_DATA
+                         ? TrackType::kAudioTrack
+                         : TrackType::kVideoTrack;
+        mDecoder->Flush()
+          ->Then(mOwner->OwnerThread(), __func__,
+                 [owner, type, this]() {
+                   mFlushRequest.Complete();
+                   if (!mShutdownPromise.IsEmpty()) {
+                     ShutdownDecoder();
+                     return;
+                   }
+                   owner->ScheduleUpdate(type);
+                 },
+                 [owner, type, this](const MediaResult& aError) {
+                   mFlushRequest.Complete();
+                   if (!mShutdownPromise.IsEmpty()) {
+                     ShutdownDecoder();
+                     return;
+                   }
+                   owner->NotifyError(type, aError);
+                 })
+          ->Track(mFlushRequest);
+      }
+      mFlushed = true;
     }
 
     // Reset the state of the DecoderData, clearing all queued frames
     // (pending demuxed and decoded).
-    // Decoding will be suspended until mInputRequested is set again.
     // The track demuxer is *not* reset.
     void ResetState()
     {
       MOZ_ASSERT(mOwner->OnTaskQueue());
       mDemuxEOS = false;
       mWaitingForData = false;
       mWaitingForKey = false;
       mQueuedSamples.Clear();
-      mOutputRequested = false;
       mNeedDraining = false;
-      mDecodePending = false;
+      mDecodeRequest.DisconnectIfExists();
+      mDrainRequest.DisconnectIfExists();
       mDraining = false;
       mDrainComplete = false;
       mTimeThreshold.reset();
       mLastSampleTime.reset();
       mOutput.Clear();
       mNumSamplesInput = 0;
       mNumSamplesOutput = 0;
       mSizeOfQueue = 0;
@@ -421,25 +418,26 @@ private:
     UniquePtr<TrackInfo> mOriginalInfo;
     RefPtr<SharedTrackInfo> mInfo;
     Maybe<media::TimeUnit> mFirstDemuxedSampleTime;
     // Use BlankDecoderModule or not.
     bool mIsBlankDecode;
 
   };
 
-  class DecoderDataWithPromise : public DecoderData {
+  class DecoderDataWithPromise : public DecoderData
+  {
   public:
     DecoderDataWithPromise(MediaFormatReader* aOwner,
                            MediaData::Type aType,
                            uint32_t aNumOfMaxError)
       : DecoderData(aOwner, aType, aNumOfMaxError)
       , mHasPromise(false)
-
-    {}
+    {
+    }
 
     bool HasPromise() const override
     {
       return mHasPromise;
     }
 
     RefPtr<MediaDataPromise> EnsurePromise(const char* aMethodName) override
     {
@@ -566,25 +564,31 @@ private:
   RefPtr<GMPCrashHelper> mCrashHelper;
 
   void SetBlankDecode(TrackType aTrack, bool aIsBlankDecode);
 
   class DecoderFactory;
   UniquePtr<DecoderFactory> mDecoderFactory;
 
   MediaEventListener mCompositorUpdatedListener;
+  MediaEventListener mOnTrackWaitingForKeyListener;
 
   void OnFirstDemuxCompleted(TrackInfo::TrackType aType,
                              RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);