Merge inbound to m-c. a=merge
authorRyan VanderMeulen <ryanvm@gmail.com>
Tue, 14 Apr 2015 10:06:03 -0400
changeset 270345 388f5861dc7d293efce38a5e3741ca5267f455a3
parent 270261 bddebae866b436d8fd3cf7a0c3cf9d5f9857b938 (current diff)
parent 270344 b9ec1ecedfbbbf91288f5a810a694c21fcb1f9b8 (diff)
child 270346 459352500f98c49d6cf49ca48ec1df09494d9a42
child 270368 5b262452eb6a97a63c1bfe6fa5734829173b3aa5
child 270455 001ebbaae41598412bea1d9c96d647fc5d53b1f2
push id863
push userraliiev@mozilla.com
push dateMon, 03 Aug 2015 13:22:43 +0000
treeherdermozilla-release@f6321b14228d [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmerge
milestone40.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge inbound to m-c. a=merge
browser/base/content/browser.js
dom/cache/PCacheTypes.ipdlh
dom/fetch/FetchIPCUtils.h
dom/push/PushServiceLauncher.js
layout/reftests/bugs/reftest.list
security/pkix/include/pkix/ScopedPtr.h
toolkit/modules/WindowsPrefSync.jsm
--- a/browser/base/content/browser.js
+++ b/browser/base/content/browser.js
@@ -5,17 +5,16 @@
 
 let Ci = Components.interfaces;
 let Cu = Components.utils;
 let Cc = Components.classes;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 Cu.import("resource://gre/modules/NotificationDB.jsm");
 Cu.import("resource:///modules/RecentWindow.jsm");
-Cu.import("resource://gre/modules/WindowsPrefSync.jsm");
 
 
 XPCOMUtils.defineLazyModuleGetter(this, "Preferences",
                                   "resource://gre/modules/Preferences.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "Deprecated",
                                   "resource://gre/modules/Deprecated.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "BrowserUITelemetry",
                                   "resource:///modules/BrowserUITelemetry.jsm");
--- a/browser/base/content/test/general/browser_tabDrop.js
+++ b/browser/base/content/test/general/browser_tabDrop.js
@@ -47,20 +47,26 @@ function test() {
   function drop(text, valid) {
     triggeredDropCount++;
     if (valid)
       validDropCount++;
     executeSoon(function () {
       // A drop type of "link" onto an existing tab would normally trigger a
       // load in that same tab, but tabbrowser code in _getDragTargetTab treats
       // drops on the outer edges of a tab differently (loading a new tab
-      // instead). The events created by synthesizeDrop have all of their
+      // instead). Make events created by synthesizeDrop have all of their
       // coordinates set to 0 (screenX/screenY), so they're treated as drops
       // on the outer edge of the tab, thus they open new tabs.
-      ChromeUtils.synthesizeDrop(newTab, newTab, [[{type: "text/plain", data: text}]], "link", window);
+      var event = {
+        clientX: 0,
+        clientY: 0,
+        screenX: 0,
+        screenY: 0,
+      };
+      ChromeUtils.synthesizeDrop(newTab, newTab, [[{type: "text/plain", data: text}]], "link", window, undefined, event);
     });
   }
 
   // Begin and end with valid drops to make sure we wait for all drops before
   // ending the test
   drop("mochi.test/first", true);
   drop("javascript:'bad'");
   drop("jAvascript:'bad'");
--- a/browser/base/content/test/general/browser_tabReorder.js
+++ b/browser/base/content/test/general/browser_tabReorder.js
@@ -13,64 +13,31 @@ function test() {
     }
   });
 
   is(gBrowser.tabs.length, initialTabsLength + 3, "new tabs are opened");
   is(gBrowser.tabs[initialTabsLength], newTab1, "newTab1 position is correct");
   is(gBrowser.tabs[initialTabsLength + 1], newTab2, "newTab2 position is correct");
   is(gBrowser.tabs[initialTabsLength + 2], newTab3, "newTab3 position is correct");
 
-  let dataTransfer;
-  let trapDrag = function(event) {
-    dataTransfer = event.dataTransfer;
-  };
-  window.addEventListener("dragstart", trapDrag, true);
-  registerCleanupFunction(function () {
-    window.removeEventListener("dragstart", trapDrag, true);
-  });
-
-  let windowUtil = window.QueryInterface(Components.interfaces.nsIInterfaceRequestor).
-                          getInterface(Components.interfaces.nsIDOMWindowUtils);
-  let ds = Components.classes["@mozilla.org/widget/dragservice;1"].
-           getService(Components.interfaces.nsIDragService);
+  let scriptLoader = Cc["@mozilla.org/moz/jssubscript-loader;1"].
+                     getService(Ci.mozIJSSubScriptLoader);
+  let ChromeUtils = {};
+  scriptLoader.loadSubScript("chrome://mochikit/content/tests/SimpleTest/ChromeUtils.js", ChromeUtils);
 
   function dragAndDrop(tab1, tab2, copy) {
-    let ctrlKey = copy;
-    let altKey = copy;
-
-    let rect = tab1.getBoundingClientRect();
-    let x = rect.width / 2;
-    let y = rect.height / 2;
-    let diffX = 10;
-
-    ds.startDragSession();
-    try {
-      EventUtils.synthesizeMouse(tab1, x, y, { type: "mousedown" }, window);
-      EventUtils.synthesizeMouse(tab1, x + diffX, y, { type: "mousemove" }, window);
-
-      dataTransfer.dropEffect = copy ? "copy" : "move";
+    let rect = tab2.getBoundingClientRect();
+    let event = {
+      ctrlKey: copy,
+      altKey: copy,
+      clientX: rect.left + rect.width / 2 + 10,
+      clientY: rect.top + rect.height / 2,
+    };
 
-      let event = window.document.createEvent("DragEvents");
-      event.initDragEvent("dragover", true, true, window, 0,
-                          tab2.boxObject.screenX + x + diffX,
-                          tab2.boxObject.screenY + y,
-                          x + diffX, y, ctrlKey, altKey, false, false, 0, null, dataTransfer);
-      windowUtil.dispatchDOMEventViaPresShell(tab2, event, true);
-
-      event = window.document.createEvent("DragEvents");
-      event.initDragEvent("drop", true, true, window, 0,
-                          tab2.boxObject.screenX + x + diffX,
-                          tab2.boxObject.screenY + y,
-                          x + diffX, y, ctrlKey, altKey, false, false, 0, null, dataTransfer);
-      windowUtil.dispatchDOMEventViaPresShell(tab2, event, true);
-
-      EventUtils.synthesizeMouse(tab2, x + diffX, y, { type: "mouseup" }, window);
-    } finally {
-      ds.endDragSession(true);
-    }
+    ChromeUtils.synthesizeDrop(tab1, tab2, null, copy ? "copy" : "move", window, window, event);
   }
 
   dragAndDrop(newTab1, newTab2, false);
   is(gBrowser.tabs.length, initialTabsLength + 3, "tabs are still there");
   is(gBrowser.tabs[initialTabsLength], newTab2, "newTab2 and newTab1 are swapped");
   is(gBrowser.tabs[initialTabsLength + 1], newTab1, "newTab1 and newTab2 are swapped");
   is(gBrowser.tabs[initialTabsLength + 2], newTab3, "newTab3 stays same place");
 
--- a/caps/nsScriptSecurityManager.cpp
+++ b/caps/nsScriptSecurityManager.cpp
@@ -300,16 +300,25 @@ nsScriptSecurityManager::AppStatusForPri
     if (!appOriginPunned.Equals(origin)) {
         return nsIPrincipal::APP_STATUS_NOT_INSTALLED;
     }
 
     return status;
 
 }
 
+/*
+ * GetChannelResultPrincipal will return the principal that the resource
+ * returned by this channel will use.  For example, if the resource is in
+ * a sandbox, it will return the nullprincipal.  If the resource is forced
+ * to inherit principal, it will return the principal of its parent.  If
+ * the load doesn't require sandboxing or inheriting, it will return the same
+ * principal as GetChannelURIPrincipal. Namely the principal of the URI
+ * that is being loaded.
+ */
 NS_IMETHODIMP
 nsScriptSecurityManager::GetChannelResultPrincipal(nsIChannel* aChannel,
                                                    nsIPrincipal** aPrincipal)
 {
     NS_PRECONDITION(aChannel, "Must have channel!");
     nsCOMPtr<nsISupports> owner;
     aChannel->GetOwner(getter_AddRefs(owner));
     if (owner) {
@@ -334,16 +343,27 @@ nsScriptSecurityManager::GetChannelResul
         if (loadInfo->GetForceInheritPrincipal()) {
             NS_ADDREF(*aPrincipal = loadInfo->TriggeringPrincipal());
             return NS_OK;
         }
     }
     return GetChannelURIPrincipal(aChannel, aPrincipal);
 }
 
+/* The principal of the URI that this channel is loading. This is never
+ * affected by things like sandboxed loads, or loads where we forcefully
+ * inherit the principal.  Think of this as the principal of the server
+ * which this channel is loading from.  Most callers should use
+ * GetChannelResultPrincipal instead of GetChannelURIPrincipal.  Only
+ * call GetChannelURIPrincipal if you are sure that you want the
+ * principal that matches the uri, even in cases when the load is
+ * sandboxed or when the load could be a blob or data uri (i.e even when
+ * you encounter loads that may or may not be sandboxed and loads
+ * that may or may not inherit).
+ */
 NS_IMETHODIMP
 nsScriptSecurityManager::GetChannelURIPrincipal(nsIChannel* aChannel,
                                                 nsIPrincipal** aPrincipal)
 {
     NS_PRECONDITION(aChannel, "Must have channel!");
 
     // Get the principal from the URI.  Make sure this does the same thing
     // as nsDocument::Reset and XULDocument::StartDocumentLoad.
--- a/chrome/nsChromeProtocolHandler.cpp
+++ b/chrome/nsChromeProtocolHandler.cpp
@@ -19,16 +19,17 @@
 #include "nsIFile.h"
 #include "nsIFileChannel.h"
 #include "nsIIOService.h"
 #include "nsILoadGroup.h"
 #include "nsIScriptSecurityManager.h"
 #include "nsIStandardURL.h"
 #include "nsNetUtil.h"
 #include "nsString.h"
+#include "nsStandardURL.h"
 
 ////////////////////////////////////////////////////////////////////////////////
 
 NS_IMPL_ISUPPORTS(nsChromeProtocolHandler,
                   nsIProtocolHandler,
                   nsISupportsWeakReference)
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -64,42 +65,38 @@ nsChromeProtocolHandler::GetProtocolFlag
 }
 
 NS_IMETHODIMP
 nsChromeProtocolHandler::NewURI(const nsACString &aSpec,
                                 const char *aCharset,
                                 nsIURI *aBaseURI,
                                 nsIURI **result)
 {
-    nsresult rv;
 
     // Chrome: URLs (currently) have no additional structure beyond that provided
     // by standard URLs, so there is no "outer" given to CreateInstance
 
-    nsCOMPtr<nsIStandardURL> surl(do_CreateInstance(NS_STANDARDURL_CONTRACTID, &rv));
-    NS_ENSURE_SUCCESS(rv, rv);
+    nsRefPtr<nsStandardURL> surl = new nsStandardURL();
 
-    rv = surl->Init(nsIStandardURL::URLTYPE_STANDARD, -1, aSpec, aCharset, aBaseURI);
+    nsresult rv = surl->Init(nsIStandardURL::URLTYPE_STANDARD, -1, aSpec,
+                             aCharset, aBaseURI);
     if (NS_FAILED(rv))
         return rv;
 
-    nsCOMPtr<nsIURL> url(do_QueryInterface(surl, &rv));
-    NS_ENSURE_SUCCESS(rv, rv);
-
     // Canonify the "chrome:" URL; e.g., so that we collapse
     // "chrome://navigator/content/" and "chrome://navigator/content"
     // and "chrome://navigator/content/navigator.xul".
 
-    rv = nsChromeRegistry::Canonify(url);
+    rv = nsChromeRegistry::Canonify(surl);
     if (NS_FAILED(rv))
         return rv;
 
     surl->SetMutable(false);
 
-    NS_ADDREF(*result = url);
+    surl.forget(result);
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsChromeProtocolHandler::NewChannel2(nsIURI* aURI,
                                      nsILoadInfo* aLoadInfo,
                                      nsIChannel** aResult)
 {
--- a/config/config.mk
+++ b/config/config.mk
@@ -628,17 +628,17 @@ EXPAND_LD = $(EXPAND_LIBS_EXEC) --uselis
 EXPAND_MKSHLIB_ARGS = --uselist
 ifdef SYMBOL_ORDER
 EXPAND_MKSHLIB_ARGS += --symbol-order $(SYMBOL_ORDER)
 endif
 EXPAND_MKSHLIB = $(EXPAND_LIBS_EXEC) $(EXPAND_MKSHLIB_ARGS) -- $(MKSHLIB)
 
 ifneq (,$(MOZ_LIBSTDCXX_TARGET_VERSION)$(MOZ_LIBSTDCXX_HOST_VERSION))
 ifneq ($(OS_ARCH),Darwin)
-CHECK_STDCXX = @$(TOOLCHAIN_PREFIX)objdump -p $(1) | grep -v -e 'GLIBCXX_3\.4\.\(9\|[1-9][0-9]\)' > /dev/null || ( echo 'TEST-UNEXPECTED-FAIL | check_stdcxx | We do not want these libstdc++ symbols to be used:' && $(TOOLCHAIN_PREFIX)objdump -T $(1) | grep -e 'GLIBCXX_3\.4\.\(9\|[1-9][0-9]\)' && false)
+CHECK_STDCXX = @$(TOOLCHAIN_PREFIX)objdump -p $(1) | grep -e 'GLIBCXX_3\.4\.\(9\|[1-9][0-9]\)' > /dev/null && echo 'TEST-UNEXPECTED-FAIL | check_stdcxx | We do not want these libstdc++ symbols to be used:' && $(TOOLCHAIN_PREFIX)objdump -T $(1) | grep -e 'GLIBCXX_3\.4\.\(9\|[1-9][0-9]\)' && exit 1 || true
 endif
 endif
 
 ifeq (,$(filter $(OS_TARGET),WINNT Darwin))
 CHECK_TEXTREL = @$(TOOLCHAIN_PREFIX)readelf -d $(1) | grep TEXTREL > /dev/null && echo 'TEST-UNEXPECTED-FAIL | check_textrel | We do not want text relocations in libraries and programs' || true
 endif
 
 ifeq ($(MOZ_WIDGET_TOOLKIT),android)
--- a/configure.in
+++ b/configure.in
@@ -7380,16 +7380,18 @@ STDCXX_COMPAT=
 MOZ_ARG_ENABLE_BOOL(stdcxx-compat,
 [  --enable-stdcxx-compat  Enable compatibility with older libstdc++],
     STDCXX_COMPAT=1)
 
 if test -n "$STDCXX_COMPAT"; then
    eval $(CXX="$CXX" HOST_CXX="$HOST_CXX" $PYTHON -m mozbuild.configure.libstdcxx)
    AC_SUBST(MOZ_LIBSTDCXX_TARGET_VERSION)
    AC_SUBST(MOZ_LIBSTDCXX_HOST_VERSION)
+   CXXFLAGS="$CXXFLAGS -D_GLIBCXX_USE_CXX11_ABI=0"
+   HOST_CXXFLAGS="$HOST_CXXFLAGS -D_GLIBCXX_USE_CXX11_ABI=0"
 fi
 
 dnl ========================================================
 dnl =
 dnl = Profiling and Instrumenting
 dnl =
 dnl ========================================================
 MOZ_ARG_HEADER(Profiling and Instrumenting)
--- a/dom/animation/Animation.h
+++ b/dom/animation/Animation.h
@@ -244,17 +244,17 @@ public:
 
   const AnimationTiming& Timing() const {
     return mTiming;
   }
   AnimationTiming& Timing() {
     return mTiming;
   }
 
-  const nsString& Name() const {
+  virtual const nsString& Name() const {
     return mName;
   }
 
   // Return the duration from the start the active interval to the point where
   // the animation begins playback. This is zero unless the animation has
   // a negative delay in which case it is the absolute value of the delay.
   // This is used for setting the elapsedTime member of CSS AnimationEvents.
   TimeDuration InitialAdvance() const {
--- a/dom/animation/AnimationPlayer.cpp
+++ b/dom/animation/AnimationPlayer.cpp
@@ -553,17 +553,17 @@ AnimationPlayer::DoPlay(LimitBehavior aL
 
   // We may have updated the current time when we set the hold time above.
   UpdateTiming();
 }
 
 void
 AnimationPlayer::DoPause()
 {
-  if (mPendingState == PendingState::PausePending) {
+  if (IsPausedOrPausing()) {
     return;
   }
 
   bool reuseReadyPromise = false;
   if (mPendingState == PendingState::PlayPending) {
     CancelPendingTasks();
     reuseReadyPromise = true;
   }
--- a/dom/animation/test/css-transitions/test_animation-effect-name.html
+++ b/dom/animation/test/css-transitions/test_animation-effect-name.html
@@ -11,13 +11,14 @@ test(function(t) {
   var div = addDiv(t);
 
   // Add a transition
   div.style.left = '0px';
   window.getComputedStyle(div).transitionProperty;
   div.style.transition = 'all 100s';
   div.style.left = '100px';
 
-  assert_equals(div.getAnimations()[0].source.effect.name, '',
-                'Animation effects for transitions have an empty name');
+  assert_equals(div.getAnimations()[0].source.effect.name, 'left',
+                'The name for the transitions corresponds to the property ' +
+                'being transitioned');
 }, 'Effect name for transitions');
 
 </script>
--- a/dom/apps/AppsService.js
+++ b/dom/apps/AppsService.js
@@ -30,23 +30,34 @@ function AppsService()
                     .processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT;
   debug("inParent: " + this.inParent);
   Cu.import(this.inParent ? "resource://gre/modules/Webapps.jsm" :
                             "resource://gre/modules/AppsServiceChild.jsm");
 }
 
 AppsService.prototype = {
 
+  isInvalidId: function(localId) {
+    return (localId == Ci.nsIScriptSecurityManager.NO_APP_ID ||
+            localId == Ci.nsIScriptSecurityManager.UNKNOWN_APP_ID);
+  },
+
   getManifestCSPByLocalId: function getCSPByLocalId(localId) {
     debug("GetManifestCSPByLocalId( " + localId + " )");
+    if (this.isInvalidId(localId)) {
+      return null;
+    }
     return DOMApplicationRegistry.getManifestCSPByLocalId(localId);
   },
 
   getDefaultCSPByLocalId: function getCSPByLocalId(localId) {
     debug("GetDefaultCSPByLocalId( " + localId + " )");
+    if (this.isInvalidId(localId)) {
+      return null;
+    }
     return DOMApplicationRegistry.getDefaultCSPByLocalId(localId);
   },
 
   getAppByManifestURL: function getAppByManifestURL(aManifestURL) {
     debug("GetAppByManifestURL( " + aManifestURL + " )");
     return DOMApplicationRegistry.getAppByManifestURL(aManifestURL);
   },
 
@@ -67,21 +78,27 @@ AppsService.prototype = {
 
   getAppLocalIdByStoreId: function getAppLocalIdByStoreId(aStoreId) {
     debug("getAppLocalIdByStoreId( " + aStoreId + " )");
     return DOMApplicationRegistry.getAppLocalIdByStoreId(aStoreId);
   },
 
   getAppByLocalId: function getAppByLocalId(aLocalId) {
     debug("getAppByLocalId( " + aLocalId + " )");
+    if (this.isInvalidId(aLocalId)) {
+      return null;
+    }
     return DOMApplicationRegistry.getAppByLocalId(aLocalId);
   },
 
   getManifestURLByLocalId: function getManifestURLByLocalId(aLocalId) {
     debug("getManifestURLByLocalId( " + aLocalId + " )");
+    if (this.isInvalidId(aLocalId)) {
+      return null;
+    }
     return DOMApplicationRegistry.getManifestURLByLocalId(aLocalId);
   },
 
   getCoreAppsBasePath: function getCoreAppsBasePath() {
     debug("getCoreAppsBasePath()");
     return DOMApplicationRegistry.getCoreAppsBasePath();
   },
 
@@ -92,18 +109,17 @@ AppsService.prototype = {
 
   getAppInfo: function getAppInfo(aAppId) {
     debug("getAppInfo()");
     return DOMApplicationRegistry.getAppInfo(aAppId);
   },
 
   getRedirect: function getRedirect(aLocalId, aURI) {
     debug("getRedirect for " + aLocalId + " " + aURI.spec);
-    if (aLocalId == Ci.nsIScriptSecurityManager.NO_APP_ID ||
-        aLocalId == Ci.nsIScriptSecurityManager.UNKNOWN_APP_ID) {
+    if (this.isInvalidId(aLocalId)) {
       return null;
     }
 
     let app = DOMApplicationRegistry.getAppByLocalId(aLocalId);
     if (app && app.redirects) {
       let spec = aURI.spec;
       for (let i = 0; i < app.redirects.length; i++) {
         let redirect = app.redirects[i];
--- a/dom/base/nsContentPermissionHelper.cpp
+++ b/dom/base/nsContentPermissionHelper.cpp
@@ -145,17 +145,17 @@ ContentPermissionType::GetOptions(nsIArr
 
     rv = isupportsString->SetData(mOptions[i]);
     NS_ENSURE_SUCCESS(rv, rv);
 
     rv = options->AppendElement(isupportsString, false);
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
-  NS_ADDREF(*aOptions = options);
+  options.forget(aOptions);
   return NS_OK;
 }
 
 // nsContentPermissionUtils
 
 /* static */ uint32_t
 nsContentPermissionUtils::ConvertPermissionRequestToArray(nsTArray<PermissionRequest>& aSrcArray,
                                                           nsIMutableArray* aDesArray)
--- a/dom/base/nsDOMSerializer.cpp
+++ b/dom/base/nsDOMSerializer.cpp
@@ -78,18 +78,17 @@ SetUpEncoder(nsIDOMNode *aRoot, const ns
 
   // If we are working on the entire document we do not need to
   // specify which part to serialize
   if (!entireDocument) {
     rv = encoder->SetNode(aRoot);
   }
 
   if (NS_SUCCEEDED(rv)) {
-    *aEncoder = encoder.get();
-    NS_ADDREF(*aEncoder);
+    encoder.forget(aEncoder);
   }
 
   return rv;
 }
 
 void
 nsDOMSerializer::SerializeToString(nsINode& aRoot, nsAString& aStr,
                                    ErrorResult& rv)
--- a/dom/base/nsDOMWindowUtils.cpp
+++ b/dom/base/nsDOMWindowUtils.cpp
@@ -536,22 +536,17 @@ nsDOMWindowUtils::GetResolution(float* a
 {
   MOZ_RELEASE_ASSERT(nsContentUtils::IsCallerChrome());
 
   nsIPresShell* presShell = GetPresShell();
   if (!presShell) {
     return NS_ERROR_FAILURE;
   }
 
-  nsIScrollableFrame* sf = presShell->GetRootScrollFrameAsScrollable();
-  if (sf) {
-    *aResolution = sf->GetResolution();
-  } else {
-    *aResolution = presShell->GetResolution();
-  }
+  *aResolution = nsLayoutUtils::GetResolution(presShell);
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsDOMWindowUtils::GetIsResolutionSet(bool* aIsResolutionSet) {
   MOZ_RELEASE_ASSERT(nsContentUtils::IsCallerChrome());
 
--- a/dom/base/nsFocusManager.cpp
+++ b/dom/base/nsFocusManager.cpp
@@ -2632,18 +2632,19 @@ nsFocusManager::DetermineElementToMoveFo
       NS_ENSURE_SUCCESS(rv, rv);
 
       // found a content node to focus.
       if (nextFocus) {
         LOGCONTENTNAVIGATION("Next Content: %s", nextFocus.get());
 
         // as long as the found node was not the same as the starting node,
         // set it as the return value.
-        if (nextFocus != originalStartContent)
-          NS_ADDREF(*aNextContent = nextFocus);
+        if (nextFocus != originalStartContent) {
+          nextFocus.forget(aNextContent);
+        }
         return NS_OK;
       }
 
       if (popupFrame) {
         // in a popup, so start again from the beginning of the popup. However,
         // if we already started at the beginning, then there isn't anything to
         // focus, so just return
         if (startContent != rootContent) {
@@ -3115,18 +3116,17 @@ nsFocusManager::GetLastDocShell(nsIDocSh
 {
   *aResult = nullptr;
 
   nsCOMPtr<nsIDocShellTreeItem> curItem = aItem;
   while (curItem) {
     int32_t childCount = 0;
     curItem->GetChildCount(&childCount);
     if (!childCount) {
-      *aResult = curItem;
-      NS_ADDREF(*aResult);
+      curItem.forget(aResult);
       return;
     }
 
     
     curItem->GetChildAt(childCount - 1, getter_AddRefs(curItem));
   }
 }
 
@@ -3194,17 +3194,17 @@ nsFocusManager::GetPreviousDocShell(nsID
     if (iterItem == aItem)
       break;
     prevItem = iterItem;
   }
 
   if (prevItem)
     GetLastDocShell(prevItem, aResult);
   else
-    NS_ADDREF(*aResult = parentItem);
+    parentItem.forget(aResult);
 }
 
 nsIContent*
 nsFocusManager::GetNextTabbablePanel(nsIDocument* aDocument, nsIFrame* aCurrentPopup, bool aForward)
 {
   nsXULPopupManager* pm = nsXULPopupManager::GetInstance();
   if (!pm)
     return nullptr;
@@ -3412,17 +3412,17 @@ nsFocusManager::GetFocusInSelection(nsPI
   // with a variable holding the starting selectionContent
   while (testContent) {
     // Keep testing while selectionContent is equal to something,
     // eventually we'll run out of ancestors
 
     nsCOMPtr<nsIURI> uri;
     if (testContent == currentFocus ||
         testContent->IsLink(getter_AddRefs(uri))) {
-      NS_ADDREF(*aFocusedContent = testContent);
+      testContent.forget(aFocusedContent);
       return;
     }
 
     // Get the parent
     testContent = testContent->GetParent();
 
     if (!testContent) {
       // We run this loop again, checking the ancestor chain of the selection's end point
@@ -3442,17 +3442,17 @@ nsFocusManager::GetFocusInSelection(nsPI
   do {
     testContent = do_QueryInterface(selectionNode);
 
     // We're looking for any focusable link that could be part of the
     // main document's selection.
     nsCOMPtr<nsIURI> uri;
     if (testContent == currentFocus ||
         testContent->IsLink(getter_AddRefs(uri))) {
-      NS_ADDREF(*aFocusedContent = testContent);
+      testContent.forget(aFocusedContent);
       return;
     }
 
     selectionNode->GetFirstChild(getter_AddRefs(testNode));
     if (testNode) {
       selectionNode = testNode;
       continue;
     }
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -4043,17 +4043,17 @@ nsGlobalWindow::GetPrompter(nsIPrompt** 
   }
 
   if (!mDocShell)
     return NS_ERROR_FAILURE;
 
   nsCOMPtr<nsIPrompt> prompter(do_GetInterface(mDocShell));
   NS_ENSURE_TRUE(prompter, NS_ERROR_NO_INTERFACE);
 
-  NS_ADDREF(*aPrompt = prompter);
+  prompter.forget(aPrompt);
   return NS_OK;
 }
 
 BarProp*
 nsGlobalWindow::GetMenubar(ErrorResult& aError)
 {
   FORWARD_TO_INNER_OR_THROW(GetMenubar, (aError), aError, nullptr);
 
--- a/dom/base/nsGlobalWindowCommands.cpp
+++ b/dom/base/nsGlobalWindowCommands.cpp
@@ -626,18 +626,17 @@ nsSelectionCommand::GetContentViewerEdit
   nsIDocShell *docShell = window->GetDocShell();
   NS_ENSURE_TRUE(docShell, NS_ERROR_FAILURE);
 
   nsCOMPtr<nsIContentViewer> viewer;
   docShell->GetContentViewer(getter_AddRefs(viewer));
   nsCOMPtr<nsIContentViewerEdit> edit(do_QueryInterface(viewer));
   NS_ENSURE_TRUE(edit, NS_ERROR_FAILURE);
 
-  *aEditInterface = edit;
-  NS_ADDREF(*aEditInterface);
+  edit.forget(aEditInterface);
   return NS_OK;
 }
 
 #if 0
 #pragma mark -
 #endif
 
 #define NS_DECL_CLIPBOARD_COMMAND(_cmd)                                                     \
--- a/dom/base/nsQueryContentEventResult.cpp
+++ b/dom/base/nsQueryContentEventResult.cpp
@@ -33,16 +33,31 @@ nsQueryContentEventResult::GetOffset(uin
   bool notFound;
   nsresult rv = GetNotFound(&notFound);
   NS_ENSURE_SUCCESS(rv, rv);
   NS_ENSURE_TRUE(!notFound, NS_ERROR_NOT_AVAILABLE);
   *aOffset = mOffset;
   return NS_OK;
 }
 
+NS_IMETHODIMP
+nsQueryContentEventResult::GetTentativeCaretOffset(uint32_t* aOffset)
+{
+  bool notFound;
+  nsresult rv = GetTentativeCaretOffsetNotFound(&notFound);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    return rv;
+  }
+  if (NS_WARN_IF(notFound)) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+  *aOffset = mTentativeCaretOffset;
+  return NS_OK;
+}
+
 static bool IsRectEnabled(uint32_t aEventID)
 {
   return aEventID == NS_QUERY_CARET_RECT ||
          aEventID == NS_QUERY_TEXT_RECT ||
          aEventID == NS_QUERY_EDITOR_RECT ||
          aEventID == NS_QUERY_CHARACTER_AT_POINT;
 }
 
@@ -121,25 +136,39 @@ nsQueryContentEventResult::GetNotFound(b
   NS_ENSURE_TRUE(mSucceeded, NS_ERROR_NOT_AVAILABLE);
   NS_ENSURE_TRUE(mEventID == NS_QUERY_SELECTED_TEXT ||
                  mEventID == NS_QUERY_CHARACTER_AT_POINT,
                  NS_ERROR_NOT_AVAILABLE);
   *aNotFound = (mOffset == WidgetQueryContentEvent::NOT_FOUND);
   return NS_OK;
 }
 
+NS_IMETHODIMP
+nsQueryContentEventResult::GetTentativeCaretOffsetNotFound(bool* aNotFound)
+{
+  if (NS_WARN_IF(!mSucceeded)) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+  if (NS_WARN_IF(mEventID != NS_QUERY_CHARACTER_AT_POINT)) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+  *aNotFound = (mTentativeCaretOffset == WidgetQueryContentEvent::NOT_FOUND);
+  return NS_OK;
+}
+
 void
 nsQueryContentEventResult::SetEventResult(nsIWidget* aWidget,
                                           const WidgetQueryContentEvent &aEvent)
 {
   mEventID = aEvent.message;
   mSucceeded = aEvent.mSucceeded;
   mReversed = aEvent.mReply.mReversed;
   mRect = aEvent.mReply.mRect;
   mOffset = aEvent.mReply.mOffset;
+  mTentativeCaretOffset = aEvent.mReply.mTentativeCaretOffset;
   mString = aEvent.mReply.mString;
 
   if (!IsRectEnabled(mEventID) || !aWidget || !mSucceeded) {
     return;
   }
 
   nsIWidget* topWidget = aWidget->GetTopLevelWidget();
   if (!topWidget || topWidget == aWidget) {
--- a/dom/base/nsQueryContentEventResult.h
+++ b/dom/base/nsQueryContentEventResult.h
@@ -25,16 +25,17 @@ public:
                       const mozilla::WidgetQueryContentEvent &aEvent);
 
 protected:
   ~nsQueryContentEventResult();
 
   uint32_t mEventID;
 
   uint32_t mOffset;
+  uint32_t mTentativeCaretOffset;
   nsString mString;
   mozilla::LayoutDeviceIntRect mRect;
 
   bool mSucceeded;
   bool mReversed;
 };
 
 #endif // mozilla_dom_nsQueryContentEventResult_h
--- a/dom/base/nsRange.cpp
+++ b/dom/base/nsRange.cpp
@@ -1803,17 +1803,17 @@ nsRange::CutContents(DocumentFragment** 
   nsresult rv = iter.Init(this);
   if (NS_FAILED(rv)) return rv;
 
   if (iter.IsDone())
   {
     // There's nothing for us to delete.
     rv = CollapseRangeAfterDelete(this);
     if (NS_SUCCEEDED(rv) && aFragment) {
-      NS_ADDREF(*aFragment = retval);
+      retval.forget(aFragment);
     }
     return rv;
   }
 
   // We delete backwards to avoid iterator problems!
 
   iter.Last();
 
@@ -2010,17 +2010,17 @@ nsRange::CutContents(DocumentFragment** 
         NS_ENSURE_STATE(newCloneAncestor);
       }
       commonCloneAncestor = newCloneAncestor;
     }
   }
 
   rv = CollapseRangeAfterDelete(this);
   if (NS_SUCCEEDED(rv) && aFragment) {
-    NS_ADDREF(*aFragment = retval);
+    retval.forget(aFragment);
   }
   return rv;
 }
 
 NS_IMETHODIMP
 nsRange::DeleteContents()
 {
   return CutContents(nullptr);
--- a/dom/bindings/test/test_dom_xrays.html
+++ b/dom/bindings/test/test_dom_xrays.html
@@ -34,19 +34,19 @@ function checkXrayProperty(obj, name, va
       ok(!obj.hasOwnProperty(name), "hasOwnProperty shouldn't see \"" + name + "\" through Xrays");
       ise(Object.getOwnPropertyDescriptor(obj, name), undefined, "getOwnPropertyDescriptor shouldn't see \"" + name + "\" through Xrays");
       ok(Object.keys(obj).indexOf(name) == -1, "Enumerating the Xray should not return \"" + name + "\"");
     } else {
       ok(obj.hasOwnProperty(name), "hasOwnProperty should see \"" + name + "\" through Xrays");
       var pd = Object.getOwnPropertyDescriptor(obj, name);
       ok(pd, "getOwnPropertyDescriptor should see \"" + name + "\" through Xrays");
       if (pd && pd.get) {
-        is(pd.get.call(instance), value, "Should get the right value for \"" + name + "\" through Xrays");
+        ise(pd.get.call(instance), value, "Should get the right value for \"" + name + "\" through Xrays");
       } else {
-        is(obj[name], value, "Should get the right value for \"" + name + "\" through Xrays");
+        ise(obj[name], value, "Should get the right value for \"" + name + "\" through Xrays");
       }
       if (pd && pd.enumerable) {
         ok(Object.keys(obj).indexOf("" + name) > -1, "Enumerating the Xray should return \"" + name + "\"");
       }
     }
   } while ((obj = Object.getPrototypeOf(obj)));
 }
 
@@ -99,17 +99,19 @@ function test()
   // Named properties shouldn't shadow WebIDL- or ECMAScript-defined properties.
   checkWindowXrayProperty(win, "addEventListener", undefined, undefined, undefined, eventTargetProto.addEventListener);
   ise(win.addEventListener, eventTargetProto.addEventListener, "Named properties shouldn't shadow WebIDL-defined properties");
 
   ise(win.toString, win.Object.prototype.toString, "Named properties shouldn't shadow ECMAScript-defined properties");
 
   // HTMLDocument
   // Unforgeable properties live on the instance.
-  checkXrayProperty(doc, "location", [ document.getElementById("t").src ]);
+  checkXrayProperty(doc, "location", [ win.location ]);
+  ise(String(win.location), document.getElementById("t").src,
+      "Should have the right stringification");
 
   // HTMLHtmlElement
   var elem = doc.documentElement;
 
   var elemProto = Object.getPrototypeOf(elem);
   ise(elemProto, win.HTMLHtmlElement.prototype, "The proto chain of the Xray should mirror the prototype chain of the Xrayed object");
 
   elemProto = Object.getPrototypeOf(elemProto);
--- a/dom/datastore/DataStoreService.cpp
+++ b/dom/datastore/DataStoreService.cpp
@@ -1099,18 +1099,17 @@ DataStoreService::GetAppManifestURLsForD
   HashApp* apps = nullptr;
   if (mStores.Get(aName, &apps)) {
     apps->EnumerateRead(GetAppManifestURLsEnumerator, manifestURLs.get());
   }
   if (mAccessStores.Get(aName, &apps)) {
     apps->EnumerateRead(GetAppManifestURLsEnumerator, manifestURLs.get());
   }
 
-  *aManifestURLs = manifestURLs;
-  NS_ADDREF(*aManifestURLs);
+  manifestURLs.forget(aManifestURLs);
   return NS_OK;
 }
 
 bool
 DataStoreService::CheckPermission(nsIPrincipal* aPrincipal)
 {
   // First of all, the general pref has to be turned on.
   bool enabled = false;
--- a/dom/devicestorage/nsDeviceStorage.cpp
+++ b/dom/devicestorage/nsDeviceStorage.cpp
@@ -3510,17 +3510,17 @@ nsDOMDeviceStorage::CreateDeviceStorageF
     GetDefaultStorageName(aType, storageName);
   }
 
   nsRefPtr<nsDOMDeviceStorage> ds = new nsDOMDeviceStorage(aWin);
   if (NS_FAILED(ds->Init(aWin, aType, storageName))) {
     *aStore = nullptr;
     return;
   }
-  NS_ADDREF(*aStore = ds.get());
+  ds.forget(aStore);
 }
 
 // static
 void
 nsDOMDeviceStorage::CreateDeviceStoragesFor(
   nsPIDOMWindow* aWin,
   const nsAString &aType,
   nsTArray<nsRefPtr<nsDOMDeviceStorage> > &aStores)
--- a/dom/events/ContentEventHandler.cpp
+++ b/dom/events/ContentEventHandler.cpp
@@ -1130,16 +1130,19 @@ ContentEventHandler::OnQuerySelectionAsT
 nsresult
 ContentEventHandler::OnQueryCharacterAtPoint(WidgetQueryContentEvent* aEvent)
 {
   nsresult rv = Init(aEvent);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
+  aEvent->mReply.mOffset = aEvent->mReply.mTentativeCaretOffset =
+    WidgetQueryContentEvent::NOT_FOUND;
+
   nsIFrame* rootFrame = mPresShell->GetRootFrame();
   NS_ENSURE_TRUE(rootFrame, NS_ERROR_FAILURE);
   nsIWidget* rootWidget = rootFrame->GetNearestWidget();
   NS_ENSURE_TRUE(rootWidget, NS_ERROR_FAILURE);
 
   // The root frame's widget might be different, e.g., the event was fired on
   // a popup but the rootFrame is the document root.
   if (rootWidget != aEvent->widget) {
@@ -1159,30 +1162,57 @@ ContentEventHandler::OnQueryCharacterAtP
   if (rootWidget != aEvent->widget) {
     eventOnRoot.refPoint += aEvent->widget->WidgetToScreenOffset() -
       rootWidget->WidgetToScreenOffset();
   }
   nsPoint ptInRoot =
     nsLayoutUtils::GetEventCoordinatesRelativeTo(&eventOnRoot, rootFrame);
 
   nsIFrame* targetFrame = nsLayoutUtils::GetFrameForPoint(rootFrame, ptInRoot);
-  if (!targetFrame || targetFrame->GetType() != nsGkAtoms::textFrame ||
-      !targetFrame->GetContent() ||
+  if (!targetFrame || !targetFrame->GetContent() ||
       !nsContentUtils::ContentIsDescendantOf(targetFrame->GetContent(),
                                              mRootContent)) {
-    // there is no character at the point.
-    aEvent->mReply.mOffset = WidgetQueryContentEvent::NOT_FOUND;
+    // There is no character at the point.
     aEvent->mSucceeded = true;
     return NS_OK;
   }
   nsPoint ptInTarget = ptInRoot + rootFrame->GetOffsetToCrossDoc(targetFrame);
   int32_t rootAPD = rootFrame->PresContext()->AppUnitsPerDevPixel();
   int32_t targetAPD = targetFrame->PresContext()->AppUnitsPerDevPixel();
   ptInTarget = ptInTarget.ScaleToOtherAppUnits(rootAPD, targetAPD);
 
+  nsIFrame::ContentOffsets tentativeCaretOffsets =
+    targetFrame->GetContentOffsetsFromPoint(ptInTarget);
+  if (!tentativeCaretOffsets.content ||
+      !nsContentUtils::ContentIsDescendantOf(tentativeCaretOffsets.content,
+                                             mRootContent)) {
+    // There is no character nor tentative caret point at the point.
+    aEvent->mSucceeded = true;
+    return NS_OK;
+  }
+
+  rv = GetFlatTextOffsetOfRange(mRootContent, tentativeCaretOffsets.content,
+                                tentativeCaretOffsets.offset,
+                                &aEvent->mReply.mTentativeCaretOffset,
+                                GetLineBreakType(aEvent));
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    return rv;
+  }
+
+  if (targetFrame->GetType() != nsGkAtoms::textFrame) {
+    // There is no character at the point but there is tentative caret point.
+    aEvent->mSucceeded = true;
+    return NS_OK;
+  }
+
+  MOZ_ASSERT(
+    aEvent->mReply.mTentativeCaretOffset != WidgetQueryContentEvent::NOT_FOUND,
+    "The point is inside a character bounding box.  Why tentative caret point "
+    "hasn't been found?");
+
   nsTextFrame* textframe = static_cast<nsTextFrame*>(targetFrame);
   nsIFrame::ContentOffsets contentOffsets =
     textframe->GetCharacterOffsetAtFramePoint(ptInTarget);
   NS_ENSURE_TRUE(contentOffsets.content, NS_ERROR_FAILURE);
   uint32_t offset;
   rv = GetFlatTextOffsetOfRange(mRootContent, contentOffsets.content,
                                 contentOffsets.offset, &offset,
                                 GetLineBreakType(aEvent));
--- a/dom/events/DataTransfer.cpp
+++ b/dom/events/DataTransfer.cpp
@@ -977,28 +977,28 @@ DataTransfer::ConvertFromVariant(nsIVari
     nsCOMPtr<nsISupports> data;
     if (NS_FAILED(aVariant->GetAsISupports(getter_AddRefs(data))))
        return false;
  
     nsCOMPtr<nsIFlavorDataProvider> fdp = do_QueryInterface(data);
     if (fdp) {
       // for flavour data providers, use kFlavorHasDataProvider (which has the
       // value 0) as the length.
-      NS_ADDREF(*aSupports = fdp);
+      fdp.forget(aSupports);
       *aLength = nsITransferable::kFlavorHasDataProvider;
     }
     else {
       // wrap the item in an nsISupportsInterfacePointer
       nsCOMPtr<nsISupportsInterfacePointer> ptrSupports =
         do_CreateInstance(NS_SUPPORTS_INTERFACE_POINTER_CONTRACTID);
       if (!ptrSupports)
         return false;
 
       ptrSupports->SetData(data);
-      NS_ADDREF(*aSupports = ptrSupports);
+      ptrSupports.forget(aSupports);
 
       *aLength = sizeof(nsISupportsInterfacePointer *);
     }
 
     return true;
   }
 
   char16_t* chrs;
@@ -1012,18 +1012,17 @@ DataTransfer::ConvertFromVariant(nsIVari
 
   nsCOMPtr<nsISupportsString>
     strSupports(do_CreateInstance(NS_SUPPORTS_STRING_CONTRACTID));
   if (!strSupports)
     return false;
 
   strSupports->SetData(str);
 
-  *aSupports = strSupports;
-  NS_ADDREF(*aSupports);
+  strSupports.forget(aSupports);
 
   // each character is two bytes
   *aLength = str.Length() << 1;
 
   return true;
 }
 
 void
--- a/dom/html/HTMLFormElement.cpp
+++ b/dom/html/HTMLFormElement.cpp
@@ -1738,18 +1738,17 @@ HTMLFormElement::GetActionURL(nsIURI** a
     if (!permitsFormAction) {
       rv = NS_ERROR_CSP_FORM_ACTION_VIOLATION;
     }
   }
 
   //
   // Assign to the output
   //
-  *aActionURL = actionURL;
-  NS_ADDREF(*aActionURL);
+  actionURL.forget(aActionURL);
 
   return rv;
 }
 
 NS_IMETHODIMP_(nsIFormControl*)
 HTMLFormElement::GetDefaultSubmitElement() const
 {
   NS_PRECONDITION(mDefaultSubmitElement == mFirstSubmitInElements ||
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -807,21 +807,20 @@ NS_IMETHODIMP HTMLMediaElement::Load()
   ResetState();
   mIsRunningLoadMethod = false;
 
   return NS_OK;
 }
 
 void HTMLMediaElement::ResetState()
 {
-  mMediaSize = nsIntSize(-1, -1);
   // There might be a pending MediaDecoder::PlaybackPositionChanged() which
-  // will overwrite |mMediaSize| in UpdateMediaSize() to give staled videoWidth
-  // and videoHeight. We have to call ForgetElement() here such that the staled
-  // callbacks won't reach us.
+  // will overwrite |mMediaInfo.mVideo.mDisplay| in UpdateMediaSize() to give
+  // staled videoWidth and videoHeight. We have to call ForgetElement() here
+  // such that the staled callbacks won't reach us.
   if (mVideoFrameContainer) {
     mVideoFrameContainer->ForgetElement();
     mVideoFrameContainer = nullptr;
   }
 }
 
 static bool HasSourceChildren(nsIContent* aElement)
 {
@@ -934,27 +933,25 @@ void HTMLMediaElement::NotifyMediaTrackE
 }
 
 void HTMLMediaElement::NotifyMediaStreamTracksAvailable(DOMMediaStream* aStream)
 {
   if (!mSrcStream || mSrcStream != aStream) {
     return;
   }
 
-  bool oldHasVideo = HasVideo();
-
-  mMediaInfo.mAudio.mHasAudio = !AudioTracks()->IsEmpty();
-  mMediaInfo.mVideo.mHasVideo = !VideoTracks()->IsEmpty();
-
-  if (IsVideo() && oldHasVideo != HasVideo()) {
-    // We are a video element and HasVideo() changed so update the screen wakelock
-    NotifyOwnerDocumentActivityChanged();
-  }
+  bool videoHasChanged = IsVideo() && HasVideo() != !VideoTracks()->IsEmpty();
 
   UpdateReadyStateForData(mLastNextFrameStatus);
+
+  if (videoHasChanged) {
+    // We are a video element and HasVideo() changed so update the screen
+    // wakelock
+    NotifyOwnerDocumentActivityChanged();
+  }
 }
 
 void HTMLMediaElement::LoadFromSourceChildren()
 {
   NS_ASSERTION(mDelayingLoadEvent,
                "Should delay load event (if in document) during load");
   NS_ASSERTION(mIsLoadingFromSourceChildren,
                "Must remember we're loading from source children");
@@ -1890,21 +1887,21 @@ HTMLMediaElement::CaptureStreamInternal(
   // back into the output stream.
   out->mStream->GetStream()->ChangeExplicitBlockerCount(1);
   if (mDecoder) {
     mDecoder->AddOutputStream(out->mStream->GetStream()->AsProcessedStream(),
                               aFinishWhenEnded);
     if (mReadyState >= HAVE_METADATA) {
       // Expose the tracks to JS directly.
       if (HasAudio()) {
-        TrackID audioTrackId = mMediaInfo.mAudio.mTrackInfo.mOutputId;
+        TrackID audioTrackId = mMediaInfo.mAudio.mTrackId;
         out->mStream->CreateDOMTrack(audioTrackId, MediaSegment::AUDIO);
       }
       if (HasVideo()) {
-        TrackID videoTrackId = mMediaInfo.mVideo.mTrackInfo.mOutputId;
+        TrackID videoTrackId = mMediaInfo.mVideo.mTrackId;
         out->mStream->CreateDOMTrack(videoTrackId, MediaSegment::VIDEO);
       }
     }
   }
   nsRefPtr<DOMMediaStream> result = out->mStream;
   return result.forget();
 }
 
@@ -2047,17 +2044,16 @@ HTMLMediaElement::HTMLMediaElement(alrea
   : nsGenericHTMLElement(aNodeInfo),
     mCurrentLoadID(0),
     mNetworkState(nsIDOMHTMLMediaElement::NETWORK_EMPTY),
     mReadyState(nsIDOMHTMLMediaElement::HAVE_NOTHING),
     mLastNextFrameStatus(NEXT_FRAME_UNINITIALIZED),
     mLoadWaitStatus(NOT_WAITING),
     mVolume(1.0),
     mPreloadAction(PRELOAD_UNDEFINED),
-    mMediaSize(-1,-1),
     mLastCurrentTime(0.0),
     mFragmentStart(-1.0),
     mFragmentEnd(-1.0),
     mDefaultPlaybackRate(1.0),
     mPlaybackRate(1.0),
     mPreservesPitch(true),
     mPlayed(new TimeRanges),
     mCurrentPlayRangeStart(-1.0),
@@ -3151,16 +3147,18 @@ void HTMLMediaElement::ProcessMediaFragm
     SetCurrentTime(parser.GetStartTime());
     mFragmentStart = parser.GetStartTime();
   }
 }
 
 void HTMLMediaElement::MetadataLoaded(const MediaInfo* aInfo,
                                       nsAutoPtr<const MetadataTags> aTags)
 {
+  MOZ_ASSERT(NS_IsMainThread());
+
   mMediaInfo = *aInfo;
   mIsEncrypted = aInfo->IsEncrypted()
 #ifdef MOZ_EME
                  || mPendingEncryptedInitData.IsEncrypted()
 #endif // MOZ_EME
                  ;
   mTags = aTags.forget();
   mLoadedDataFired = false;
@@ -3168,17 +3166,16 @@ void HTMLMediaElement::MetadataLoaded(co
 
   if (mIsEncrypted) {
     nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
     obs->NotifyObservers(static_cast<nsIContent*>(this), "media-eme-metadataloaded", nullptr);
   }
 
   DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   if (IsVideo() && HasVideo()) {
-    mMediaSize = aInfo->mVideo.mDisplay;
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
   DispatchAsyncEvent(NS_LITERAL_STRING("loadedmetadata"));
   if (mDecoder && mDecoder->IsTransportSeekable() && mDecoder->IsMediaSeekable()) {
     ProcessMediaFragmentURI();
     mDecoder->SetFragmentEndTime(mFragmentEnd);
   }
   if (mIsEncrypted) {
@@ -3194,33 +3191,33 @@ void HTMLMediaElement::MetadataLoaded(co
     }
     mPendingEncryptedInitData.mInitDatas.Clear();
 #endif // MOZ_EME
   }
 
   // Expose the tracks to JS directly.
   for (OutputMediaStream& out : mOutputStreams) {
     if (aInfo->HasAudio()) {
-      TrackID audioTrackId = aInfo->mAudio.mTrackInfo.mOutputId;
+      TrackID audioTrackId = aInfo->mAudio.mTrackId;
       out.mStream->CreateDOMTrack(audioTrackId, MediaSegment::AUDIO);
     }
     if (aInfo->HasVideo()) {
-      TrackID videoTrackId = aInfo->mVideo.mTrackInfo.mOutputId;
+      TrackID videoTrackId = aInfo->mVideo.mTrackId;
       out.mStream->CreateDOMTrack(videoTrackId, MediaSegment::VIDEO);
     }
   }
 
   // If this element had a video track, but consists only of an audio track now,
   // delete the VideoFrameContainer. This happens when the src is changed to an
   // audio only file.
   // Else update its dimensions.
   if (!aInfo->HasVideo()) {
     ResetState();
   } else {
-    UpdateMediaSize(aInfo->mVideo.mDisplay);
+    UpdateReadyStateForData(mLastNextFrameStatus);
   }
 
   if (IsVideo() && aInfo->HasVideo()) {
     // We are a video element playing video so update the screen wakelock
     NotifyOwnerDocumentActivityChanged();
   }
 }
 
@@ -3503,29 +3500,27 @@ void HTMLMediaElement::UpdateReadyStateF
   if (mDecoder && mReadyState < nsIDOMHTMLMediaElement::HAVE_METADATA) {
     // aNextFrame might have a next frame because the decoder can advance
     // on its own thread before MetadataLoaded gets a chance to run.
     // The arrival of more data can't change us out of this readyState.
     return;
   }
 
   if (mSrcStream && mReadyState < nsIDOMHTMLMediaElement::HAVE_METADATA) {
-    if ((!HasAudio() && !HasVideo()) ||
-        (IsVideo() && HasVideo() && mMediaSize == nsIntSize(-1, -1))) {
+    bool hasAudio = !AudioTracks()->IsEmpty();
+    bool hasVideo = !VideoTracks()->IsEmpty();
+
+    if ((!hasAudio && !hasVideo) ||
+        (IsVideo() && hasVideo && !HasVideo())) {
       return;
     }
 
     // We are playing a stream that has video and a video frame is now set.
     // This means we have all metadata needed to change ready state.
-    MediaInfo mediaInfo;
-    mediaInfo.mAudio.mHasAudio = !AudioTracks()->IsEmpty();
-    mediaInfo.mVideo.mHasVideo = !VideoTracks()->IsEmpty();
-    if (mediaInfo.HasVideo()) {
-      mediaInfo.mVideo.mDisplay = mMediaSize;
-    }
+    MediaInfo mediaInfo = mMediaInfo;
     MetadataLoaded(&mediaInfo, nsAutoPtr<const MetadataTags>(nullptr));
   }
 
   if (aNextFrame == MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING) {
     ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_METADATA);
     return;
   }
 
@@ -3863,27 +3858,28 @@ void HTMLMediaElement::NotifyDecoderPrin
     OutputMediaStream* ms = &mOutputStreams[i];
     ms->mStream->SetCORSMode(mCORSMode);
     ms->mStream->CombineWithPrincipal(principal);
   }
 }
 
 void HTMLMediaElement::UpdateMediaSize(const nsIntSize& aSize)
 {
-  if (IsVideo() && mReadyState != HAVE_NOTHING && mMediaSize != aSize) {
+  if (IsVideo() && mReadyState != HAVE_NOTHING &&
+      mMediaInfo.mVideo.mDisplay != aSize) {
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
 
-  mMediaSize = aSize;
+  mMediaInfo.mVideo.mDisplay = aSize;
   UpdateReadyStateForData(mLastNextFrameStatus);
 }
 
 void HTMLMediaElement::UpdateInitialMediaSize(const nsIntSize& aSize)
 {
-  if (mMediaSize == nsIntSize(-1, -1)) {
+  if (mMediaInfo.mVideo.mDisplay == nsIntSize(0, 0)) {
     UpdateMediaSize(aSize);
   }
 }
 
 void HTMLMediaElement::SuspendOrResumeElement(bool aPauseElement, bool aSuspendEvents)
 {
   if (aPauseElement != mPausedForInactiveDocumentOrChannel) {
     mPausedForInactiveDocumentOrChannel = aPauseElement;
@@ -4150,17 +4146,17 @@ already_AddRefed<nsILoadGroup> HTMLMedia
 
 nsresult
 HTMLMediaElement::CopyInnerTo(Element* aDest)
 {
   nsresult rv = nsGenericHTMLElement::CopyInnerTo(aDest);
   NS_ENSURE_SUCCESS(rv, rv);
   if (aDest->OwnerDoc()->IsStaticDocument()) {
     HTMLMediaElement* dest = static_cast<HTMLMediaElement*>(aDest);
-    dest->mMediaSize = mMediaSize;
+    dest->mMediaInfo = mMediaInfo;
   }
   return rv;
 }
 
 already_AddRefed<TimeRanges>
 HTMLMediaElement::Buffered() const
 {
   nsRefPtr<TimeRanges> ranges = new TimeRanges();
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -1128,23 +1128,16 @@ protected:
   // redirects etc.
   nsCOMPtr<nsIURI> mLoadingSrc;
 
   // Stores the current preload action for this element. Initially set to
   // PRELOAD_UNDEFINED, its value is changed by calling
   // UpdatePreloadAction().
   PreloadAction mPreloadAction;
 
-  // Size of the media. Updated by the decoder on the main thread if
-  // it changes. Defaults to a width and height of -1 if not set.
-  // We keep this separate from the intrinsic size stored in the
-  // VideoFrameContainer so that it doesn't change unexpectedly under us
-  // due to decoder activity.
-  nsIntSize mMediaSize;
-
   // Time that the last timeupdate event was fired. Read/Write from the
   // main thread only.
   TimeStamp mTimeUpdateTime;
 
   // Time that the last progress event was fired. Read/Write from the
   // main thread only.
   TimeStamp mProgressTime;
 
--- a/dom/html/HTMLVideoElement.cpp
+++ b/dom/html/HTMLVideoElement.cpp
@@ -47,26 +47,26 @@ HTMLVideoElement::HTMLVideoElement(alrea
 }
 
 HTMLVideoElement::~HTMLVideoElement()
 {
 }
 
 nsresult HTMLVideoElement::GetVideoSize(nsIntSize* size)
 {
-  if (mMediaSize.width == -1 && mMediaSize.height == -1) {
+  if (!mMediaInfo.HasVideo()) {
     return NS_ERROR_FAILURE;
   }
 
   if (mDisableVideo) {
     return NS_ERROR_FAILURE;
   }
 
-  size->height = mMediaSize.height;
-  size->width = mMediaSize.width;
+  size->height = mMediaInfo.mVideo.mDisplay.height;
+  size->width = mMediaInfo.mVideo.mDisplay.width;
   return NS_OK;
 }
 
 bool
 HTMLVideoElement::ParseAttribute(int32_t aNamespaceID,
                                  nsIAtom* aAttribute,
                                  const nsAString& aValue,
                                  nsAttrValue& aResult)
--- a/dom/html/HTMLVideoElement.h
+++ b/dom/html/HTMLVideoElement.h
@@ -71,22 +71,22 @@ public:
 
   void SetHeight(uint32_t aValue, ErrorResult& aRv)
   {
     SetHTMLIntAttr(nsGkAtoms::height, aValue, aRv);
   }
 
   uint32_t VideoWidth() const
   {
-    return mMediaSize.width == -1 ? 0 : mMediaSize.width;
+    return mMediaInfo.HasVideo() ? mMediaInfo.mVideo.mDisplay.width : 0;
   }
 
   uint32_t VideoHeight() const
   {
-    return mMediaSize.height == -1 ? 0 : mMediaSize.height;
+    return mMediaInfo.HasVideo() ? mMediaInfo.mVideo.mDisplay.height : 0;
   }
 
   void GetPoster(nsAString& aValue)
   {
     GetURIAttr(nsGkAtoms::poster, nullptr, aValue);
   }
   void SetPoster(const nsAString& aValue, ErrorResult& aRv)
   {
--- a/dom/interfaces/base/nsIQueryContentEventResult.idl
+++ b/dom/interfaces/base/nsIQueryContentEventResult.idl
@@ -6,23 +6,25 @@
 #include "nsISupports.idl"
 
 /**
  * The result of query content events.  succeeded propery can be used always.
  * Whether other properties can be used or not depends on the event.
  * See nsIDOMWindowUtils.idl, which properites can be used was documented.
  */
 
-[scriptable, uuid(4b4ba266-b51e-4f0f-8d0e-9f13cb2a0056)]
+[scriptable, uuid(e2c39e0e-345f-451a-a7b2-e0230d555847)]
 interface nsIQueryContentEventResult : nsISupports
 {
   readonly attribute unsigned long offset;
+  readonly attribute unsigned long tentativeCaretOffset;
   readonly attribute boolean reversed;
 
   readonly attribute long left;
   readonly attribute long top;
   readonly attribute long width;
   readonly attribute long height;
   readonly attribute AString text;
 
   readonly attribute boolean succeeded;
   readonly attribute boolean notFound;
+  readonly attribute boolean tentativeCaretOffsetNotFound;
 };
--- a/dom/ipc/TabChild.cpp
+++ b/dom/ipc/TabChild.cpp
@@ -514,17 +514,20 @@ TabChildBase::ProcessUpdateFrame(const F
 {
     if (!mGlobal || !mTabChildGlobal) {
         return aFrameMetrics;
     }
 
     nsCOMPtr<nsIDOMWindowUtils> utils(GetDOMWindowUtils());
 
     FrameMetrics newMetrics = aFrameMetrics;
-    APZCCallbackHelper::UpdateRootFrame(utils, newMetrics);
+    nsCOMPtr<nsIDocument> doc = GetDocument();
+    if (doc && doc->GetShell()) {
+      APZCCallbackHelper::UpdateRootFrame(utils, doc->GetShell(), newMetrics);
+    }
 
     CSSSize cssCompositedSize = newMetrics.CalculateCompositedSizeInCssPixels();
     // The BrowserElementScrolling helper must know about these updated metrics
     // for other functions it performs, such as double tap handling.
     // Note, %f must not be used because it is locale specific!
     nsString data;
     data.AppendPrintf("{ \"x\" : %d", NS_lround(newMetrics.GetScrollOffset().x));
     data.AppendPrintf(", \"y\" : %d", NS_lround(newMetrics.GetScrollOffset().y));
--- a/dom/jsurl/nsJSProtocolHandler.cpp
+++ b/dom/jsurl/nsJSProtocolHandler.cpp
@@ -1209,37 +1209,33 @@ nsJSProtocolHandler::NewURI(const nsACSt
 }
 
 NS_IMETHODIMP
 nsJSProtocolHandler::NewChannel2(nsIURI* uri,
                                  nsILoadInfo* aLoadInfo,
                                  nsIChannel** result)
 {
     nsresult rv;
-    nsJSChannel * channel;
 
     NS_ENSURE_ARG_POINTER(uri);
 
-    channel = new nsJSChannel();
+    nsRefPtr<nsJSChannel> channel = new nsJSChannel();
     if (!channel) {
         return NS_ERROR_OUT_OF_MEMORY;
     }
-    NS_ADDREF(channel);
 
     rv = channel->Init(uri);
 
     // set the loadInfo on the new channel
     rv = channel->SetLoadInfo(aLoadInfo);
     NS_ENSURE_SUCCESS(rv, rv);
 
     if (NS_SUCCEEDED(rv)) {
-        *result = channel;
-        NS_ADDREF(*result);
+        channel.forget(result);
     }
-    NS_RELEASE(channel);
     return rv;
 }
 
 NS_IMETHODIMP
 nsJSProtocolHandler::NewChannel(nsIURI* uri, nsIChannel* *result)
 {
     return NewChannel2(uri, nullptr, result);
 }
--- a/dom/media/AbstractThread.cpp
+++ b/dom/media/AbstractThread.cpp
@@ -59,13 +59,13 @@ AbstractThread::MainThread()
 void
 AbstractThread::InitStatics()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(!sMainThread);
   nsCOMPtr<nsIThread> mainThread;
   NS_GetMainThread(getter_AddRefs(mainThread));
   MOZ_DIAGNOSTIC_ASSERT(mainThread);
-  sMainThread = AbstractThread::Create(mainThread.get());
+  sMainThread = new AbstractThreadImpl<nsIThread>(mainThread.get());
   ClearOnShutdown(&sMainThread);
 }
 
 } // namespace mozilla
--- a/dom/media/AbstractThread.h
+++ b/dom/media/AbstractThread.h
@@ -18,35 +18,33 @@ namespace mozilla {
 /*
  * We often want to run tasks on a target that guarantees that events will never
  * run in parallel. There are various target types that achieve this - namely
  * nsIThread and MediaTaskQueue. Note that nsIThreadPool (which implements
  * nsIEventTarget) does not have this property, so we do not want to use
  * nsIEventTarget for this purpose. This class encapsulates the specifics of
  * the structures we might use here and provides a consistent interface.
  *
- * Use AbstractThread::Create() to instantiate an AbstractThread. Note that
- * if you use different types than the ones currently supported (MediaTaskQueue
- * and nsIThread), you'll need to implement the relevant guts in
- * AbstractThread.cpp to avoid linkage errors.
+ * At present, the supported AbstractThread implementations are MediaTaskQueue
+ * and AbstractThread::MainThread. If you add support for another thread that is
+ * not the MainThread, you'll need to figure out how to make it unique such that
+ * comparing AbstractThread pointers is equivalent to comparing nsIThread pointers.
  */
 class AbstractThread
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractThread);
   virtual nsresult Dispatch(already_AddRefed<nsIRunnable> aRunnable) = 0;
   virtual bool IsCurrentThreadIn() = 0;
 
   // Convenience method for dispatching a runnable when we may be running on
   // a thread that requires runnables to be dispatched with tail dispatch.
   void MaybeTailDispatch(already_AddRefed<nsIRunnable> aRunnable,
                          bool aAssertDispatchSuccess = true);
 
-  template<typename TargetType> static AbstractThread* Create(TargetType* aTarget);
-
   // Convenience method for getting an AbstractThread for the main thread.
   static AbstractThread* MainThread();
 
   // Must be called exactly once during startup.
   static void InitStatics();
 
 protected:
   virtual ~AbstractThread() {}
@@ -58,18 +56,11 @@ class AbstractThreadImpl : public Abstra
 public:
   explicit AbstractThreadImpl(TargetType* aTarget) : mTarget(aTarget) {}
   virtual nsresult Dispatch(already_AddRefed<nsIRunnable> aRunnable);
   virtual bool IsCurrentThreadIn();
 private:
   nsRefPtr<TargetType> mTarget;
 };
 
-template<typename TargetType>
-AbstractThread*
-AbstractThread::Create(TargetType* aTarget)
-{
-  return new AbstractThreadImpl<TargetType>(aTarget);
-};
-
 } // namespace mozilla
 
 #endif
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -147,50 +147,50 @@ VideoData::SizeOfIncludingThis(MallocSiz
     size += img->SizeOfIncludingThis(aMallocSizeOf);
   }
 
   return size;
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::ShallowCopyUpdateDuration(VideoData* aOther,
+VideoData::ShallowCopyUpdateDuration(const VideoData* aOther,
                                      int64_t aDuration)
 {
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aOther->mTime,
                                         aDuration,
                                         aOther->mKeyframe,
                                         aOther->mTimecode,
                                         aOther->mDisplay);
   v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::ShallowCopyUpdateTimestamp(VideoData* aOther,
+VideoData::ShallowCopyUpdateTimestamp(const VideoData* aOther,
                                       int64_t aTimestamp)
 {
   NS_ENSURE_TRUE(aOther, nullptr);
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aTimestamp,
                                         aOther->GetEndTime() - aTimestamp,
                                         aOther->mKeyframe,
                                         aOther->mTimecode,
                                         aOther->mDisplay);
   v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::ShallowCopyUpdateTimestampAndDuration(VideoData* aOther,
+VideoData::ShallowCopyUpdateTimestampAndDuration(const VideoData* aOther,
                                                  int64_t aTimestamp,
                                                  int64_t aDuration)
 {
   NS_ENSURE_TRUE(aOther, nullptr);
   nsRefPtr<VideoData> v = new VideoData(aOther->mOffset,
                                         aTimestamp,
                                         aDuration,
                                         aOther->mKeyframe,
@@ -198,17 +198,17 @@ VideoData::ShallowCopyUpdateTimestampAnd
                                         aOther->mDisplay);
   v->mDiscontinuity = aOther->mDiscontinuity;
   v->mImage = aOther->mImage;
   return v.forget();
 }
 
 /* static */
 void VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
-                                    VideoInfo& aInfo,
+                                    const VideoInfo& aInfo,
                                     const YCbCrBuffer &aBuffer,
                                     const IntRect& aPicture,
                                     bool aCopyData)
 {
   if (!aVideoImage) {
     return;
   }
   const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
@@ -236,17 +236,17 @@ void VideoData::SetVideoDataToImage(Plan
     aVideoImage->SetData(data);
   } else {
     aVideoImage->SetDataNoCopy(data);
   }
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(VideoInfo& aInfo,
+VideoData::Create(const VideoInfo& aInfo,
                   ImageContainer* aContainer,
                   Image* aImage,
                   int64_t aOffset,
                   int64_t aTime,
                   int64_t aDuration,
                   const YCbCrBuffer& aBuffer,
                   bool aKeyframe,
                   int64_t aTimecode,
@@ -352,49 +352,49 @@ VideoData::Create(VideoInfo& aInfo,
                                    true /* aCopyData */);
   }
 #endif
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(VideoInfo& aInfo,
+VideoData::Create(const VideoInfo& aInfo,
                   ImageContainer* aContainer,
                   int64_t aOffset,
                   int64_t aTime,
                   int64_t aDuration,
                   const YCbCrBuffer& aBuffer,
                   bool aKeyframe,
                   int64_t aTimecode,
                   const IntRect& aPicture)
 {
   return Create(aInfo, aContainer, nullptr, aOffset, aTime, aDuration, aBuffer,
                 aKeyframe, aTimecode, aPicture);
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(VideoInfo& aInfo,
+VideoData::Create(const VideoInfo& aInfo,
                   Image* aImage,
                   int64_t aOffset,
                   int64_t aTime,
                   int64_t aDuration,
                   const YCbCrBuffer& aBuffer,
                   bool aKeyframe,
                   int64_t aTimecode,
                   const IntRect& aPicture)
 {
   return Create(aInfo, nullptr, aImage, aOffset, aTime, aDuration, aBuffer,
                 aKeyframe, aTimecode, aPicture);
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::CreateFromImage(VideoInfo& aInfo,
+VideoData::CreateFromImage(const VideoInfo& aInfo,
                            ImageContainer* aContainer,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aDuration,
                            const nsRefPtr<Image>& aImage,
                            bool aKeyframe,
                            int64_t aTimecode,
                            const IntRect& aPicture)
@@ -407,17 +407,17 @@ VideoData::CreateFromImage(VideoInfo& aI
                                       aInfo.mDisplay));
   v->mImage = aImage;
   return v.forget();
 }
 
 #ifdef MOZ_OMX_DECODER
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(VideoInfo& aInfo,
+VideoData::Create(const VideoInfo& aInfo,
                   ImageContainer* aContainer,
                   int64_t aOffset,
                   int64_t aTime,
                   int64_t aDuration,
                   mozilla::layers::TextureClient* aBuffer,
                   bool aKeyframe,
                   int64_t aTimecode,
                   const IntRect& aPicture)
@@ -483,25 +483,27 @@ VideoData::Create(VideoInfo& aInfo,
 #define RAW_DATA_ALIGNMENT 31U
 
 #define RAW_DATA_DEFAULT_SIZE 4096
 
 MediaRawData::MediaRawData()
   : MediaData(RAW_DATA)
   , mData(nullptr)
   , mSize(0)
+  , mCrypto(mCryptoInternal)
   , mBuffer(new LargeDataBuffer(RAW_DATA_DEFAULT_SIZE))
   , mPadding(0)
 {
 }
 
 MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize)
   : MediaData(RAW_DATA)
   , mData(nullptr)
   , mSize(0)
+  , mCrypto(mCryptoInternal)
   , mBuffer(new LargeDataBuffer(RAW_DATA_DEFAULT_SIZE))
   , mPadding(0)
 {
   if (!EnsureCapacity(aSize)) {
     return;
   }
   mBuffer->AppendElements(aData, aSize);
   mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
@@ -513,16 +515,17 @@ MediaRawData::Clone() const
 {
   nsRefPtr<MediaRawData> s = new MediaRawData;
   s->mTimecode = mTimecode;
   s->mTime = mTime;
   s->mDuration = mDuration;
   s->mOffset = mOffset;
   s->mKeyframe = mKeyframe;
   s->mExtraData = mExtraData;
+  s->mCryptoInternal = mCryptoInternal;
   if (mSize) {
     if (!s->EnsureCapacity(mSize)) {
       return nullptr;
     }
     s->mBuffer->AppendElements(mData, mSize);
     s->mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
     s->mSize = mSize;
   }
@@ -579,16 +582,17 @@ MediaRawDataWriter*
 MediaRawData::CreateWriter()
 {
   return new MediaRawDataWriter(this);
 }
 
 MediaRawDataWriter::MediaRawDataWriter(MediaRawData* aMediaRawData)
   : mData(nullptr)
   , mSize(0)
+  , mCrypto(aMediaRawData->mCryptoInternal)
   , mTarget(aMediaRawData)
   , mBuffer(aMediaRawData->mBuffer.get())
 {
   if (aMediaRawData->mData) {
     mData = mBuffer->Elements() + mTarget->mPadding;
     mSize = mTarget->mSize;
   }
 }
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -169,96 +169,96 @@ public:
   // Constructs a VideoData object. If aImage is nullptr, creates a new Image
   // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
   // nullptr, it's stored as the underlying video image and aBuffer is assumed
   // to point to memory within aImage so no copy is made. aTimecode is a codec
   // specific number representing the timestamp of the frame of video data.
   // Returns nsnull if an error occurs. This may indicate that memory couldn't
   // be allocated to create the VideoData object, or it may indicate some
   // problem with the input data (e.g. negative stride).
-  static already_AddRefed<VideoData> Create(VideoInfo& aInfo,
+  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
                                             ImageContainer* aContainer,
                                             Image* aImage,
                                             int64_t aOffset,
                                             int64_t aTime,
                                             int64_t aDuration,
                                             const YCbCrBuffer &aBuffer,
                                             bool aKeyframe,
                                             int64_t aTimecode,
                                             const IntRect& aPicture);
 
   // Variant that always makes a copy of aBuffer
-  static already_AddRefed<VideoData> Create(VideoInfo& aInfo,
+  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
                                             ImageContainer* aContainer,
                                             int64_t aOffset,
                                             int64_t aTime,
                                             int64_t aDuration,
                                             const YCbCrBuffer &aBuffer,
                                             bool aKeyframe,
                                             int64_t aTimecode,
                                             const IntRect& aPicture);
 
   // Variant to create a VideoData instance given an existing aImage
-  static already_AddRefed<VideoData> Create(VideoInfo& aInfo,
+  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
                                             Image* aImage,
                                             int64_t aOffset,
                                             int64_t aTime,
                                             int64_t aDuration,
                                             const YCbCrBuffer &aBuffer,
                                             bool aKeyframe,
                                             int64_t aTimecode,
                                             const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> Create(VideoInfo& aInfo,
-                                             ImageContainer* aContainer,
-                                             int64_t aOffset,
-                                             int64_t aTime,
-                                             int64_t aDuration,
-                                             layers::TextureClient* aBuffer,
-                                             bool aKeyframe,
-                                             int64_t aTimecode,
-                                             const IntRect& aPicture);
+  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
+                                            ImageContainer* aContainer,
+                                            int64_t aOffset,
+                                            int64_t aTime,
+                                            int64_t aDuration,
+                                            layers::TextureClient* aBuffer,
+                                            bool aKeyframe,
+                                            int64_t aTimecode,
+                                            const IntRect& aPicture);
 
-  static already_AddRefed<VideoData> CreateFromImage(VideoInfo& aInfo,
+  static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
                                                      ImageContainer* aContainer,
                                                      int64_t aOffset,
                                                      int64_t aTime,
                                                      int64_t aDuration,
                                                      const nsRefPtr<Image>& aImage,
                                                      bool aKeyframe,
                                                      int64_t aTimecode,
                                                      const IntRect& aPicture);
 
   // Creates a new VideoData identical to aOther, but with a different
   // specified duration. All data from aOther is copied into the new
   // VideoData. The new VideoData's mImage field holds a reference to
   // aOther's mImage, i.e. the Image is not copied. This function is useful
   // in reader backends that can't determine the duration of a VideoData
   // until the next frame is decoded, i.e. it's a way to change the const
   // duration field on a VideoData.
-  static already_AddRefed<VideoData> ShallowCopyUpdateDuration(VideoData* aOther,
+  static already_AddRefed<VideoData> ShallowCopyUpdateDuration(const VideoData* aOther,
                                                                int64_t aDuration);
 
   // Creates a new VideoData identical to aOther, but with a different
   // specified timestamp. All data from aOther is copied into the new
   // VideoData, as ShallowCopyUpdateDuration() does.
-  static already_AddRefed<VideoData> ShallowCopyUpdateTimestamp(VideoData* aOther,
+  static already_AddRefed<VideoData> ShallowCopyUpdateTimestamp(const VideoData* aOther,
                                                                 int64_t aTimestamp);
 
   // Creates a new VideoData identical to aOther, but with a different
   // specified timestamp and duration. All data from aOther is copied
   // into the new VideoData, as ShallowCopyUpdateDuration() does.
   static already_AddRefed<VideoData>
-  ShallowCopyUpdateTimestampAndDuration(VideoData* aOther, int64_t aTimestamp,
+  ShallowCopyUpdateTimestampAndDuration(const VideoData* aOther, int64_t aTimestamp,
                                         int64_t aDuration);
 
   // Initialize PlanarYCbCrImage. Only When aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static void SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
-                                  VideoInfo& aInfo,
+                                  const VideoInfo& aInfo,
                                   const YCbCrBuffer &aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
 
   // Constructs a duplicate VideoData object. This intrinsically tells the
   // player that it does not need to update the displayed frame when this
   // frame is played; this frame is identical to the previous.
   static already_AddRefed<VideoData> CreateDuplicate(int64_t aOffset,
@@ -344,16 +344,18 @@ class MediaRawData;
 
 class MediaRawDataWriter
 {
 public:
   // Pointer to data or null if not-yet allocated
   uint8_t* mData;
   // Writeable size of buffer.
   size_t mSize;
+  // Writeable reference to MediaRawData::mCryptoInternal
+  CryptoSample& mCrypto;
 
   // Data manipulation methods. mData and mSize may be updated accordingly.
 
   // Set size of buffer, allocating memory as required.
   // If size is increased, new buffer area is filled with 0.
   bool SetSize(size_t aSize);
   // Add aData at the beginning of buffer.
   bool Prepend(const uint8_t* aData, size_t aSize);
@@ -375,17 +377,17 @@ public:
   MediaRawData();
   MediaRawData(const uint8_t* aData, size_t mSize);
 
   // Pointer to data or null if not-yet allocated
   const uint8_t* mData;
   // Size of buffer.
   size_t mSize;
 
-  CryptoSample mCrypto;
+  const CryptoSample& mCrypto;
   nsRefPtr<DataBuffer> mExtraData;
 
   // Return a deep copy or nullptr if out of memory.
   virtual already_AddRefed<MediaRawData> Clone() const;
   // Create a MediaRawDataWriter for this MediaRawData. The caller must
   // delete the writer once done. The writer is not thread-safe.
   virtual MediaRawDataWriter* CreateWriter();
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
@@ -397,16 +399,17 @@ private:
   friend class MediaRawDataWriter;
   // Ensure that the backend buffer can hold aSize data. Will update mData.
   // Will enforce that the start of allocated data is always 32 bytes
   // aligned and that it has sufficient end padding to allow for 32 bytes block
   // read as required by some data decoders.
   // Returns false if memory couldn't be allocated.
   bool EnsureCapacity(size_t aSize);
   nsRefPtr<LargeDataBuffer> mBuffer;
+  CryptoSample mCryptoInternal;
   uint32_t mPadding;
   MediaRawData(const MediaRawData&); // Not implemented
 };
 
   // LargeDataBuffer is a ref counted fallible TArray.
   // It is designed to share potentially big byte arrays.
 class LargeDataBuffer : public FallibleTArray<uint8_t> {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(LargeDataBuffer);
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -1918,26 +1918,26 @@ MediaDecoder::ConstructMediaTracks()
   if (!element) {
     return;
   }
 
   mMediaTracksConstructed = true;
 
   AudioTrackList* audioList = element->AudioTracks();
   if (audioList && mInfo->HasAudio()) {
-    TrackInfo info = mInfo->mAudio.mTrackInfo;
+    const TrackInfo& info = mInfo->mAudio;
     nsRefPtr<AudioTrack> track = MediaTrackList::CreateAudioTrack(
     info.mId, info.mKind, info.mLabel, info.mLanguage, info.mEnabled);
 
     audioList->AddTrack(track);
   }
 
   VideoTrackList* videoList = element->VideoTracks();
   if (videoList && mInfo->HasVideo()) {
-    TrackInfo info = mInfo->mVideo.mTrackInfo;
+    const TrackInfo& info = mInfo->mVideo;
     nsRefPtr<VideoTrack> track = MediaTrackList::CreateVideoTrack(
     info.mId, info.mKind, info.mLabel, info.mLanguage);
 
     videoList->AddTrack(track);
     track->SetEnabledInternal(info.mEnabled, MediaTrack::FIRE_NO_EVENTS);
   }
 }
 
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -402,26 +402,26 @@ void MediaDecoderStateMachine::SendStrea
       (!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
       (!mInfo.HasVideo() || VideoQueue().IsFinished());
   if (mDecoder->IsSameOriginMedia()) {
     SourceMediaStream* mediaStream = stream->mStream;
     StreamTime endPosition = 0;
 
     if (!stream->mStreamInitialized) {
       if (mInfo.HasAudio()) {
-        TrackID audioTrackId = mInfo.mAudio.mTrackInfo.mOutputId;
+        TrackID audioTrackId = mInfo.mAudio.mTrackId;
         AudioSegment* audio = new AudioSegment();
         mediaStream->AddAudioTrack(audioTrackId, mInfo.mAudio.mRate, 0, audio,
                                    SourceMediaStream::ADDTRACK_QUEUED);
         stream->mStream->DispatchWhenNotEnoughBuffered(audioTrackId,
             TaskQueue(), GetWakeDecoderRunnable());
         stream->mNextAudioTime = mStartTime + stream->mInitialTime;
       }
       if (mInfo.HasVideo()) {
-        TrackID videoTrackId = mInfo.mVideo.mTrackInfo.mOutputId;
+        TrackID videoTrackId = mInfo.mVideo.mTrackId;
         VideoSegment* video = new VideoSegment();
         mediaStream->AddTrack(videoTrackId, 0, video,
                               SourceMediaStream::ADDTRACK_QUEUED);
         stream->mStream->DispatchWhenNotEnoughBuffered(videoTrackId,
             TaskQueue(), GetWakeDecoderRunnable());
 
         // TODO: We can't initialize |mNextVideoTime| until |mStartTime|
         // is set. This is a good indication that DecodedStreamData is in
@@ -430,17 +430,17 @@ void MediaDecoderStateMachine::SendStrea
         stream->mNextVideoTime = mStartTime + stream->mInitialTime;
       }
       mediaStream->FinishAddTracks();
       stream->mStreamInitialized = true;
     }
 
     if (mInfo.HasAudio()) {
       MOZ_ASSERT(stream->mNextAudioTime != -1, "Should've been initialized");
-      TrackID audioTrackId = mInfo.mAudio.mTrackInfo.mOutputId;
+      TrackID audioTrackId = mInfo.mAudio.mTrackId;
       nsAutoTArray<nsRefPtr<AudioData>,10> audio;
       // It's OK to hold references to the AudioData because AudioData
       // is ref-counted.
       AudioQueue().GetElementsAfter(stream->mNextAudioTime, &audio);
       AudioSegment output;
       for (uint32_t i = 0; i < audio.Length(); ++i) {
         SendStreamAudio(audio[i], stream, &output);
       }
@@ -456,17 +456,17 @@ void MediaDecoderStateMachine::SendStrea
       }
       endPosition = std::max(endPosition,
           mediaStream->TicksToTimeRoundDown(mInfo.mAudio.mRate,
                                             stream->mAudioFramesWritten));
     }
 
     if (mInfo.HasVideo()) {
       MOZ_ASSERT(stream->mNextVideoTime != -1, "Should've been initialized");
-      TrackID videoTrackId = mInfo.mVideo.mTrackInfo.mOutputId;
+      TrackID videoTrackId = mInfo.mVideo.mTrackId;
       nsAutoTArray<nsRefPtr<VideoData>,10> video;
       // It's OK to hold references to the VideoData because VideoData
       // is ref-counted.
       VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
       VideoSegment output;
       for (uint32_t i = 0; i < video.Length(); ++i) {
         VideoData* v = video[i];
         if (stream->mNextVideoTime < v->mTime) {
@@ -567,17 +567,17 @@ bool MediaDecoderStateMachine::HaveEnoug
   if (!mAudioCaptured) {
     return true;
   }
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
 
   if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
     MOZ_ASSERT(mInfo.HasAudio());
-    TrackID audioTrackId = mInfo.mAudio.mTrackInfo.mOutputId;
+    TrackID audioTrackId = mInfo.mAudio.mTrackId;
     if (!stream->mStream->HaveEnoughBuffered(audioTrackId)) {
       return false;
     }
     stream->mStream->DispatchWhenNotEnoughBuffered(audioTrackId,
         TaskQueue(), GetWakeDecoderRunnable());
   }
 
   return true;
@@ -590,17 +590,17 @@ bool MediaDecoderStateMachine::HaveEnoug
   if (static_cast<uint32_t>(VideoQueue().GetSize()) < GetAmpleVideoFrames() * mPlaybackRate) {
     return false;
   }
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
 
   if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
     MOZ_ASSERT(mInfo.HasVideo());
-    TrackID videoTrackId = mInfo.mVideo.mTrackInfo.mOutputId;
+    TrackID videoTrackId = mInfo.mVideo.mTrackId;
     if (!stream->mStream->HaveEnoughBuffered(videoTrackId)) {
       return false;
     }
     stream->mStream->DispatchWhenNotEnoughBuffered(videoTrackId,
         TaskQueue(), GetWakeDecoderRunnable());
   }
 
   return true;
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -1,111 +1,181 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(MediaInfo_h)
 #define MediaInfo_h
 
+#include "nsRect.h"
+#include "nsRefPtr.h"
 #include "nsSize.h"
-#include "nsRect.h"
-#include "ImageTypes.h"
 #include "nsString.h"
 #include "nsTArray.h"
+#include "ImageTypes.h"
+#include "MediaData.h"
 #include "StreamBuffer.h" // for TrackID
 
 namespace mozilla {
 
-struct TrackInfo {
-  void Init(const nsAString& aId,
+class TrackInfo {
+public:
+  enum TrackType {
+    kUndefinedTrack,
+    kAudioTrack,
+    kVideoTrack,
+    kTextTrack
+  };
+  TrackInfo(TrackType aType,
+            const nsAString& aId,
             const nsAString& aKind,
             const nsAString& aLabel,
             const nsAString& aLanguage,
             bool aEnabled,
-            TrackID aOutputId = TRACK_INVALID)
+            TrackID aTrackId = TRACK_INVALID)
+    : mId(aId)
+    , mKind(aKind)
+    , mLabel(aLabel)
+    , mLanguage(aLanguage)
+    , mEnabled(aEnabled)
+    , mTrackId(aTrackId)
+    , mDuration(0)
+    , mMediaTime(0)
+    , mType(aType)
+  {
+  }
+
+  // Only used for backward compatibility. Do not use in new code.
+  void Init(TrackType aType,
+            const nsAString& aId,
+            const nsAString& aKind,
+            const nsAString& aLabel,
+            const nsAString& aLanguage,
+            bool aEnabled,
+            TrackID aTrackId = TRACK_INVALID)
   {
     mId = aId;
     mKind = aKind;
     mLabel = aLabel;
     mLanguage = aLanguage;
     mEnabled = aEnabled;
-    mOutputId = aOutputId;
+    mTrackId = aTrackId;
+    mType = aType;
   }
 
+  // Fields common with MediaTrack object.
   nsString mId;
   nsString mKind;
   nsString mLabel;
   nsString mLanguage;
   bool mEnabled;
-  TrackID mOutputId;
+
+  TrackID mTrackId;
+
+  nsAutoCString mMimeType;
+  int64_t mDuration;
+  int64_t mMediaTime;
+  CryptoTrack mCrypto;
+
+  bool IsAudio() const
+  {
+    return mType == kAudioTrack;
+  }
+  bool IsVideo() const
+  {
+    return mType == kVideoTrack;
+  }
+  bool IsText() const
+  {
+    return mType == kTextTrack;
+  }
+  TrackType GetType() const
+  {
+    return mType;
+  }
+  bool virtual IsValid() const = 0;
+
+private:
+  TrackType mType;
 };
 
 // Stores info relevant to presenting media frames.
-class VideoInfo {
-private:
-  void Init(int32_t aWidth, int32_t aHeight, bool aHasVideo)
-  {
-    mDisplay = nsIntSize(aWidth, aHeight);
-    mStereoMode = StereoMode::MONO;
-    mHasVideo = aHasVideo;
-
-    // TODO: TrackInfo should be initialized by its specific codec decoder.
-    // This following call should be removed once we have that implemented.
-    mTrackInfo.Init(NS_LITERAL_STRING("2"), NS_LITERAL_STRING("main"),
-                    EmptyString(), EmptyString(), true, 2);
-  }
-
+class VideoInfo : public TrackInfo {
 public:
   VideoInfo()
+    : VideoInfo(-1, -1)
   {
-    Init(0, 0, false);
   }
 
   VideoInfo(int32_t aWidth, int32_t aHeight)
+    : TrackInfo(kVideoTrack, NS_LITERAL_STRING("2"), NS_LITERAL_STRING("main"),
+                EmptyString(), EmptyString(), true, 2)
+    , mDisplay(nsIntSize(aWidth, aHeight))
+    , mStereoMode(StereoMode::MONO)
+    , mImage(nsIntSize(aWidth, aHeight))
+    , mExtraData(new DataBuffer)
   {
-    Init(aWidth, aHeight, true);
+  }
+
+  virtual bool IsValid() const override
+  {
+    return mDisplay.width > 0 && mDisplay.height > 0;
   }
 
   // Size in pixels at which the video is rendered. This is after it has
   // been scaled by its aspect ratio.
   nsIntSize mDisplay;
 
   // Indicates the frame layout for single track stereo videos.
   StereoMode mStereoMode;
 
-  // True if we have an active video bitstream.
-  bool mHasVideo;
+  // Size in pixels of decoded video's image.
+  nsIntSize mImage;
 
-  TrackInfo mTrackInfo;
+  nsRefPtr<DataBuffer> mExtraData;
 };
 
-class AudioInfo {
+class AudioInfo : public TrackInfo {
 public:
   AudioInfo()
-    : mRate(44100)
-    , mChannels(2)
-    , mHasAudio(false)
+    : TrackInfo(kAudioTrack, NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
+                EmptyString(), EmptyString(), true, 1)
+    , mRate(0)
+    , mChannels(0)
+    , mBitDepth(0)
+    , mProfile(0)
+    , mExtendedProfile(0)
+    , mCodecSpecificConfig(new DataBuffer)
+    , mExtraData(new DataBuffer)
   {
-    // TODO: TrackInfo should be initialized by its specific codec decoder.
-    // This following call should be removed once we have that implemented.
-    mTrackInfo.Init(NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
-                    EmptyString(), EmptyString(), true, 1);
   }
 
   // Sample rate.
   uint32_t mRate;
 
   // Number of audio channels.
   uint32_t mChannels;
 
-  // True if we have an active audio bitstream.
-  bool mHasAudio;
+  // Bits per sample.
+  uint32_t mBitDepth;
+
+  // Codec profile.
+  int8_t mProfile;
 
-  TrackInfo mTrackInfo;
+  // Extended codec profile.
+  int8_t mExtendedProfile;
+
+  nsRefPtr<DataBuffer> mCodecSpecificConfig;
+  nsRefPtr<DataBuffer> mExtraData;
+
+  virtual bool IsValid() const override
+  {
+    return mChannels > 0 && mRate > 0;
+  }
 };
 
 class EncryptionInfo {
 public:
   struct InitData {
     template<typename AInitDatas>
     InitData(const nsAString& aType, AInitDatas&& aInitData)
       : mType(aType)
@@ -141,22 +211,22 @@ public:
   // One 'InitData' per encrypted buffer.
   InitDatas mInitDatas;
 };
 
 class MediaInfo {
 public:
   bool HasVideo() const
   {
-    return mVideo.mHasVideo;
+    return mVideo.IsValid();
   }
 
   bool HasAudio() const
   {
-    return mAudio.mHasAudio;
+    return mAudio.IsValid();
   }
 
   bool IsEncrypted() const
   {
     return mCrypto.IsEncrypted();
   }
 
   bool HasValidMedia() const
--- a/dom/media/MediaPromise.h
+++ b/dom/media/MediaPromise.h
@@ -306,27 +306,26 @@ protected:
   };
 public:
 
   template<typename ThisType, typename ResolveMethodType, typename RejectMethodType>
   already_AddRefed<Consumer> RefableThen(AbstractThread* aResponseThread, const char* aCallSite, ThisType* aThisVal,
                                          ResolveMethodType aResolveMethod, RejectMethodType aRejectMethod,
                                          TaskDispatcher& aDispatcher = PassByRef<AutoTaskDispatcher>())
   {
-    MutexAutoLock lock(mMutex);
-
     // {Refable,}Then() rarely dispatch directly - they do so only in the case
     // where the promise has already been resolved by the time {Refable,}Then()
     // is invoked. This case is rare, but it _can_ happen, which makes it a ripe
     // target for race bugs. So we do an extra assertion here to make sure our
     // caller is using tail dispatch correctly no matter what, rather than
     // relying on the assertion in Dispatch(), which may be called extremely
     // infrequently.
     aDispatcher.AssertIsTailDispatcherIfRequired();
 
+    MutexAutoLock lock(mMutex);
     MOZ_DIAGNOSTIC_ASSERT(!IsExclusive || !mHaveConsumer);
     mHaveConsumer = true;
     nsRefPtr<ThenValueBase> thenValue = new ThenValue<ThisType, ResolveMethodType, RejectMethodType>(
                                               aResponseThread, aThisVal, aResolveMethod, aRejectMethod, aCallSite);
     PROMISE_LOG("%s invoking Then() [this=%p, thenValue=%p, aThisVal=%p, isPending=%d]",
                 aCallSite, this, thenValue.get(), aThisVal, (int) IsPending());
     if (!IsPending()) {
       thenValue->Dispatch(this, aDispatcher);
--- a/dom/media/MediaTaskQueue.cpp
+++ b/dom/media/MediaTaskQueue.cpp
@@ -214,30 +214,28 @@ MediaTaskQueue::IsEmpty()
 {
   MonitorAutoLock mon(mQueueMonitor);
   return mTasks.empty();
 }
 
 bool
 MediaTaskQueue::IsCurrentThreadIn()
 {
-  MonitorAutoLock mon(mQueueMonitor);
   bool in = NS_GetCurrentThread() == mRunningThread;
   MOZ_ASSERT_IF(in, GetCurrentQueue() == this);
   return in;
 }
 
 nsresult
 MediaTaskQueue::Runner::Run()
 {
   RefPtr<nsIRunnable> event;
   {
     MonitorAutoLock mon(mQueue->mQueueMonitor);
     MOZ_ASSERT(mQueue->mIsRunning);
-    mQueue->mRunningThread = NS_GetCurrentThread();
     if (mQueue->mTasks.size() == 0) {
       mQueue->mIsRunning = false;
       mQueue->mShutdownPromise.ResolveIfExists(true, __func__);
       mon.NotifyAll();
       return NS_OK;
     }
     event = mQueue->mTasks.front().mRunnable;
     mQueue->mTasks.pop();
@@ -263,53 +261,40 @@ MediaTaskQueue::Runner::Run()
 
   {
     MonitorAutoLock mon(mQueue->mQueueMonitor);
     if (mQueue->mTasks.size() == 0) {
       // No more events to run. Exit the task runner.
       mQueue->mIsRunning = false;
       mQueue->mShutdownPromise.ResolveIfExists(true, __func__);
       mon.NotifyAll();
-      mQueue->mRunningThread = nullptr;
       return NS_OK;
     }
   }
 
   // There's at least one more event that we can run. Dispatch this Runner
   // to the thread pool again to ensure it runs again. Note that we don't just
   // run in a loop here so that we don't hog the thread pool. This means we may
   // run on another thread next time, but we rely on the memory fences from
   // mQueueMonitor for thread safety of non-threadsafe tasks.
-  {
+  nsresult rv = mQueue->mPool->Dispatch(this, NS_DISPATCH_NORMAL);
+  if (NS_FAILED(rv)) {
+    // Failed to dispatch, shutdown!
     MonitorAutoLock mon(mQueue->mQueueMonitor);
-    // Note: Hold the monitor *before* we dispatch, in case we context switch
-    // to another thread pool in the queue immediately and take the lock in the
-    // other thread; mRunningThread could be set to the new thread's value and
-    // then incorrectly anulled below in that case.
-    nsresult rv = mQueue->mPool->Dispatch(this, NS_DISPATCH_NORMAL);
-    if (NS_FAILED(rv)) {
-      // Failed to dispatch, shutdown!
-      mQueue->mIsRunning = false;
-      mQueue->mIsShutdown = true;
-      mon.NotifyAll();
-    }
-    mQueue->mRunningThread = nullptr;
+    mQueue->mIsRunning = false;
+    mQueue->mIsShutdown = true;
+    mon.NotifyAll();
   }
 
   return NS_OK;
 }
 
 #ifdef DEBUG
 void
 TaskDispatcher::AssertIsTailDispatcherIfRequired()
 {
   MediaTaskQueue* currentQueue = MediaTaskQueue::GetCurrentQueue();
-
-  // NB: Make sure not to use the TailDispatcher() accessor, since that
-  // asserts IsCurrentThreadIn(), which acquires the queue monitor, which
-  // triggers a deadlock during shutdown between the queue monitor and the
-  // MediaPromise monitor.
   MOZ_ASSERT_IF(currentQueue && currentQueue->RequiresTailDispatch(),
-                this == currentQueue->mTailDispatcher);
+                this == &currentQueue->TailDispatcher());
 }
 #endif
 
 } // namespace mozilla
--- a/dom/media/MediaTaskQueue.h
+++ b/dom/media/MediaTaskQueue.h
@@ -99,17 +99,17 @@ public:
 
   // Blocks until the queue is flagged for shutdown and all tasks have finished
   // executing.
   void AwaitShutdownAndIdle();
 
   bool IsEmpty();
 
   // Returns true if the current thread is currently running a Runnable in
-  // the task queue. This is for debugging/validation purposes only.
+  // the task queue.
   bool IsCurrentThreadIn() override;
 
 protected:
   virtual ~MediaTaskQueue();
 
 
   // Blocks until all task finish executing. Called internally by methods
   // that need to wait until the task queue is idle.
@@ -136,45 +136,54 @@ protected:
   };
 
   // Queue of tasks to run.
   std::queue<TaskQueueEntry> mTasks;
 
   // The thread currently running the task queue. We store a reference
   // to this so that IsCurrentThreadIn() can tell if the current thread
   // is the thread currently running in the task queue.
-  RefPtr<nsIThread> mRunningThread;
+  //
+  // This may be read on any thread, but may only be written on mRunningThread.
+  // The thread can't die while we're running in it, and we only use it for
+  // pointer-comparison with the current thread anyway - so we make it atomic
+  // and don't refcount it.
+  Atomic<nsIThread*> mRunningThread;
 
   // RAII class that gets instantiated for each dispatched task.
   class AutoTaskGuard : public AutoTaskDispatcher
   {
   public:
     explicit AutoTaskGuard(MediaTaskQueue* aQueue) : mQueue(aQueue)
     {
       // NB: We don't hold the lock to aQueue here. Don't do anything that
       // might require it.
       MOZ_ASSERT(!mQueue->mTailDispatcher);
       mQueue->mTailDispatcher = this;
 
       MOZ_ASSERT(sCurrentQueueTLS.get() == nullptr);
       sCurrentQueueTLS.set(aQueue);
 
+      MOZ_ASSERT(mQueue->mRunningThread == nullptr);
+      mQueue->mRunningThread = NS_GetCurrentThread();
     }
 
     ~AutoTaskGuard()
     {
+      MOZ_ASSERT(mQueue->mRunningThread == NS_GetCurrentThread());
+      mQueue->mRunningThread = nullptr;
+
       sCurrentQueueTLS.set(nullptr);
       mQueue->mTailDispatcher = nullptr;
     }
 
   private:
   MediaTaskQueue* mQueue;
   };
 
-  friend class TaskDispatcher;
   TaskDispatcher* mTailDispatcher;
 
   // True if we've dispatched an event to the pool to execute events from
   // the queue.
   bool mIsRunning;
 
   // True if we've started our shutdown process.
   bool mIsShutdown;
--- a/dom/media/TaskDispatcher.h
+++ b/dom/media/TaskDispatcher.h
@@ -64,16 +64,17 @@ public:
   {
     for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
       UniquePtr<PerThreadTaskGroup> group(Move(mTaskGroups[i]));
       nsRefPtr<AbstractThread> thread = group->mThread;
       bool assertDispatchSuccess = group->mAssertDispatchSuccess;
       nsCOMPtr<nsIRunnable> r = new TaskGroupRunnable(Move(group));
       nsresult rv = thread->Dispatch(r.forget());
       MOZ_DIAGNOSTIC_ASSERT(!assertDispatchSuccess || NS_SUCCEEDED(rv));
+      unused << assertDispatchSuccess;
       unused << rv;
     }
   }
 
   void AddStateChangeTask(AbstractThread* aThread,
                           already_AddRefed<nsIRunnable> aRunnable) override
   {
     EnsureTaskGroup(aThread).mStateChangeTasks.AppendElement(aRunnable);
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -69,32 +69,32 @@ nsresult AndroidMediaReader::ReadMetadat
     // that our video frame creation code doesn't overflow.
     nsIntSize displaySize(width, height);
     nsIntSize frameSize(width, height);
     if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
       return NS_ERROR_FAILURE;
     }
 
     // Video track's frame sizes will not overflow. Activate the video track.
-    mHasVideo = mInfo.mVideo.mHasVideo = true;
+    mHasVideo = true;
     mInfo.mVideo.mDisplay = displaySize;
     mPicture = pictureRect;
     mInitialFrame = frameSize;
     VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
     if (container) {
       container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                  nullptr,
                                  mozilla::TimeStamp::Now());
     }
   }
 
   if (mPlugin->HasAudio(mPlugin)) {
     int32_t numChannels, sampleRate;
     mPlugin->GetAudioParameters(mPlugin, &numChannels, &sampleRate);
-    mHasAudio = mInfo.mAudio.mHasAudio = true;
+    mHasAudio = true;
     mInfo.mAudio.mChannels = numChannels;
     mInfo.mAudio.mRate = sampleRate;
   }
 
  *aInfo = mInfo;
  *aTags = nullptr;
   return NS_OK;
 }
--- a/dom/media/apple/AppleMP3Reader.cpp
+++ b/dom/media/apple/AppleMP3Reader.cpp
@@ -409,19 +409,20 @@ AppleMP3Reader::ReadMetadata(MediaInfo* 
     return NS_ERROR_FAILURE;
   }
 
   if (!mMP3FrameParser.IsMP3()) {
     LOGE("Frame parser failed to parse MP3 stream\n");
     return NS_ERROR_FAILURE;
   }
 
-  aInfo->mAudio.mRate = mAudioSampleRate;
-  aInfo->mAudio.mChannels = mAudioChannels;
-  aInfo->mAudio.mHasAudio = mStreamReady;
+  if (mStreamReady) {
+    aInfo->mAudio.mRate = mAudioSampleRate;
+    aInfo->mAudio.mChannels = mAudioChannels;
+  }
 
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     mDuration = mMP3FrameParser.GetDuration();
     mDecoder->SetMediaDuration(mDuration);
   }
 
   return NS_OK;
--- a/dom/media/directshow/DirectShowReader.cpp
+++ b/dom/media/directshow/DirectShowReader.cpp
@@ -192,18 +192,18 @@ DirectShowReader::ReadMetadata(MediaInfo
   NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
 
   WAVEFORMATEX format;
   mAudioSinkFilter->GetSampleSink()->GetAudioFormat(&format);
   NS_ENSURE_TRUE(format.wFormatTag == WAVE_FORMAT_PCM, NS_ERROR_FAILURE);
 
   mInfo.mAudio.mChannels = mNumChannels = format.nChannels;
   mInfo.mAudio.mRate = mAudioRate = format.nSamplesPerSec;
+  mInfo.mAudio.mBitDepth = format.wBitsPerSample;
   mBytesPerSample = format.wBitsPerSample / 8;
-  mInfo.mAudio.mHasAudio = true;
 
   *aInfo = mInfo;
   // Note: The SourceFilter strips ID3v2 tags out of the stream.
   *aTags = nullptr;
 
   // Begin decoding!
   hr = mControl->Run();
   NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
--- a/dom/media/fmp4/BlankDecoderModule.cpp
+++ b/dom/media/fmp4/BlankDecoderModule.cpp
@@ -6,17 +6,17 @@
 
 #include "MediaDecoderReader.h"
 #include "PlatformDecoderModule.h"
 #include "nsRect.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/CheckedInt.h"
 #include "VideoUtils.h"
 #include "ImageContainer.h"
-#include "mp4_demuxer/mp4_demuxer.h"
+#include "MediaInfo.h"
 #include "MediaTaskQueue.h"
 
 namespace mozilla {
 
 // Decoder that uses a passed in object's Create function to create blank
 // MediaData objects.
 template<class BlankMediaDataCreator>
 class BlankMediaDataDecoder : public MediaDataDecoder {
@@ -203,53 +203,53 @@ private:
   uint32_t mSampleRate;
 };
 
 class BlankDecoderModule : public PlatformDecoderModule {
 public:
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override {
     BlankVideoDataCreator* creator = new BlankVideoDataCreator(
-      aConfig.display_width, aConfig.display_height, aImageContainer);
+      aConfig.mDisplay.width, aConfig.mDisplay.height, aImageContainer);
     nsRefPtr<MediaDataDecoder> decoder =
       new BlankMediaDataDecoder<BlankVideoDataCreator>(creator,
                                                        aVideoTaskQueue,
                                                        aCallback);
     return decoder.forget();
   }
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override {
     BlankAudioDataCreator* creator = new BlankAudioDataCreator(
-      aConfig.channel_count, aConfig.samples_per_second);
+      aConfig.mChannels, aConfig.mRate);
 
     nsRefPtr<MediaDataDecoder> decoder =
       new BlankMediaDataDecoder<BlankAudioDataCreator>(creator,
                                                        aAudioTaskQueue,
                                                        aCallback);
     return decoder.forget();
   }
 
   virtual bool
   SupportsMimeType(const nsACString& aMimeType) override
   {
     return true;
   }
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override
   {
     return kNeedNone;
   }
 
 };
 
 already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule()
 {
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -363,23 +363,22 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo
 {
   if (!mDemuxerInitialized) {
     MonitorAutoLock mon(mDemuxerMonitor);
     bool ok = InvokeAndRetry(this, &MP4Reader::InitDemuxer, mStream, &mDemuxerMonitor);
     NS_ENSURE_TRUE(ok, NS_ERROR_FAILURE);
     mIndexReady = true;
 
     // To decode, we need valid video and a place to put it.
-    mInfo.mVideo.mHasVideo = mVideo.mActive = mDemuxer->HasValidVideo() &&
-                                              mDecoder->GetImageContainer();
+    mVideo.mActive = mDemuxer->HasValidVideo() && mDecoder->GetImageContainer();
     if (mVideo.mActive) {
       mVideo.mTrackDemuxer = new MP4VideoDemuxer(mDemuxer);
     }
 
-    mInfo.mAudio.mHasAudio = mAudio.mActive = mDemuxer->HasValidAudio();
+    mAudio.mActive = mDemuxer->HasValidAudio();
     if (mAudio.mActive) {
       mAudio.mTrackDemuxer = new MP4AudioDemuxer(mDemuxer);
     }
     mCrypto = mDemuxer->Crypto();
 
     {
       MonitorAutoUnlock unlock(mDemuxerMonitor);
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -391,31 +390,27 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo
     // need to reinit the demuxer.
     mDemuxerInitialized = true;
   } else if (mPlatform && !IsWaitingMediaResources()) {
     *aInfo = mInfo;
     *aTags = nullptr;
   }
 
   if (HasAudio()) {
-    const AudioDecoderConfig& audio = mDemuxer->AudioConfig();
-    mInfo.mAudio.mRate = audio.samples_per_second;
-    mInfo.mAudio.mChannels = audio.channel_count;
+    mInfo.mAudio = mDemuxer->AudioConfig();
     mAudio.mCallback = new DecoderCallback(this, kAudio);
   }
 
   if (HasVideo()) {
-    const VideoDecoderConfig& video = mDemuxer->VideoConfig();
-    mInfo.mVideo.mDisplay =
-      nsIntSize(video.display_width, video.display_height);
+    mInfo.mVideo = mDemuxer->VideoConfig();
     mVideo.mCallback = new DecoderCallback(this, kVideo);
 
     // Collect telemetry from h264 AVCC SPS.
     if (!mFoundSPSForTelemetry) {
-      mFoundSPSForTelemetry = AccumulateSPSTelemetry(video.extra_data);
+      mFoundSPSForTelemetry = AccumulateSPSTelemetry(mInfo.mVideo.mExtraData);
     }
   }
 
   if (mCrypto.valid) {
     nsTArray<uint8_t> initData;
     ExtractCryptoInitData(initData);
     if (initData.Length() == 0) {
       return NS_ERROR_FAILURE;
@@ -511,30 +506,30 @@ MP4Reader::EnsureDecodersSetup()
     // mPlatform doesn't need to be recreated when resuming from dormant.
     if (!mPlatform) {
       mPlatform = PlatformDecoderModule::Create();
       NS_ENSURE_TRUE(mPlatform, false);
     }
   }
 
   if (HasAudio()) {
-    NS_ENSURE_TRUE(IsSupportedAudioMimeType(mDemuxer->AudioConfig().mime_type),
+    NS_ENSURE_TRUE(IsSupportedAudioMimeType(mDemuxer->AudioConfig().mMimeType),
                    false);
 
     mAudio.mDecoder =
       mPlatform->CreateDecoder(mDemuxer->AudioConfig(),
                                mAudio.mTaskQueue,
                                mAudio.mCallback);
     NS_ENSURE_TRUE(mAudio.mDecoder != nullptr, false);
     nsresult rv = mAudio.mDecoder->Init();
     NS_ENSURE_SUCCESS(rv, false);
   }
 
   if (HasVideo()) {
-    NS_ENSURE_TRUE(IsSupportedVideoMimeType(mDemuxer->VideoConfig().mime_type),
+    NS_ENSURE_TRUE(IsSupportedVideoMimeType(mDemuxer->VideoConfig().mMimeType),
                    false);
 
     if (mSharedDecoderManager && mPlatform->SupportsSharedDecoders(mDemuxer->VideoConfig())) {
       mVideo.mDecoder =
         mSharedDecoderManager->CreateVideoDecoder(mPlatform,
                                                   mDemuxer->VideoConfig(),
                                                   mLayersBackendType,
                                                   mDecoder->GetImageContainer(),
@@ -601,17 +596,17 @@ MP4Reader::GetNextKeyframeTime()
 }
 
 void
 MP4Reader::DisableHardwareAcceleration()
 {
   if (HasVideo() && mSharedDecoderManager) {
     mSharedDecoderManager->DisableHardwareAcceleration();
 
-    const VideoDecoderConfig& video = mDemuxer->VideoConfig();
+    const VideoInfo& video = mDemuxer->VideoConfig();
     if (!mSharedDecoderManager->Recreate(video)) {
       MonitorAutoLock mon(mVideo.mMonitor);
       mVideo.mError = true;
       if (mVideo.HasPromise()) {
         mVideo.RejectPromise(DECODE_ERROR, __func__);
       }
     } else {
       MonitorAutoLock lock(mVideo.mMonitor);
--- a/dom/media/fmp4/PlatformDecoderModule.cpp
+++ b/dom/media/fmp4/PlatformDecoderModule.cpp
@@ -26,17 +26,17 @@
 #include "mozilla/Preferences.h"
 #ifdef MOZ_EME
 #include "EMEDecoderModule.h"
 #include "mozilla/CDMProxy.h"
 #endif
 #include "SharedThreadPool.h"
 #include "MediaTaskQueue.h"
 
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "H264Converter.h"
 
 namespace mozilla {
 
 extern already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule();
 
 bool PlatformDecoderModule::sUseBlankDecoder = false;
 bool PlatformDecoderModule::sFFmpegDecoderEnabled = false;
@@ -173,44 +173,44 @@ PlatformDecoderModule::CreatePDM()
   if (sGMPDecoderEnabled) {
     nsRefPtr<PlatformDecoderModule> m(new GMPDecoderModule());
     return m.forget();
   }
   return nullptr;
 }
 
 already_AddRefed<MediaDataDecoder>
-PlatformDecoderModule::CreateDecoder(const mp4_demuxer::TrackConfig& aConfig,
+PlatformDecoderModule::CreateDecoder(const TrackInfo& aConfig,
                                      FlushableMediaTaskQueue* aTaskQueue,
                                      MediaDataDecoderCallback* aCallback,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer)
 {
   nsRefPtr<MediaDataDecoder> m;
 
-  if (aConfig.IsAudioConfig()) {
-    m = CreateAudioDecoder(static_cast<const mp4_demuxer::AudioDecoderConfig&>(aConfig),
+  if (aConfig.IsAudio()) {
+    m = CreateAudioDecoder(static_cast<const AudioInfo&>(aConfig),
                            aTaskQueue,
                            aCallback);
     return m.forget();
   }
 
-  if (!aConfig.IsVideoConfig()) {
+  if (!aConfig.IsVideo()) {
     return nullptr;
   }
 
   if (H264Converter::IsH264(aConfig)) {
     m = new H264Converter(this,
-                          static_cast<const mp4_demuxer::VideoDecoderConfig&>(aConfig),
+                          static_cast<const VideoInfo&>(aConfig),
                           aLayersBackend,
                           aImageContainer,
                           aTaskQueue,
                           aCallback);
   } else {
-    m = CreateVideoDecoder(static_cast<const mp4_demuxer::VideoDecoderConfig&>(aConfig),
+    m = CreateVideoDecoder(static_cast<const VideoInfo&>(aConfig),
                            aLayersBackend,
                            aImageContainer,
                            aTaskQueue,
                            aCallback);
   }
   return m.forget();
 }
 
--- a/dom/media/fmp4/PlatformDecoderModule.h
+++ b/dom/media/fmp4/PlatformDecoderModule.h
@@ -8,25 +8,22 @@
 #define PlatformDecoderModule_h_
 
 #include "MediaDecoderReader.h"
 #include "mozilla/layers/LayersTypes.h"
 #include "nsTArray.h"
 #include "mozilla/RefPtr.h"
 #include <queue>
 
-namespace mp4_demuxer {
-class TrackConfig;
-class VideoDecoderConfig;
-class AudioDecoderConfig;
-}
-
 class nsIThreadPool;
 
 namespace mozilla {
+class TrackInfo;
+class AudioInfo;
+class VideoInfo;
 class MediaRawData;
 class DataBuffer;
 
 namespace layers {
 class ImageContainer;
 }
 
 class MediaDataDecoder;
@@ -86,17 +83,17 @@ public:
   CreateCDMWrapper(CDMProxy* aProxy,
                    bool aHasAudio,
                    bool aHasVideo);
 #endif
 
   // Creates a decoder.
   // See CreateVideoDecoder and CreateAudioDecoder for implementation details.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateDecoder(const mp4_demuxer::TrackConfig& aConfig,
+  CreateDecoder(const TrackInfo& aConfig,
                 FlushableMediaTaskQueue* aTaskQueue,
                 MediaDataDecoderCallback* aCallback,
                 layers::LayersBackend aLayersBackend = layers::LayersBackend::LAYERS_NONE,
                 layers::ImageContainer* aImageContainer = nullptr);
 
   // An audio decoder module must support AAC by default.
   // A video decoder must support H264 by default.
   // If more codecs are to be supported, SupportsMimeType will have
@@ -107,21 +104,21 @@ public:
     kNeedNone,
     kNeedAVCC,
     kNeedAnnexB,
   };
 
   // Indicates that the decoder requires a specific format.
   // The PlatformDecoderModule will convert the demuxed data accordingly before
   // feeding it to MediaDataDecoder::Input.
-  virtual ConversionRequired DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const = 0;
+  virtual ConversionRequired DecoderNeedsConversion(const TrackInfo& aConfig) const = 0;
 
   virtual void DisableHardwareAcceleration() {}
 
-  virtual bool SupportsSharedDecoders(const mp4_demuxer::VideoDecoderConfig& aConfig) const {
+  virtual bool SupportsSharedDecoders(const VideoInfo& aConfig) const {
     return true;
   }
 
 protected:
   PlatformDecoderModule() {}
   virtual ~PlatformDecoderModule() {}
 
   friend class H264Converter;
@@ -132,34 +129,34 @@ protected:
   // not hold a reference to it.
   // Output and errors should be returned to the reader via aCallback.
   // On Windows the task queue's threads in have MSCOM initialized with
   // COINIT_MULTITHREADED.
   // Returns nullptr if the decoder can't be created.
   // It is safe to store a reference to aConfig.
   // This is called on the decode task queue.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) = 0;
 
   // Creates an Audio decoder with the specified properties.
   // Asynchronous decoding of audio should be done in runnables dispatched to
   // aAudioTaskQueue. If the task queue isn't needed, the decoder should
   // not hold a reference to it.
   // Output and errors should be returned to the reader via aCallback.
   // Returns nullptr if the decoder can't be created.
   // On Windows the task queue's threads in have MSCOM initialized with
   // COINIT_MULTITHREADED.
   // It is safe to store a reference to aConfig.
   // This is called on the decode task queue.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) = 0;
 
   // Caches pref media.fragmented-mp4.use-blank-decoder
   static bool sUseBlankDecoder;
   static bool sFFmpegDecoderEnabled;
   static bool sGonkDecoderEnabled;
   static bool sAndroidMCDecoderPreferred;
@@ -255,19 +252,19 @@ public:
   // For Codec Resource Management
   virtual bool IsWaitingMediaResources() {
     return false;
   };
   virtual bool IsHardwareAccelerated() const { return false; }
 
   // ConfigurationChanged will be called to inform the video or audio decoder
   // that the format of the next input sample is about to change.
-  // If video decoder, aConfig will be a VideoDecoderConfig object.
-  // If audio decoder, aConfig will be a AudioDecoderConfig object.
-  virtual nsresult ConfigurationChanged(const mp4_demuxer::TrackConfig& aConfig)
+  // If video decoder, aConfig will be a VideoInfo object.
+  // If audio decoder, aConfig will be a AudioInfo object.
+  virtual nsresult ConfigurationChanged(const TrackInfo& aConfig)
   {
     return NS_OK;
   }
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/fmp4/SharedDecoderManager.cpp
+++ b/dom/media/fmp4/SharedDecoderManager.cpp
@@ -1,16 +1,15 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SharedDecoderManager.h"
-#include "mp4_demuxer/DecoderData.h"
 
 namespace mozilla {
 
 class SharedDecoderCallback : public MediaDataDecoderCallback
 {
 public:
   explicit SharedDecoderCallback(SharedDecoderManager* aManager)
     : mManager(aManager)
@@ -71,17 +70,17 @@ SharedDecoderManager::SharedDecoderManag
 
 SharedDecoderManager::~SharedDecoderManager()
 {
 }
 
 already_AddRefed<MediaDataDecoder>
 SharedDecoderManager::CreateVideoDecoder(
   PlatformDecoderModule* aPDM,
-  const mp4_demuxer::VideoDecoderConfig& aConfig,
+  const VideoInfo& aConfig,
   layers::LayersBackend aLayersBackend,
   layers::ImageContainer* aImageContainer,
   FlushableMediaTaskQueue* aVideoTaskQueue,
   MediaDataDecoderCallback* aCallback)
 {
   if (!mDecoder) {
     mLayersBackend = aLayersBackend;
     mImageContainer = aImageContainer;
@@ -111,17 +110,17 @@ SharedDecoderManager::CreateVideoDecoder
 void
 SharedDecoderManager::DisableHardwareAcceleration()
 {
   MOZ_ASSERT(mPDM);
   mPDM->DisableHardwareAcceleration();
 }
 
 bool
-SharedDecoderManager::Recreate(const mp4_demuxer::VideoDecoderConfig& aConfig)
+SharedDecoderManager::Recreate(const VideoInfo& aConfig)
 {
   mDecoder->Flush();
   mDecoder->Shutdown();
   mDecoder = mPDM->CreateDecoder(aConfig,
                                  mTaskQueue,
                                  mCallback,
                                  mLayersBackend,
                                  mImageContainer);
--- a/dom/media/fmp4/SharedDecoderManager.h
+++ b/dom/media/fmp4/SharedDecoderManager.h
@@ -21,33 +21,33 @@ class SharedDecoderManager
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedDecoderManager)
 
   SharedDecoderManager();
 
   already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
     PlatformDecoderModule* aPDM,
-    const mp4_demuxer::VideoDecoderConfig& aConfig,
+    const VideoInfo& aConfig,
     layers::LayersBackend aLayersBackend,
     layers::ImageContainer* aImageContainer,
     FlushableMediaTaskQueue* aVideoTaskQueue,
     MediaDataDecoderCallback* aCallback);
 
   void SetReader(MediaDecoderReader* aReader);
   void Select(SharedDecoderProxy* aProxy);
   void SetIdle(MediaDataDecoder* aProxy);
   void ReleaseMediaResources();
   void Shutdown();
 
   friend class SharedDecoderProxy;
   friend class SharedDecoderCallback;
 
   void DisableHardwareAcceleration();
-  bool Recreate(const mp4_demuxer::VideoDecoderConfig& aConfig);
+  bool Recreate(const VideoInfo& aConfig);
 
 private:
   virtual ~SharedDecoderManager();
   void DrainComplete();
 
   nsRefPtr<PlatformDecoderModule> mPDM;
   nsRefPtr<MediaDataDecoder> mDecoder;
   layers::LayersBackend mLayersBackend;
--- a/dom/media/fmp4/android/AndroidDecoderModule.cpp
+++ b/dom/media/fmp4/android/AndroidDecoderModule.cpp
@@ -7,19 +7,17 @@
 #include "GLBlitHelper.h"
 #include "GLContext.h"
 #include "GLContextEGL.h"
 #include "GLContextProvider.h"
 #include "GLImages.h"
 #include "GLLibraryEGL.h"
 
 #include "MediaData.h"
-
-#include "mp4_demuxer/AnnexB.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 
 #include "nsThreadUtils.h"
 #include "nsAutoPtr.h"
 #include "nsPromiseFlatString.h"
 
 #include <jni.h>
 #include <string.h>
 
@@ -33,20 +31,20 @@ static MediaCodec::LocalRef CreateDecode
 {
   MediaCodec::LocalRef codec;
   NS_ENSURE_SUCCESS(MediaCodec::CreateDecoderByType(PromiseFlatCString(aMimeType).get(), &codec), nullptr);
   return codec;
 }
 
 class VideoDataDecoder : public MediaCodecDataDecoder {
 public:
-  VideoDataDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  VideoDataDecoder(const VideoInfo& aConfig,
                    MediaFormat::Param aFormat, MediaDataDecoderCallback* aCallback,
                    layers::ImageContainer* aImageContainer)
-    : MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mime_type, aFormat, aCallback)
+    : MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType, aFormat, aCallback)
     , mImageContainer(aImageContainer)
     , mConfig(aConfig)
   {
 
   }
 
   nsresult Init() override {
     mSurfaceTexture = AndroidSurfaceTexture::Create();
@@ -96,23 +94,20 @@ public:
     return eglImage;
   }
 
   virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat, Microseconds aDuration) override {
     if (!EnsureGLContext()) {
       return NS_ERROR_FAILURE;
     }
 
-    VideoInfo videoInfo;
-    videoInfo.mDisplay = nsIntSize(mConfig.display_width, mConfig.display_height);
-
     nsRefPtr<layers::Image> img = mImageContainer->CreateImage(ImageFormat::SURFACE_TEXTURE);
     layers::SurfaceTextureImage::Data data;
     data.mSurfTex = mSurfaceTexture.get();
-    data.mSize = gfx::IntSize(mConfig.display_width, mConfig.display_height);
+    data.mSize = mConfig.mDisplay;
     data.mOriginPos = gl::OriginPos::BottomLeft;
 
     layers::SurfaceTextureImage* stImg = static_cast<layers::SurfaceTextureImage*>(img.get());
     stImg->SetData(data);
 
     if (WantCopy()) {
       EGLImage eglImage = CopySurface(img);
       if (!eglImage) {
@@ -133,17 +128,17 @@ public:
         NS_WARNING("No EGL fence support detected, rendering artifacts may occur!");
       }
 
       img = mImageContainer->CreateImage(ImageFormat::EGLIMAGE);
       layers::EGLImageImage::Data data;
       data.mImage = eglImage;
       data.mSync = eglSync;
       data.mOwns = true;
-      data.mSize = gfx::IntSize(mConfig.display_width, mConfig.display_height);
+      data.mSize = mConfig.mDisplay;
       data.mOriginPos = gl::OriginPos::BottomLeft;
 
       layers::EGLImageImage* typedImg = static_cast<layers::EGLImageImage*>(img.get());
       typedImg->SetData(data);
     }
 
     nsresult rv;
     int32_t flags;
@@ -152,60 +147,64 @@ public:
     bool isSync = !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME);
 
     int32_t offset;
     NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
 
     int64_t presentationTimeUs;
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
-    nsRefPtr<VideoData> v = VideoData::CreateFromImage(videoInfo, mImageContainer, offset,
-                                                       presentationTimeUs,
-                                                       aDuration,
-                                                       img, isSync,
-                                                       presentationTimeUs,
-                                                       gfx::IntRect(0, 0,
-                                                         mConfig.display_width,
-                                                         mConfig.display_height));
+    nsRefPtr<VideoData> v =
+      VideoData::CreateFromImage(mConfig,
+                                 mImageContainer,
+                                 offset,
+                                 presentationTimeUs,
+                                 aDuration,
+                                 img,
+                                 isSync,
+                                 presentationTimeUs,
+                                 gfx::IntRect(0, 0,
+                                              mConfig.mDisplay.width,
+                                              mConfig.mDisplay.height));
     mCallback->Output(v);
     return NS_OK;
   }
 
 protected:
   bool EnsureGLContext() {
     if (mGLContext) {
       return true;
     }
 
     mGLContext = GLContextProvider::CreateHeadless(false);
     return mGLContext;
   }
 
   layers::ImageContainer* mImageContainer;
-  const mp4_demuxer::VideoDecoderConfig& mConfig;
+  const VideoInfo& mConfig;
   RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
   nsRefPtr<GLContext> mGLContext;
 };
 
 class AudioDataDecoder : public MediaCodecDataDecoder {
 private:
   uint8_t csd0[2];
 
 public:
-  AudioDataDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig, MediaFormat::Param aFormat, MediaDataDecoderCallback* aCallback)
-    : MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mime_type, aFormat, aCallback)
+  AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat, MediaDataDecoderCallback* aCallback)
+    : MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType, aFormat, aCallback)
   {
     JNIEnv* env = GetJNIForThread();
 
     jni::Object::LocalRef buffer(env);
     NS_ENSURE_SUCCESS_VOID(aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"), &buffer));
 
-    if (!buffer && aConfig.audio_specific_config->Length() >= 2) {
-      csd0[0] = (*aConfig.audio_specific_config)[0];
-      csd0[1] = (*aConfig.audio_specific_config)[1];
+    if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
+      csd0[0] = (*aConfig.mCodecSpecificConfig)[0];
+      csd0[1] = (*aConfig.mCodecSpecificConfig)[1];
 
       buffer = jni::Object::LocalRef::Adopt(env, env->NewDirectByteBuffer(csd0, 2));
       NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
     }
   }
 
   nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, Microseconds aDuration) {
     // The output on Android is always 16-bit signed
@@ -250,62 +249,62 @@ bool AndroidDecoderModule::SupportsMimeT
       aMimeType.EqualsLiteral("video/avc")) {
     return true;
   }
   return static_cast<bool>(mozilla::CreateDecoder(aMimeType));
 }
 
 already_AddRefed<MediaDataDecoder>
 AndroidDecoderModule::CreateVideoDecoder(
-                                const mp4_demuxer::VideoDecoderConfig& aConfig,
+                                const VideoInfo& aConfig,
                                 layers::LayersBackend aLayersBackend,
                                 layers::ImageContainer* aImageContainer,
                                 FlushableMediaTaskQueue* aVideoTaskQueue,
                                 MediaDataDecoderCallback* aCallback)
 {
   MediaFormat::LocalRef format;
 
   NS_ENSURE_SUCCESS(MediaFormat::CreateVideoFormat(
-      aConfig.mime_type,
-      aConfig.display_width,
-      aConfig.display_height,
+      aConfig.mMimeType,
+      aConfig.mDisplay.width,
+      aConfig.mDisplay.height,
       &format), nullptr);
 
   nsRefPtr<MediaDataDecoder> decoder =
     new VideoDataDecoder(aConfig, format, aCallback, aImageContainer);
 
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-AndroidDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+AndroidDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                          FlushableMediaTaskQueue* aAudioTaskQueue,
                                          MediaDataDecoderCallback* aCallback)
 {
-  MOZ_ASSERT(aConfig.bits_per_sample == 16, "We only handle 16-bit audio!");
+  MOZ_ASSERT(aConfig.mBitDepth == 16, "We only handle 16-bit audio!");
 
   MediaFormat::LocalRef format;
 
   NS_ENSURE_SUCCESS(MediaFormat::CreateAudioFormat(
-      aConfig.mime_type,
-      aConfig.samples_per_second,
-      aConfig.channel_count,
+      aConfig.mMimeType,
+      aConfig.mBitDepth,
+      aConfig.mChannels,
       &format), nullptr);
 
   nsRefPtr<MediaDataDecoder> decoder =
     new AudioDataDecoder(aConfig, format, aCallback);
 
   return decoder.forget();
 
 }
 
 PlatformDecoderModule::ConversionRequired
-AndroidDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+AndroidDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
-  if (aConfig.IsVideoConfig()) {
+  if (aConfig.IsVideo()) {
     return kNeedAnnexB;
   } else {
     return kNeedNone;
   }
 }
 
 MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
                                              const nsACString& aMimeType,
--- a/dom/media/fmp4/android/AndroidDecoderModule.h
+++ b/dom/media/fmp4/android/AndroidDecoderModule.h
@@ -15,35 +15,35 @@
 
 namespace mozilla {
 
 typedef std::queue<nsRefPtr<MediaRawData>> SampleQueue;
 
 class AndroidDecoderModule : public PlatformDecoderModule {
 public:
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
 
   AndroidDecoderModule() {}
   virtual ~AndroidDecoderModule() {}
 
   virtual bool SupportsMimeType(const nsACString& aMimeType) override;
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 };
 
 class MediaCodecDataDecoder : public MediaDataDecoder {
 public:
 
   MediaCodecDataDecoder(MediaData::Type aType,
                         const nsACString& aMimeType,
                         widget::sdk::MediaFormat::Param aFormat,
--- a/dom/media/fmp4/apple/AppleATDecoder.cpp
+++ b/dom/media/fmp4/apple/AppleATDecoder.cpp
@@ -3,51 +3,51 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AppleUtils.h"
 #include "MP4Reader.h"
 #include "MP4Decoder.h"
 #include "mp4_demuxer/Adts.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "AppleATDecoder.h"
 #include "prlog.h"
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* GetAppleMediaLog();
 #define LOG(...) PR_LOG(GetAppleMediaLog(), PR_LOG_DEBUG, (__VA_ARGS__))
 #else
 #define LOG(...)
 #endif
 #define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
 
 namespace mozilla {
 
-AppleATDecoder::AppleATDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
                                FlushableMediaTaskQueue* aAudioTaskQueue,
                                MediaDataDecoderCallback* aCallback)
   : mConfig(aConfig)
   , mFileStreamError(false)
   , mTaskQueue(aAudioTaskQueue)
   , mCallback(aCallback)
   , mConverter(nullptr)
   , mStream(nullptr)
 {
   MOZ_COUNT_CTOR(AppleATDecoder);
   LOG("Creating Apple AudioToolbox decoder");
   LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
-      mConfig.mime_type.get(),
-      mConfig.samples_per_second,
-      mConfig.channel_count,
-      mConfig.bits_per_sample);
+      mConfig.mMimeType.get(),
+      mConfig.mRate,
+      mConfig.mChannels,
+      mConfig.mBitDepth);
 
-  if (mConfig.mime_type.EqualsLiteral("audio/mpeg")) {
+  if (mConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
     mFormatID = kAudioFormatMPEGLayer3;
-  } else if (mConfig.mime_type.EqualsLiteral("audio/mp4a-latm")) {
+  } else if (mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
     mFormatID = kAudioFormatMPEG4AAC;
   } else {
     mFormatID = 0;
   }
 }
 
 AppleATDecoder::~AppleATDecoder()
 {
@@ -293,29 +293,29 @@ nsresult
 AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                          const nsTArray<uint8_t>& aExtraData)
 {
   // Request the properties from CoreAudio using the codec magic cookie
   AudioFormatInfo formatInfo;
   PodZero(&formatInfo.mASBD);
   formatInfo.mASBD.mFormatID = mFormatID;
   if (mFormatID == kAudioFormatMPEG4AAC) {
-    formatInfo.mASBD.mFormatFlags = mConfig.extended_profile;
+    formatInfo.mASBD.mFormatFlags = mConfig.mExtendedProfile;
   }
   formatInfo.mMagicCookieSize = aExtraData.Length();
   formatInfo.mMagicCookie = aExtraData.Elements();
 
   UInt32 formatListSize;
   // Attempt to retrieve the default format using
   // kAudioFormatProperty_FormatInfo method.
   // This method only retrieves the FramesPerPacket information required
   // by the decoder, which depends on the codec type and profile.
   aDesc.mFormatID = mFormatID;
-  aDesc.mChannelsPerFrame = mConfig.channel_count;
-  aDesc.mSampleRate = mConfig.samples_per_second;
+  aDesc.mChannelsPerFrame = mConfig.mChannels;
+  aDesc.mSampleRate = mConfig.mRate;
   UInt32 inputFormatSize = sizeof(aDesc);
   OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                        0,
                                        NULL,
                                        &inputFormatSize,
                                        &aDesc);
   if (NS_WARN_IF(rv)) {
     return NS_ERROR_FAILURE;
@@ -362,17 +362,17 @@ AppleATDecoder::GetInputAudioDescription
 
   return NS_OK;
 }
 
 nsresult
 AppleATDecoder::SetupDecoder(MediaRawData* aSample)
 {
   if (mFormatID == kAudioFormatMPEG4AAC &&
-      mConfig.extended_profile == 2) {
+      mConfig.mExtendedProfile == 2) {
     // Check for implicit SBR signalling if stream is AAC-LC
     // This will provide us with an updated magic cookie for use with
     // GetInputAudioDescription.
     if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
         !mMagicCookie.Length()) {
       // nothing found yet, will try again later
       return NS_ERROR_NOT_INITIALIZED;
     }
@@ -381,17 +381,17 @@ AppleATDecoder::SetupDecoder(MediaRawDat
 
   LOG("Initializing Apple AudioToolbox decoder");
 
   AudioStreamBasicDescription inputFormat;
   PodZero(&inputFormat);
   nsresult rv =
     GetInputAudioDescription(inputFormat,
                              mMagicCookie.Length() ?
-                                 mMagicCookie : *mConfig.extra_data);
+                                 mMagicCookie : *mConfig.mExtraData);
   if (NS_FAILED(rv)) {
     return rv;
   }
   // Fill in the output format manually.
   PodZero(&mOutputFormat);
   mOutputFormat.mFormatID = kAudioFormatLinearPCM;
   mOutputFormat.mSampleRate = inputFormat.mSampleRate;
   mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
@@ -463,20 +463,22 @@ static void
 nsresult
 AppleATDecoder::GetImplicitAACMagicCookie(const MediaRawData* aSample)
 {
   // Prepend ADTS header to AAC audio.
   nsRefPtr<MediaRawData> adtssample(aSample->Clone());
   if (!adtssample) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
+  int8_t frequency_index =
+    mp4_demuxer::Adts::GetFrequencyIndex(mConfig.mRate);
 
-  bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.channel_count,
-                                             mConfig.frequency_index,
-                                             mConfig.aac_profile,
+  bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.mChannels,
+                                             frequency_index,
+                                             mConfig.mProfile,
                                              adtssample);
   if (!rv) {
     NS_WARNING("Failed to apply ADTS header");
     return NS_ERROR_FAILURE;
   }
   if (!mStream) {
     OSStatus rv = AudioFileStreamOpen(this,
                                       _MetadataCallback,
--- a/dom/media/fmp4/apple/AppleATDecoder.h
+++ b/dom/media/fmp4/apple/AppleATDecoder.h
@@ -15,29 +15,29 @@
 
 namespace mozilla {
 
 class FlushableMediaTaskQueue;
 class MediaDataDecoderCallback;
 
 class AppleATDecoder : public MediaDataDecoder {
 public:
-  AppleATDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  AppleATDecoder(const AudioInfo& aConfig,
                  FlushableMediaTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback);
   virtual ~AppleATDecoder();
 
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
 
   // Callbacks also need access to the config.
-  const mp4_demuxer::AudioDecoderConfig& mConfig;
+  const AudioInfo& mConfig;
 
   // Use to extract magic cookie for HE-AAC detection.
   nsTArray<uint8_t> mMagicCookie;
   // Will be set to true should an error occurred while attempting to retrieve
   // the magic cookie property.
   bool mFileStreamError;
 
 private:
--- a/dom/media/fmp4/apple/AppleDecoderModule.cpp
+++ b/dom/media/fmp4/apple/AppleDecoderModule.cpp
@@ -144,17 +144,17 @@ AppleDecoderModule::Startup()
 
   nsCOMPtr<nsIRunnable> task(new LinkTask());
   NS_DispatchToMainThread(task, NS_DISPATCH_SYNC);
 
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
-AppleDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+AppleDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                        layers::LayersBackend aLayersBackend,
                                        layers::ImageContainer* aImageContainer,
                                        FlushableMediaTaskQueue* aVideoTaskQueue,
                                        MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder;
 
   if (sIsVDAAvailable && (!sIsVTHWAvailable || sForceVDA)) {
@@ -172,35 +172,35 @@ AppleDecoderModule::CreateVideoDecoder(c
   if (sIsVTAvailable) {
     decoder =
       new AppleVTDecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer);
   }
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-AppleDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+AppleDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                        FlushableMediaTaskQueue* aAudioTaskQueue,
                                        MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new AppleATDecoder(aConfig, aAudioTaskQueue, aCallback);
   return decoder.forget();
 }
 
 bool
 AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType)
 {
   return aMimeType.EqualsLiteral("audio/mpeg") ||
     PlatformDecoderModule::SupportsMimeType(aMimeType);
 }
 
 PlatformDecoderModule::ConversionRequired
-AppleDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+AppleDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
-  if (aConfig.IsVideoConfig()) {
+  if (aConfig.IsVideo()) {
     return kNeedAVCC;
   } else {
     return kNeedNone;
   }
 }
 
 } // namespace mozilla
--- a/dom/media/fmp4/apple/AppleDecoderModule.h
+++ b/dom/media/fmp4/apple/AppleDecoderModule.h
@@ -15,32 +15,32 @@ class AppleDecoderModule : public Platfo
 public:
   AppleDecoderModule();
   virtual ~AppleDecoderModule();
 
   virtual nsresult Startup() override;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   virtual bool SupportsMimeType(const nsACString& aMimeType) override;
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 
   static void Init();
   static nsresult CanDecode();
 
 private:
   friend class InitTask;
   friend class LinkTask;
   friend class UnlinkTask;
--- a/dom/media/fmp4/apple/AppleVDADecoder.cpp
+++ b/dom/media/fmp4/apple/AppleVDADecoder.cpp
@@ -4,17 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <CoreFoundation/CFString.h>
 
 #include "AppleUtils.h"
 #include "AppleVDADecoder.h"
 #include "AppleVDALinker.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "mp4_demuxer/H264.h"
 #include "MP4Decoder.h"
 #include "MediaData.h"
 #include "MacIOSurfaceImage.h"
 #include "mozilla/ArrayUtils.h"
 #include "nsAutoPtr.h"
 #include "nsCocoaFeatures.h"
 #include "nsThreadUtils.h"
@@ -28,37 +28,36 @@ PRLogModuleInfo* GetAppleMediaLog();
 #define LOG(...) PR_LOG(GetAppleMediaLog(), PR_LOG_DEBUG, (__VA_ARGS__))
 //#define LOG_MEDIA_SHA1
 #else
 #define LOG(...)
 #endif
 
 namespace mozilla {
 
-AppleVDADecoder::AppleVDADecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+AppleVDADecoder::AppleVDADecoder(const VideoInfo& aConfig,
                                FlushableMediaTaskQueue* aVideoTaskQueue,
                                MediaDataDecoderCallback* aCallback,
                                layers::ImageContainer* aImageContainer)
   : mTaskQueue(aVideoTaskQueue)
   , mCallback(aCallback)
   , mImageContainer(aImageContainer)
-  , mPictureWidth(aConfig.image_width)
-  , mPictureHeight(aConfig.image_height)
-  , mDisplayWidth(aConfig.display_width)
-  , mDisplayHeight(aConfig.display_height)
+  , mPictureWidth(aConfig.mImage.width)
+  , mPictureHeight(aConfig.mImage.height)
+  , mDisplayWidth(aConfig.mDisplay.width)
+  , mDisplayHeight(aConfig.mDisplay.height)
   , mDecoder(nullptr)
   , mIs106(!nsCocoaFeatures::OnLionOrLater())
 {
   MOZ_COUNT_CTOR(AppleVDADecoder);
   // TODO: Verify aConfig.mime_type.
 
+  mExtraData = aConfig.mExtraData;
+  mMaxRefFrames = 4;
   // Retrieve video dimensions from H264 SPS NAL.
-  mPictureWidth = aConfig.image_width;
-  mExtraData = aConfig.extra_data;
-  mMaxRefFrames = 4;
   mp4_demuxer::SPSData spsdata;
   if (mp4_demuxer::H264::DecodeSPSFromExtraData(mExtraData, spsdata)) {
     // max_num_ref_frames determines the size of the sliding window
     // we need to queue that many frames in order to guarantee proper
     // pts frames ordering. Use a minimum of 4 to ensure proper playback of
     // non compliant videos.
     mMaxRefFrames =
       std::min(std::max(mMaxRefFrames, spsdata.max_num_ref_frames + 1), 16u);
@@ -261,17 +260,16 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
     aFrameRef->duration,
     aFrameRef->is_sync_point ? " keyframe" : ""
   );
 
   nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
-  info.mHasVideo = true;
   gfx::IntRect visible = gfx::IntRect(0,
                                       0,
                                       mPictureWidth,
                                       mPictureHeight);
 
   nsRefPtr<layers::Image> image =
     mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
   layers::MacIOSurfaceImage* videoImage =
@@ -497,17 +495,17 @@ AppleVDADecoder::CreateOutputConfigurati
                             ArrayLength(outputKeys),
                             &kCFTypeDictionaryKeyCallBacks,
                             &kCFTypeDictionaryValueCallBacks);
 }
 
 /* static */
 already_AddRefed<AppleVDADecoder>
 AppleVDADecoder::CreateVDADecoder(
-  const mp4_demuxer::VideoDecoderConfig& aConfig,
+  const VideoInfo& aConfig,
   FlushableMediaTaskQueue* aVideoTaskQueue,
   MediaDataDecoderCallback* aCallback,
   layers::ImageContainer* aImageContainer)
 {
   nsRefPtr<AppleVDADecoder> decoder =
     new AppleVDADecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer);
   if (NS_FAILED(decoder->Init())) {
     return nullptr;
--- a/dom/media/fmp4/apple/AppleVDADecoder.h
+++ b/dom/media/fmp4/apple/AppleVDADecoder.h
@@ -55,22 +55,22 @@ public:
       , is_sync_point(aIs_sync_point)
     {
     }
   };
 
   // Return a new created AppleVDADecoder or nullptr if media or hardware is
   // not supported by current configuration.
   static already_AddRefed<AppleVDADecoder> CreateVDADecoder(
-    const mp4_demuxer::VideoDecoderConfig& aConfig,
+    const VideoInfo& aConfig,
     FlushableMediaTaskQueue* aVideoTaskQueue,
     MediaDataDecoderCallback* aCallback,
     layers::ImageContainer* aImageContainer);
 
-  AppleVDADecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  AppleVDADecoder(const VideoInfo& aConfig,
                   FlushableMediaTaskQueue* aVideoTaskQueue,
                   MediaDataDecoderCallback* aCallback,
                   layers::ImageContainer* aImageContainer);
   virtual ~AppleVDADecoder();
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
--- a/dom/media/fmp4/apple/AppleVTDecoder.cpp
+++ b/dom/media/fmp4/apple/AppleVTDecoder.cpp
@@ -29,17 +29,17 @@ PRLogModuleInfo* GetAppleMediaLog();
 #endif
 
 #ifdef LOG_MEDIA_SHA1
 #include "mozilla/SHA1.h"
 #endif
 
 namespace mozilla {
 
-AppleVTDecoder::AppleVTDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
                                FlushableMediaTaskQueue* aVideoTaskQueue,
                                MediaDataDecoderCallback* aCallback,
                                layers::ImageContainer* aImageContainer)
   : AppleVDADecoder(aConfig, aVideoTaskQueue, aCallback, aImageContainer)
   , mFormat(nullptr)
   , mSession(nullptr)
   , mIsHardwareAccelerated(false)
 {
--- a/dom/media/fmp4/apple/AppleVTDecoder.h
+++ b/dom/media/fmp4/apple/AppleVTDecoder.h
@@ -10,17 +10,17 @@
 #include "AppleVDADecoder.h"
 
 #include "VideoToolbox/VideoToolbox.h"
 
 namespace mozilla {
 
 class AppleVTDecoder : public AppleVDADecoder {
 public:
-  AppleVTDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  AppleVTDecoder(const VideoInfo& aConfig,
                  FlushableMediaTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback,
                  layers::ImageContainer* aImageContainer);
   virtual ~AppleVTDecoder();
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
--- a/dom/media/fmp4/eme/EMEAudioDecoder.h
+++ b/dom/media/fmp4/eme/EMEAudioDecoder.h
@@ -19,17 +19,17 @@ public:
   {}
 
   virtual void Error(GMPErr aErr) override;
 };
 
 class EMEAudioDecoder : public GMPAudioDecoder {
 public:
   EMEAudioDecoder(CDMProxy* aProxy,
-                  const mp4_demuxer::AudioDecoderConfig& aConfig,
+                  const AudioInfo& aConfig,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback)
    : GMPAudioDecoder(aConfig, aTaskQueue, aCallback, new EMEAudioCallbackAdapter(aCallback))
    , mProxy(aProxy)
   {
   }
 
 private:
--- a/dom/media/fmp4/eme/EMEDecoderModule.cpp
+++ b/dom/media/fmp4/eme/EMEDecoderModule.cpp
@@ -7,17 +7,17 @@
 #include "EMEDecoderModule.h"
 #include "EMEAudioDecoder.h"
 #include "EMEVideoDecoder.h"
 #include "MediaDataDecoderProxy.h"
 #include "mozIGeckoMediaPluginService.h"
 #include "mozilla/CDMProxy.h"
 #include "mozilla/unused.h"
 #include "nsServiceManagerUtils.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 
 namespace mozilla {
 
 class EMEDecryptor : public MediaDataDecoder {
 
 public:
 
   EMEDecryptor(MediaDataDecoder* aDecoder,
@@ -86,18 +86,19 @@ public:
     // for keys to become usable, and once they do we need to dispatch an event
     // to run the PDM on the same task queue, but since the decode task queue
     // is waiting in MP4Reader::Decode() for output our task would never run.
     // So we dispatch tasks to make all calls into the wrapped decoder.
     if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
       return NS_OK;
     }
 
+    nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
     mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
-                                  aSample->mCrypto.mSessionIds);
+                                  writer->mCrypto.mSessionIds);
 
     mProxy->Decrypt(aSample, new DeliverDecrypted(this, mTaskQueue));
     return NS_OK;
   }
 
   void Decrypted(MediaRawData* aSample) {
     MOZ_ASSERT(!mIsShutdown);
     nsresult rv = mTaskQueue->Dispatch(
@@ -181,18 +182,19 @@ private:
 
 nsresult
 EMEMediaDataDecoderProxy::Input(MediaRawData* aSample)
 {
   if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
     return NS_OK;
   }
 
+  nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
   mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
-                                aSample->mCrypto.mSessionIds);
+                                writer->mCrypto.mSessionIds);
 
   return MediaDataDecoderProxy::Input(aSample);
 }
 
 nsresult
 EMEMediaDataDecoderProxy::Shutdown()
 {
   nsresult rv = MediaDataDecoderProxy::Shutdown();
@@ -233,23 +235,23 @@ CreateDecoderWrapper(MediaDataDecoderCal
     return nullptr;
   }
 
   nsRefPtr<MediaDataDecoderProxy> decoder(new EMEMediaDataDecoderProxy(thread, aCallback, aProxy, aTaskQueue));
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-EMEDecoderModule::CreateVideoDecoder(const VideoDecoderConfig& aConfig,
+EMEDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer,
                                      FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
-  if (mCDMDecodesVideo && aConfig.crypto.mValid) {
+  if (mCDMDecodesVideo && aConfig.mCrypto.mValid) {
     nsRefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aVideoTaskQueue);
     wrapper->SetProxyTarget(new EMEVideoDecoder(mProxy,
                                                 aConfig,
                                                 aLayersBackend,
                                                 aImageContainer,
                                                 aVideoTaskQueue,
                                                 wrapper->Callback()));
     return wrapper.forget();
@@ -260,59 +262,59 @@ EMEDecoderModule::CreateVideoDecoder(con
                         aVideoTaskQueue,
                         aCallback,
                         aLayersBackend,
                         aImageContainer));
   if (!decoder) {
     return nullptr;
   }
 
-  if (!aConfig.crypto.mValid) {
+  if (!aConfig.mCrypto.mValid) {
     return decoder.forget();
   }
 
   nsRefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
                                                          aCallback,
                                                          mProxy));
   return emeDecoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-EMEDecoderModule::CreateAudioDecoder(const AudioDecoderConfig& aConfig,
+EMEDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                      FlushableMediaTaskQueue* aAudioTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
-  if (mCDMDecodesAudio && aConfig.crypto.mValid) {
+  if (mCDMDecodesAudio && aConfig.mCrypto.mValid) {
     nsRefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback, mProxy, aAudioTaskQueue);
     wrapper->SetProxyTarget(new EMEAudioDecoder(mProxy,
                                                 aConfig,
                                                 aAudioTaskQueue,
                                                 wrapper->Callback()));
     return wrapper.forget();
   }
 
   nsRefPtr<MediaDataDecoder> decoder(
     mPDM->CreateDecoder(aConfig, aAudioTaskQueue, aCallback));
   if (!decoder) {
     return nullptr;
   }
 
-  if (!aConfig.crypto.mValid) {
+  if (!aConfig.mCrypto.mValid) {
     return decoder.forget();
   }
 
   nsRefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
                                                          aCallback,
                                                          mProxy));
   return emeDecoder.forget();
 }
 
 PlatformDecoderModule::ConversionRequired
-EMEDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+EMEDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
-  if (aConfig.IsVideoConfig()) {
+  if (aConfig.IsVideo()) {
     return kNeedAVCC;
   } else {
     return kNeedNone;
   }
 }
 
 } // namespace mozilla
--- a/dom/media/fmp4/eme/EMEDecoderModule.h
+++ b/dom/media/fmp4/eme/EMEDecoderModule.h
@@ -12,43 +12,41 @@
 
 namespace mozilla {
 
 class CDMProxy;
 class FlushableMediaTaskQueue;
 
 class EMEDecoderModule : public PlatformDecoderModule {
 private:
-  typedef mp4_demuxer::AudioDecoderConfig AudioDecoderConfig;
-  typedef mp4_demuxer::VideoDecoderConfig VideoDecoderConfig;
 
 public:
   EMEDecoderModule(CDMProxy* aProxy,
                    PlatformDecoderModule* aPDM,
                    bool aCDMDecodesAudio,
                    bool aCDMDecodesVideo);
 
   virtual ~EMEDecoderModule();
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                     layers::LayersBackend aLayersBackend,
                     layers::ImageContainer* aImageContainer,
                     FlushableMediaTaskQueue* aVideoTaskQueue,
                     MediaDataDecoderCallback* aCallback) override;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 
 private:
   nsRefPtr<CDMProxy> mProxy;
   // Will be null if CDM has decoding capability.
   nsRefPtr<PlatformDecoderModule> mPDM;
   // We run the PDM on its own task queue.
   nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
   bool mCDMDecodesAudio;
--- a/dom/media/fmp4/eme/EMEVideoDecoder.h
+++ b/dom/media/fmp4/eme/EMEVideoDecoder.h
@@ -24,24 +24,30 @@ public:
   {}
 
   virtual void Error(GMPErr aErr) override;
 };
 
 class EMEVideoDecoder : public GMPVideoDecoder {
 public:
   EMEVideoDecoder(CDMProxy* aProxy,
-                  const mp4_demuxer::VideoDecoderConfig& aConfig,
+                  const VideoInfo& aConfig,
                   layers::LayersBackend aLayersBackend,
                   layers::ImageContainer* aImageContainer,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback)
-   : GMPVideoDecoder(aConfig, aLayersBackend, aImageContainer, aTaskQueue, aCallback,
-                     new EMEVideoCallbackAdapter(aCallback, VideoInfo(aConfig.display_width,
-                                                                      aConfig.display_height), aImageContainer))
+   : GMPVideoDecoder(aConfig,
+                     aLayersBackend,
+                     aImageContainer,
+                     aTaskQueue,
+                     aCallback,
+                     new EMEVideoCallbackAdapter(aCallback,
+                                                 VideoInfo(aConfig.mDisplay.width,
+                                                           aConfig.mDisplay.height),
+                                                 aImageContainer))
    , mProxy(aProxy)
   {
   }
 
 private:
   virtual void InitTags(nsTArray<nsCString>& aTags) override;
   virtual nsCString GetNodeId() override;
   virtual GMPUnique<GMPVideoEncodedFrame>::Ptr CreateFrame(MediaRawData* aSample) override;
--- a/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.cpp
@@ -3,33 +3,32 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaTaskQueue.h"
 #include "FFmpegRuntimeLinker.h"
 
 #include "FFmpegAudioDecoder.h"
-#include "mp4_demuxer/Adts.h"
 
 #define MAX_CHANNELS 16
 
 namespace mozilla
 {
 
 FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
   FlushableMediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
-  const mp4_demuxer::AudioDecoderConfig& aConfig)
-  : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mime_type))
+  const AudioInfo& aConfig)
+  : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mMimeType))
   , mCallback(aCallback)
 {
   MOZ_COUNT_CTOR(FFmpegAudioDecoder);
   // Use a new DataBuffer as the object will be modified during initialization.
   mExtraData = new DataBuffer;
-  mExtraData->AppendElements(*aConfig.audio_specific_config);
+  mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
 }
 
 nsresult
 FFmpegAudioDecoder<LIBAV_VER>::Init()
 {
   nsresult rv = FFmpegDataDecoder::Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
--- a/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegAudioDecoder.h
@@ -17,17 +17,17 @@ template <int V> class FFmpegAudioDecode
 };
 
 template <>
 class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
 {
 public:
   FFmpegAudioDecoder(FlushableMediaTaskQueue* aTaskQueue,
                      MediaDataDecoderCallback* aCallback,
-                     const mp4_demuxer::AudioDecoderConfig& aConfig);
+                     const AudioInfo& aConfig);
   virtual ~FFmpegAudioDecoder();
 
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Drain() override;
   static AVCodecID GetCodecId(const nsACString& aMimeType);
 
 private:
--- a/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegDecoderModule.h
@@ -24,50 +24,50 @@ public:
     nsRefPtr<PlatformDecoderModule> pdm = new FFmpegDecoderModule();
     return pdm.forget();
   }
 
   FFmpegDecoderModule() {}
   virtual ~FFmpegDecoderModule() {}
 
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override
   {
     nsRefPtr<MediaDataDecoder> decoder =
       new FFmpegH264Decoder<V>(aVideoTaskQueue, aCallback, aConfig,
                                aImageContainer);
     return decoder.forget();
   }
 
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override
   {
     nsRefPtr<MediaDataDecoder> decoder =
       new FFmpegAudioDecoder<V>(aAudioTaskQueue, aCallback, aConfig);
     return decoder.forget();
   }
 
   virtual bool SupportsMimeType(const nsACString& aMimeType) override
   {
     return FFmpegAudioDecoder<V>::GetCodecId(aMimeType) != AV_CODEC_ID_NONE ||
       FFmpegH264Decoder<V>::GetCodecId(aMimeType) != AV_CODEC_ID_NONE;
   }
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override
   {
-    if (aConfig.IsVideoConfig() &&
-        (aConfig.mime_type.EqualsLiteral("video/avc") ||
-         aConfig.mime_type.EqualsLiteral("video/mp4"))) {
+    if (aConfig.IsVideo() &&
+        (aConfig.mMimeType.EqualsLiteral("video/avc") ||
+         aConfig.mMimeType.EqualsLiteral("video/mp4"))) {
       return PlatformDecoderModule::kNeedAVCC;
     } else {
       return kNeedNone;
     }
   }
 
 };
 
--- a/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp
+++ b/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.cpp
@@ -4,42 +4,42 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaTaskQueue.h"
 #include "nsThreadUtils.h"
 #include "nsAutoPtr.h"
 #include "ImageContainer.h"
 
-#include "mp4_demuxer/mp4_demuxer.h"
+#include "MediaInfo.h"
 
 #include "FFmpegH264Decoder.h"
 
 #define GECKO_FRAME_TYPE 0x00093CC0
 
 typedef mozilla::layers::Image Image;
 typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
 
 namespace mozilla
 {
 
 FFmpegH264Decoder<LIBAV_VER>::FFmpegH264Decoder(
   FlushableMediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
-  const mp4_demuxer::VideoDecoderConfig& aConfig,
+  const VideoInfo& aConfig,
   ImageContainer* aImageContainer)
-  : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mime_type))
+  : FFmpegDataDecoder(aTaskQueue, GetCodecId(aConfig.mMimeType))
   , mCallback(aCallback)
   , mImageContainer(aImageContainer)
-  , mDisplayWidth(aConfig.display_width)
-  , mDisplayHeight(aConfig.display_height)
+  , mDisplayWidth(aConfig.mDisplay.width)
+  , mDisplayHeight(aConfig.mDisplay.height)
 {
   MOZ_COUNT_CTOR(FFmpegH264Decoder);
   // Use a new DataBuffer as the object will be modified during initialization.
   mExtraData = new DataBuffer;
-  mExtraData->AppendElements(*aConfig.extra_data);
+  mExtraData->AppendElements(*aConfig.mExtraData);
 }
 
 nsresult
 FFmpegH264Decoder<LIBAV_VER>::Init()
 {
   nsresult rv = FFmpegDataDecoder::Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
@@ -77,18 +77,16 @@ FFmpegH264Decoder<LIBAV_VER>::DoDecodeFr
     mCallback->Error();
     return DecodeResult::DECODE_ERROR;
   }
 
   // If we've decoded a frame then we need to output it
   if (decoded) {
     VideoInfo info;
     info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
-    info.mStereoMode = StereoMode::MONO;
-    info.mHasVideo = true;
 
     VideoData::YCbCrBuffer b;
     b.mPlanes[0].mData = mFrame->data[0];
     b.mPlanes[0].mStride = mFrame->linesize[0];
     b.mPlanes[0].mHeight = mFrame->height;
     b.mPlanes[0].mWidth = mFrame->width;
     b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
 
--- a/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.h
+++ b/dom/media/fmp4/ffmpeg/FFmpegH264Decoder.h
@@ -27,17 +27,17 @@ class FFmpegH264Decoder<LIBAV_VER> : pub
     DECODE_FRAME,
     DECODE_NO_FRAME,
     DECODE_ERROR
   };
 
 public:
   FFmpegH264Decoder(FlushableMediaTaskQueue* aTaskQueue,
                     MediaDataDecoderCallback* aCallback,
-                    const mp4_demuxer::VideoDecoderConfig& aConfig,
+                    const VideoInfo& aConfig,
                     ImageContainer* aImageContainer);
   virtual ~FFmpegH264Decoder();
 
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Drain() override;
   virtual nsresult Flush() override;
   static AVCodecID GetCodecId(const nsACString& aMimeType);
--- a/dom/media/fmp4/gmp/GMPAudioDecoder.cpp
+++ b/dom/media/fmp4/gmp/GMPAudioDecoder.cpp
@@ -1,17 +1,17 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "GMPAudioDecoder.h"
 #include "nsServiceManagerUtils.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 
 namespace mozilla {
 
 #if defined(DEBUG)
 bool IsOnGMPThread()
 {
   nsCOMPtr<mozIGeckoMediaPluginService> mps = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
   MOZ_ASSERT(mps);
@@ -148,23 +148,23 @@ GMPAudioDecoder::GetGMPAPI(GMPInitDoneRu
   }
 }
 
 void
 GMPAudioDecoder::GMPInitDone(GMPAudioDecoderProxy* aGMP)
 {
   MOZ_ASSERT(aGMP);
   nsTArray<uint8_t> codecSpecific;
-  codecSpecific.AppendElements(mConfig.audio_specific_config->Elements(),
-                               mConfig.audio_specific_config->Length());
+  codecSpecific.AppendElements(mConfig.mCodecSpecificConfig->Elements(),
+                               mConfig.mCodecSpecificConfig->Length());
 
   nsresult rv = aGMP->InitDecode(kGMPAudioCodecAAC,
-                                 mConfig.channel_count,
-                                 mConfig.bits_per_sample,
-                                 mConfig.samples_per_second,
+                                 mConfig.mChannels,
+                                 mConfig.mBitDepth,
+                                 mConfig.mRate,
                                  codecSpecific,
                                  mAdapter);
   if (NS_SUCCEEDED(rv)) {
     mGMP = aGMP;
   }
 }
 
 nsresult
@@ -199,17 +199,17 @@ GMPAudioDecoder::Input(MediaRawData* aSa
   nsRefPtr<MediaRawData> sample(aSample);
   if (!mGMP) {
     mCallback->Error();
     return NS_ERROR_FAILURE;
   }
 
   mAdapter->SetLastStreamOffset(sample->mOffset);
 
-  gmp::GMPAudioSamplesImpl samples(sample, mConfig.channel_count, mConfig.samples_per_second);
+  gmp::GMPAudioSamplesImpl samples(sample, mConfig.mChannels, mConfig.mRate);
   nsresult rv = mGMP->Decode(samples);
   if (NS_FAILED(rv)) {
     mCallback->Error();
     return rv;
   }
 
   return NS_OK;
 }
--- a/dom/media/fmp4/gmp/GMPAudioDecoder.h
+++ b/dom/media/fmp4/gmp/GMPAudioDecoder.h
@@ -42,29 +42,29 @@ private:
 
   int64_t mAudioFrameSum;
   int64_t mAudioFrameOffset;
   bool mMustRecaptureAudioPosition;
 };
 
 class GMPAudioDecoder : public MediaDataDecoder {
 protected:
-  GMPAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  GMPAudioDecoder(const AudioInfo& aConfig,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback,
                   AudioCallbackAdapter* aAdapter)
    : mConfig(aConfig)
    , mCallback(aCallback)
    , mGMP(nullptr)
    , mAdapter(aAdapter)
   {
   }
 
 public:
-  GMPAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  GMPAudioDecoder(const AudioInfo& aConfig,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback)
    : mConfig(aConfig)
    , mCallback(aCallback)
    , mGMP(nullptr)
    , mAdapter(new AudioCallbackAdapter(aCallback))
   {
   }
@@ -132,17 +132,17 @@ private:
     }
 
   private:
     nsRefPtr<GMPAudioDecoder> mDecoder;
     nsRefPtr<GMPInitDoneRunnable> mGMPInitDone;
   };
   void GMPInitDone(GMPAudioDecoderProxy* aGMP);
 
-  const mp4_demuxer::AudioDecoderConfig& mConfig;
+  const AudioInfo& mConfig;
   MediaDataDecoderCallbackProxy* mCallback;
   nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
   GMPAudioDecoderProxy* mGMP;
   nsAutoPtr<AudioCallbackAdapter> mAdapter;
 };
 
 } // namespace mozilla
 
--- a/dom/media/fmp4/gmp/GMPDecoderModule.cpp
+++ b/dom/media/fmp4/gmp/GMPDecoderModule.cpp
@@ -35,55 +35,55 @@ CreateDecoderWrapper(MediaDataDecoderCal
     return nullptr;
   }
 
   nsRefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread, aCallback));
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-GMPDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+GMPDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer,
                                      FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
-  if (!aConfig.mime_type.EqualsLiteral("video/avc")) {
+  if (!aConfig.mMimeType.EqualsLiteral("video/avc")) {
     return nullptr;
   }
 
   nsRefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback);
   wrapper->SetProxyTarget(new GMPVideoDecoder(aConfig,
                                               aLayersBackend,
                                               aImageContainer,
                                               aVideoTaskQueue,
                                               wrapper->Callback()));
   return wrapper.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-GMPDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+GMPDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                      FlushableMediaTaskQueue* aAudioTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
-  if (!aConfig.mime_type.EqualsLiteral("audio/mp4a-latm")) {
+  if (!aConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
     return nullptr;
   }
 
   nsRefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aCallback);
   wrapper->SetProxyTarget(new GMPAudioDecoder(aConfig,
                                               aAudioTaskQueue,
                                               wrapper->Callback()));
   return wrapper.forget();
 }
 
 PlatformDecoderModule::ConversionRequired
-GMPDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+GMPDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
   // GMPVideoCodecType::kGMPVideoCodecH264 specifies that encoded frames must be in AVCC format.
-  if (aConfig.IsVideoConfig()) {
+  if (aConfig.IsVideo()) {
     return kNeedAVCC;
   } else {
     return kNeedNone;
   }
 }
 
 } // namespace mozilla
--- a/dom/media/fmp4/gmp/GMPDecoderModule.h
+++ b/dom/media/fmp4/gmp/GMPDecoderModule.h
@@ -14,27 +14,27 @@ namespace mozilla {
 class GMPDecoderModule : public PlatformDecoderModule {
 public:
   GMPDecoderModule();
 
   virtual ~GMPDecoderModule();
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 };
 
 } // namespace mozilla
 
 #endif // GMPDecoderModule_h_
--- a/dom/media/fmp4/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/fmp4/gmp/GMPVideoDecoder.cpp
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "GMPVideoDecoder.h"
 #include "GMPVideoHost.h"
 #include "mozilla/Endian.h"
 #include "prsystem.h"
 #include "MediaData.h"
-#include "mp4_demuxer/DecoderData.h"
 
 namespace mozilla {
 
 #if defined(DEBUG)
 extern bool IsOnGMPThread();
 #endif
 
 void
@@ -144,18 +143,18 @@ GMPVideoDecoder::CreateFrame(MediaRawDat
       uint32_t length = BigEndian::readUint32(buf) + kNALLengthSize;
       *reinterpret_cast<uint32_t *>(buf) = length;
       buf += length;
     }
   }
 
   frame->SetBufferType(GMP_BufferLength32);
 
-  frame->SetEncodedWidth(mConfig.display_width);
-  frame->SetEncodedHeight(mConfig.display_height);
+  frame->SetEncodedWidth(mConfig.mDisplay.width);
+  frame->SetEncodedHeight(mConfig.mDisplay.height);
   frame->SetTimeStamp(aSample->mTime);
   frame->SetCompleteFrame(true);
   frame->SetDuration(aSample->mDuration);
   frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);
 
   return frame;
 }
 
@@ -179,23 +178,23 @@ GMPVideoDecoder::GMPInitDone(GMPVideoDec
   MOZ_ASSERT(aHost && aGMP);
 
   GMPVideoCodec codec;
   memset(&codec, 0, sizeof(codec));
 
   codec.mGMPApiVersion = kGMPVersion33;
 
   codec.mCodecType = kGMPVideoCodecH264;
-  codec.mWidth = mConfig.display_width;
-  codec.mHeight = mConfig.display_height;
+  codec.mWidth = mConfig.mDisplay.width;
+  codec.mHeight = mConfig.mDisplay.height;
 
   nsTArray<uint8_t> codecSpecific;
   codecSpecific.AppendElement(0); // mPacketizationMode.
-  codecSpecific.AppendElements(mConfig.extra_data->Elements(),
-                               mConfig.extra_data->Length());
+  codecSpecific.AppendElements(mConfig.mExtraData->Elements(),
+                               mConfig.mExtraData->Length());
 
   nsresult rv = aGMP->InitDecode(codec,
                                  codecSpecific,
                                  mAdapter,
                                  PR_GetNumberOfProcessors());
   if (NS_SUCCEEDED(rv)) {
     mGMP = aGMP;
     mHost = aHost;
--- a/dom/media/fmp4/gmp/GMPVideoDecoder.h
+++ b/dom/media/fmp4/gmp/GMPVideoDecoder.h
@@ -7,17 +7,17 @@
 #if !defined(GMPVideoDecoder_h_)
 #define GMPVideoDecoder_h_
 
 #include "GMPVideoDecoderProxy.h"
 #include "ImageContainer.h"
 #include "MediaDataDecoderProxy.h"
 #include "PlatformDecoderModule.h"
 #include "mozIGeckoMediaPluginService.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 
 namespace mozilla {
 
 class VideoCallbackAdapter : public GMPVideoDecoderCallbackProxy {
 public:
   VideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
                        VideoInfo aVideoInfo,
                        layers::ImageContainer* aImageContainer)
@@ -46,44 +46,44 @@ private:
   int64_t mLastStreamOffset;
 
   VideoInfo mVideoInfo;
   nsRefPtr<layers::ImageContainer> mImageContainer;
 };
 
 class GMPVideoDecoder : public MediaDataDecoder {
 protected:
-  GMPVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  GMPVideoDecoder(const VideoInfo& aConfig,
                   layers::LayersBackend aLayersBackend,
                   layers::ImageContainer* aImageContainer,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback,
                   VideoCallbackAdapter* aAdapter)
    : mConfig(aConfig)
    , mCallback(aCallback)
    , mGMP(nullptr)
    , mHost(nullptr)
    , mAdapter(aAdapter)
    , mConvertNALUnitLengths(false)
   {
   }
 
 public:
-  GMPVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  GMPVideoDecoder(const VideoInfo& aConfig,
                   layers::LayersBackend aLayersBackend,
                   layers::ImageContainer* aImageContainer,
                   MediaTaskQueue* aTaskQueue,
                   MediaDataDecoderCallbackProxy* aCallback)
    : mConfig(aConfig)
    , mCallback(aCallback)
    , mGMP(nullptr)
    , mHost(nullptr)
    , mAdapter(new VideoCallbackAdapter(aCallback,
-                                       VideoInfo(aConfig.display_width,
-                                                 aConfig.display_height),
+                                       VideoInfo(aConfig.mDisplay.width,
+                                                 aConfig.mDisplay.height),
                                        aImageContainer))
    , mConvertNALUnitLengths(false)
   {
   }
 
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
@@ -148,17 +148,17 @@ private:
     }
 
   private:
     nsRefPtr<GMPVideoDecoder> mDecoder;
     nsRefPtr<GMPInitDoneRunnable> mGMPInitDone;
   };
   void GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost);
 
-  const mp4_demuxer::VideoDecoderConfig& mConfig;
+  const VideoInfo& mConfig;
   MediaDataDecoderCallbackProxy* mCallback;
   nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
   GMPVideoDecoderProxy* mGMP;
   GMPVideoHost* mHost;
   nsAutoPtr<VideoCallbackAdapter> mAdapter;
   bool mConvertNALUnitLengths;
 };
 
--- a/dom/media/fmp4/gonk/GonkAudioDecoderManager.cpp
+++ b/dom/media/fmp4/gonk/GonkAudioDecoderManager.cpp
@@ -15,16 +15,17 @@
 #include "prlog.h"
 #include "stagefright/MediaBuffer.h"
 #include "stagefright/MetaData.h"
 #include "stagefright/MediaErrors.h"
 #include <stagefright/foundation/AMessage.h>
 #include <stagefright/foundation/ALooper.h>
 #include "media/openmax/OMX_Audio.h"
 #include "MediaData.h"
+#include "MediaInfo.h"
 
 #include <android/log.h>
 #define GADM_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkAudioDecoderManager", __VA_ARGS__)
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* GetDemuxerLog();
 #define LOG(...) PR_LOG(GetDemuxerLog(), PR_LOG_DEBUG, (__VA_ARGS__))
 #else
@@ -34,30 +35,30 @@ PRLogModuleInfo* GetDemuxerLog();
 
 using namespace android;
 typedef android::MediaCodecProxy MediaCodecProxy;
 
 namespace mozilla {
 
 GonkAudioDecoderManager::GonkAudioDecoderManager(
   MediaTaskQueue* aTaskQueue,
-  const mp4_demuxer::AudioDecoderConfig& aConfig)
+  const AudioInfo& aConfig)
   : GonkDecoderManager(aTaskQueue)
-  , mAudioChannels(aConfig.channel_count)
-  , mAudioRate(aConfig.samples_per_second)
-  , mAudioProfile(aConfig.aac_profile)
+  , mAudioChannels(aConfig.mChannels)
+  , mAudioRate(aConfig.mRate)
+  , mAudioProfile(aConfig.mProfile)
   , mUseAdts(true)
   , mAudioBuffer(nullptr)
 {
   MOZ_COUNT_CTOR(GonkAudioDecoderManager);
   MOZ_ASSERT(mAudioChannels);
-  mUserData.AppendElements(aConfig.audio_specific_config->Elements(),
-                           aConfig.audio_specific_config->Length());
+  mUserData.AppendElements(aConfig.mCodecSpecificConfig->Elements(),
+                           aConfig.mCodecSpecificConfig->Length());
   // Pass through mp3 without applying an ADTS header.
-  if (!aConfig.mime_type.EqualsLiteral("audio/mp4a-latm")) {
+  if (!aConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
       mUseAdts = false;
   }
 }
 
 GonkAudioDecoderManager::~GonkAudioDecoderManager()
 {
   MOZ_COUNT_DTOR(GonkAudioDecoderManager);
 }
--- a/dom/media/fmp4/gonk/GonkAudioDecoderManager.h
+++ b/dom/media/fmp4/gonk/GonkAudioDecoderManager.h
@@ -19,17 +19,17 @@ class MOZ_EXPORT MediaBuffer;
 } // namespace android
 
 namespace mozilla {
 
 class GonkAudioDecoderManager : public GonkDecoderManager {
 typedef android::MediaCodecProxy MediaCodecProxy;
 public:
   GonkAudioDecoderManager(MediaTaskQueue* aTaskQueue,
-                          const mp4_demuxer::AudioDecoderConfig& aConfig);
+                          const AudioInfo& aConfig);
   ~GonkAudioDecoderManager();
 
   virtual android::sp<MediaCodecProxy> Init(MediaDataDecoderCallback* aCallback) override;
 
   virtual nsresult Output(int64_t aStreamOffset,
                           nsRefPtr<MediaData>& aOutput) override;
 
   virtual nsresult Flush() override;
--- a/dom/media/fmp4/gonk/GonkDecoderModule.cpp
+++ b/dom/media/fmp4/gonk/GonkDecoderModule.cpp
@@ -22,43 +22,43 @@ GonkDecoderModule::~GonkDecoderModule()
 /* static */
 void
 GonkDecoderModule::Init()
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
 }
 
 already_AddRefed<MediaDataDecoder>
-GonkDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+GonkDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                      mozilla::layers::LayersBackend aLayersBackend,
                                      mozilla::layers::ImageContainer* aImageContainer,
                                      FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
   new GonkMediaDataDecoder(new GonkVideoDecoderManager(aVideoTaskQueue,
                                                        aImageContainer, aConfig),
                            aVideoTaskQueue, aCallback);
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-GonkDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+GonkDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                       FlushableMediaTaskQueue* aAudioTaskQueue,
                                       MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
   new GonkMediaDataDecoder(new GonkAudioDecoderManager(aAudioTaskQueue, aConfig),
                            aAudioTaskQueue, aCallback);
   return decoder.forget();
 }
 
 PlatformDecoderModule::ConversionRequired
-GonkDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+GonkDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
-  if (aConfig.IsVideoConfig()) {
+  if (aConfig.IsVideo()) {
     return kNeedAnnexB;
   } else {
     return kNeedNone;
   }
 }
 
 } // namespace mozilla
--- a/dom/media/fmp4/gonk/GonkDecoderModule.h
+++ b/dom/media/fmp4/gonk/GonkDecoderModule.h
@@ -13,29 +13,29 @@ namespace mozilla {
 
 class GonkDecoderModule : public PlatformDecoderModule {
 public:
   GonkDecoderModule();
   virtual ~GonkDecoderModule();
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      mozilla::layers::LayersBackend aLayersBackend,
                      mozilla::layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   // Decode thread.
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   static void Init();
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/fmp4/gonk/GonkVideoDecoderManager.cpp
+++ b/dom/media/fmp4/gonk/GonkVideoDecoderManager.cpp
@@ -16,17 +16,16 @@
 #include "prlog.h"
 #include "stagefright/MediaBuffer.h"
 #include "stagefright/MetaData.h"
 #include "stagefright/MediaErrors.h"
 #include <stagefright/foundation/ADebug.h>
 #include <stagefright/foundation/AMessage.h>
 #include <stagefright/foundation/AString.h>
 #include <stagefright/foundation/ALooper.h>
-#include "mp4_demuxer/AnnexB.h"
 #include "GonkNativeWindow.h"
 #include "GonkNativeWindowClient.h"
 #include "mozilla/layers/GrallocTextureClient.h"
 #include "mozilla/layers/TextureClient.h"
 
 #define READ_OUTPUT_BUFFER_TIMEOUT_US  3000
 
 #include <android/log.h>
@@ -42,34 +41,32 @@ using namespace mozilla::layers;
 using namespace android;
 typedef android::MediaCodecProxy MediaCodecProxy;
 
 namespace mozilla {
 
 GonkVideoDecoderManager::GonkVideoDecoderManager(
   MediaTaskQueue* aTaskQueue,
   mozilla::layers::ImageContainer* aImageContainer,
-  const mp4_demuxer::VideoDecoderConfig& aConfig)
+  const VideoInfo& aConfig)
   : GonkDecoderManager(aTaskQueue)
   , mImageContainer(aImageContainer)
   , mReaderCallback(nullptr)
   , mColorConverterBufferSize(0)
   , mNativeWindow(nullptr)
   , mPendingVideoBuffersLock("GonkVideoDecoderManager::mPendingVideoBuffersLock")
 {
   NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread.");
   MOZ_ASSERT(mImageContainer);
   MOZ_COUNT_CTOR(GonkVideoDecoderManager);
-  mVideoWidth  = aConfig.display_width;
-  mVideoHeight = aConfig.display_height;
-  mDisplayWidth = aConfig.display_width;
-  mDisplayHeight = aConfig.display_height;
-  mInfo.mVideo.mHasVideo = true;
-  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
-  mInfo.mVideo.mDisplay = displaySize;
+  mVideoWidth  = aConfig.mDisplay.width;
+  mVideoHeight = aConfig.mDisplay.height;
+  mDisplayWidth = aConfig.mDisplay.width;
+  mDisplayHeight = aConfig.mDisplay.height;
+  mInfo.mVideo = aConfig;
 
   nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
   nsIntSize frameSize(mVideoWidth, mVideoHeight);
   mPicture = pictureRect;
   mInitialFrame = frameSize;
   mHandler = new MessageHandler(this);
   mVideoListener = new VideoResourceListener(this);
 
--- a/dom/media/fmp4/gonk/GonkVideoDecoderManager.h
+++ b/dom/media/fmp4/gonk/GonkVideoDecoderManager.h
@@ -35,17 +35,17 @@ class TextureClient;
 
 class GonkVideoDecoderManager : public GonkDecoderManager {
 typedef android::MediaCodecProxy MediaCodecProxy;
 typedef mozilla::layers::TextureClient TextureClient;
 
 public:
   GonkVideoDecoderManager(MediaTaskQueue* aTaskQueue,
                           mozilla::layers::ImageContainer* aImageContainer,
-		                      const mp4_demuxer::VideoDecoderConfig& aConfig);
+                          const VideoInfo& aConfig);
 
   ~GonkVideoDecoderManager();
 
   virtual android::sp<MediaCodecProxy> Init(MediaDataDecoderCallback* aCallback) override;
 
   virtual nsresult Output(int64_t aStreamOffset,
                           nsRefPtr<MediaData>& aOutput) override;
 
--- a/dom/media/fmp4/wmf/MFTDecoder.cpp
+++ b/dom/media/fmp4/wmf/MFTDecoder.cpp
@@ -43,25 +43,27 @@ MFTDecoder::Create(const GUID& aMFTClsID
                         reinterpret_cast<void**>(static_cast<IMFTransform**>(byRef(mDecoder))));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   return S_OK;
 }
 
 HRESULT
 MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
-                          IMFMediaType* aOutputType)
+                          IMFMediaType* aOutputType,
+                          ConfigureOutputCallback aCallback,
+                          void* aData)
 {
   mOutputType = aOutputType;
 
   // Set the input type to the one the caller gave us...
   HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  hr = SetDecoderOutputType();
+  hr = SetDecoderOutputType(aCallback, aData);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = mDecoder->GetInputStreamInfo(0, &mInputStreamInfo);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
@@ -76,29 +78,33 @@ MFTDecoder::GetAttributes()
 {
   RefPtr<IMFAttributes> attr;
   HRESULT hr = mDecoder->GetAttributes(byRef(attr));
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
   return attr;
 }
 
 HRESULT
-MFTDecoder::SetDecoderOutputType()
+MFTDecoder::SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData)
 {
   NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
 
   // Iterate the enumerate the output types, until we find one compatible
   // with what we need.
   HRESULT hr;
   RefPtr<IMFMediaType> outputType;
   UINT32 typeIndex = 0;
   while (SUCCEEDED(mDecoder->GetOutputAvailableType(0, typeIndex++, byRef(outputType)))) {
     BOOL resultMatch;
     hr = mOutputType->Compare(outputType, MF_ATTRIBUTES_MATCH_OUR_ITEMS, &resultMatch);
     if (SUCCEEDED(hr) && resultMatch == TRUE) {
+      if (aCallback) {
+        hr = aCallback(outputType, aData);
+        NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+      }
       hr = mDecoder->SetOutputType(0, outputType, 0);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       hr = mDecoder->GetOutputStreamInfo(0, &mOutputStreamInfo);
       NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
       mMFTProvidesOutputSamples = IsFlagSet(mOutputStreamInfo.dwFlags, MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
 
@@ -195,18 +201,22 @@ HRESULT
 MFTDecoder::Output(RefPtr<IMFSample>* aOutput)
 {
   NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
 
   HRESULT hr;
 
   MFT_OUTPUT_DATA_BUFFER output = {0};
 
+  bool providedSample = false;
   RefPtr<IMFSample> sample;
-  if (!mMFTProvidesOutputSamples) {
+  if (*aOutput) {
+    output.pSample = *aOutput;
+    providedSample = true;
+  } else if (!mMFTProvidesOutputSamples) {
     hr = CreateOutputSample(&sample);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
     output.pSample = sample;
   }
 
   DWORD status = 0;
   hr = mDecoder->ProcessOutput(0, 1, &output, &status);
   if (output.pEvents) {
@@ -216,17 +226,17 @@ MFTDecoder::Output(RefPtr<IMFSample>* aO
     output.pEvents = nullptr;
   }
 
   if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
     // Type change, probably geometric aperature change.
     // Reconfigure decoder output type, so that GetOutputMediaType()
     // returns the new type, and return the error code to caller.
     // This is an expected failure, so don't warn on encountering it.
-    hr = SetDecoderOutputType();
+    hr = SetDecoderOutputType(nullptr, nullptr);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
     // Return the error, so that the caller knows to retry.
     return MF_E_TRANSFORM_STREAM_CHANGE;
   }
 
   if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
     // Not enough input to produce output. This is an expected failure,
     // so don't warn on encountering it.
@@ -238,17 +248,17 @@ MFTDecoder::Output(RefPtr<IMFSample>* aO
   MOZ_ASSERT(output.pSample);
 
   if (mDiscontinuity) {
     output.pSample->SetUINT32(MFSampleExtension_Discontinuity, TRUE);
     mDiscontinuity = false;
   }
 
   *aOutput = output.pSample; // AddRefs
-  if (mMFTProvidesOutputSamples) {
+  if (mMFTProvidesOutputSamples && !providedSample) {
     // If the MFT is providing samples, we must release the sample here.
     // Typically only the H.264 MFT provides samples when using DXVA,
     // and it always re-uses the same sample, so if we don't release it
     // MFT::ProcessOutput() deadlocks waiting for the sample to be released.
     output.pSample->Release();
     output.pSample = nullptr;
   }
 
@@ -259,19 +269,25 @@ HRESULT
 MFTDecoder::Input(const uint8_t* aData,
                   uint32_t aDataSize,
                   int64_t aTimestamp)
 {
   NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
 
   RefPtr<IMFSample> input;
   HRESULT hr = CreateInputSample(aData, aDataSize, aTimestamp, &input);
-  NS_ENSURE_TRUE(SUCCEEDED(hr) && input!=nullptr, hr);
+  NS_ENSURE_TRUE(SUCCEEDED(hr) && input != nullptr, hr);
+
+  return Input(input);
+}
 
-  hr = mDecoder->ProcessInput(0, input, 0);
+HRESULT
+MFTDecoder::Input(IMFSample* aSample)
+{
+  HRESULT hr = mDecoder->ProcessInput(0, aSample, 0);
   if (hr == MF_E_NOTACCEPTING) {
     // MFT *already* has enough data to produce a sample. Retrieve it.
     return MF_E_NOTACCEPTING;
   }
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   return S_OK;
 }
--- a/dom/media/fmp4/wmf/MFTDecoder.h
+++ b/dom/media/fmp4/wmf/MFTDecoder.h
@@ -31,34 +31,38 @@ public:
 
   // Sets the input and output media types. Call after Init().
   //
   // Params:
   //  - aInputType needs at least major and minor types set.
   //  - aOutputType needs at least major and minor types set.
   //    This is used to select the matching output type out
   //    of all the available output types of the MFT.
+  typedef HRESULT (*ConfigureOutputCallback)(IMFMediaType* aOutputType, void* aData);
   HRESULT SetMediaTypes(IMFMediaType* aInputType,
-                        IMFMediaType* aOutputType);
+                        IMFMediaType* aOutputType,
+                        ConfigureOutputCallback aCallback = nullptr,
+                        void* aData = nullptr);
 
   // Returns the MFT's IMFAttributes object.
   TemporaryRef<IMFAttributes> GetAttributes();
 
   // Retrieves the media type being output. This may not be valid until
   //  the first sample is decoded.
   HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
 
   // Submits data into the MFT for processing.
   //
   // Returns:
   //  - MF_E_NOTACCEPTING if the decoder can't accept input. The data
   //    must be resubmitted after Output() stops producing output.
   HRESULT Input(const uint8_t* aData,
                 uint32_t aDataSize,
                 int64_t aTimestampUsecs);
+  HRESULT Input(IMFSample* aSample);
 
   // Retrieves output from the MFT. Call this once Input() returns
   // MF_E_NOTACCEPTING. Some MFTs with hardware acceleration (the H.264
   // decoder MFT in particular) can't handle it if clients hold onto
   // references to the output IMFSample, so don't do that.
   //
   // Returns:
   //  - MF_E_TRANSFORM_STREAM_CHANGE if the underlying stream output
@@ -73,17 +77,17 @@ public:
   // input data. Use before seeking.
   HRESULT Flush();
 
   // Sends a message to the MFT.
   HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData);
 
 private:
 
-  HRESULT SetDecoderOutputType();
+  HRESULT SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData);
 
   HRESULT CreateInputSample(const uint8_t* aData,
                             uint32_t aDataSize,
                             int64_t aTimestampUsecs,
                             RefPtr<IMFSample>* aOutSample);
 
   HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);
 
--- a/dom/media/fmp4/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/fmp4/wmf/WMFAudioMFTManager.cpp
@@ -1,16 +1,16 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WMFAudioMFTManager.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "VideoUtils.h"
 #include "WMFUtils.h"
 #include "nsTArray.h"
 
 #include "prlog.h"
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* GetDemuxerLog();
@@ -63,32 +63,32 @@ AACAudioSpecificConfigToUserData(uint8_t
   w[0] = 0x0; // Payload type raw AAC packet
   w[1] = aAACProfileLevelIndication;
 
   aOutUserData.AppendElements(heeInfo, heeInfoLen);
   aOutUserData.AppendElements(aAudioSpecConfig, aConfigLength);
 }
 
 WMFAudioMFTManager::WMFAudioMFTManager(
-  const mp4_demuxer::AudioDecoderConfig& aConfig)
-  : mAudioChannels(aConfig.channel_count)
-  , mAudioRate(aConfig.samples_per_second)
+  const AudioInfo& aConfig)
+  : mAudioChannels(aConfig.mChannels)
+  , mAudioRate(aConfig.mRate)
   , mAudioFrameOffset(0)
   , mAudioFrameSum(0)
   , mMustRecaptureAudioPosition(true)
 {
   MOZ_COUNT_CTOR(WMFAudioMFTManager);
 
-  if (aConfig.mime_type.EqualsLiteral("audio/mpeg")) {
+  if (aConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
     mStreamType = MP3;
-  } else if (aConfig.mime_type.EqualsLiteral("audio/mp4a-latm")) {
+  } else if (aConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
     mStreamType = AAC;
-    AACAudioSpecificConfigToUserData(aConfig.aac_profile,
-                                     aConfig.audio_specific_config->Elements(),
-                                     aConfig.audio_specific_config->Length(),
+    AACAudioSpecificConfigToUserData(aConfig.mProfile,
+                                     aConfig.mCodecSpecificConfig->Elements(),
+                                     aConfig.mCodecSpecificConfig->Length(),
                                      mUserData);
   } else {
     mStreamType = Unknown;
   }
 }
 
 WMFAudioMFTManager::~WMFAudioMFTManager()
 {
--- a/dom/media/fmp4/wmf/WMFAudioMFTManager.h
+++ b/dom/media/fmp4/wmf/WMFAudioMFTManager.h
@@ -12,17 +12,17 @@
 #include "MFTDecoder.h"
 #include "mozilla/RefPtr.h"
 #include "WMFMediaDataDecoder.h"
 
 namespace mozilla {
 
 class WMFAudioMFTManager : public MFTManager {
 public:
-  WMFAudioMFTManager(const mp4_demuxer::AudioDecoderConfig& aConfig);
+  WMFAudioMFTManager(const AudioInfo& aConfig);
   ~WMFAudioMFTManager();
 
   virtual TemporaryRef<MFTDecoder> Init() override;
 
   virtual HRESULT Input(MediaRawData* aSample) override;
 
   // Note WMF's AAC decoder sometimes output negatively timestamped samples,
   // presumably they're the preroll samples, and we strip them. We may return
--- a/dom/media/fmp4/wmf/WMFDecoderModule.cpp
+++ b/dom/media/fmp4/wmf/WMFDecoderModule.cpp
@@ -13,16 +13,17 @@
 #include "mozilla/DebugOnly.h"
 #include "WMFMediaDataDecoder.h"
 #include "nsIWindowsRegKey.h"
 #include "nsComponentManagerUtils.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIGfxInfo.h"
 #include "GfxDriverInfo.h"
 #include "gfxWindowsPlatform.h"
+#include "MediaInfo.h"
 
 namespace mozilla {
 
 bool WMFDecoderModule::sIsWMFEnabled = false;
 bool WMFDecoderModule::sDXVAEnabled = false;
 
 WMFDecoderModule::WMFDecoderModule()
 {
@@ -59,66 +60,66 @@ WMFDecoderModule::Startup()
   if (FAILED(wmf::MFStartup())) {
     NS_WARNING("Failed to initialize Windows Media Foundation");
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDecoder>
-WMFDecoderModule::CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+WMFDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                      layers::LayersBackend aLayersBackend,
                                      layers::ImageContainer* aImageContainer,
                                      FlushableMediaTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new WMFMediaDataDecoder(new WMFVideoMFTManager(aConfig,
                                                    aLayersBackend,
                                                    aImageContainer,
                                                    sDXVAEnabled && ShouldUseDXVA(aConfig)),
                             aVideoTaskQueue,
                             aCallback);
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder>
-WMFDecoderModule::CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+WMFDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
                                      FlushableMediaTaskQueue* aAudioTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsRefPtr<MediaDataDecoder> decoder =
     new WMFMediaDataDecoder(new WMFAudioMFTManager(aConfig),
                             aAudioTaskQueue,
                             aCallback);
   return decoder.forget();
 }
 
 bool
-WMFDecoderModule::ShouldUseDXVA(const mp4_demuxer::VideoDecoderConfig& aConfig) const
+WMFDecoderModule::ShouldUseDXVA(const VideoInfo& aConfig) const
 {
   static bool isAMD = false;
   static bool initialized = false;
   if (!initialized) {
     nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
     nsAutoString vendor;
     gfxInfo->GetAdapterVendorID(vendor);
     isAMD = vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorAMD), nsCaseInsensitiveStringComparator()) ||
             vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorATI), nsCaseInsensitiveStringComparator());
     initialized = true;
   }
   if (!isAMD) {
     return true;
   }
   // Don't use DXVA for 4k videos or above, since it seems to perform poorly.
-  return aConfig.display_height <= 1920 && aConfig.display_height <= 1200;
+  return aConfig.mDisplay.width <= 1920 && aConfig.mDisplay.height <= 1200;
 }
 
 bool
-WMFDecoderModule::SupportsSharedDecoders(const mp4_demuxer::VideoDecoderConfig& aConfig) const
+WMFDecoderModule::SupportsSharedDecoders(const VideoInfo& aConfig) const
 {
   // If DXVA is enabled, but we're not going to use it for this specific config, then
   // we can't use the shared decoder.
   return !sDXVAEnabled || ShouldUseDXVA(aConfig);
 }
 
 bool
 WMFDecoderModule::SupportsMimeType(const nsACString& aMimeType)
@@ -127,21 +128,21 @@ WMFDecoderModule::SupportsMimeType(const
          aMimeType.EqualsLiteral("video/avc") ||
          aMimeType.EqualsLiteral("video/webm; codecs=vp8") ||
          aMimeType.EqualsLiteral("video/webm; codecs=vp9") ||
          aMimeType.EqualsLiteral("audio/mp4a-latm") ||
          aMimeType.EqualsLiteral("audio/mpeg");
 }
 
 PlatformDecoderModule::ConversionRequired
-WMFDecoderModule::DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const
+WMFDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
 {
-  if (aConfig.IsVideoConfig() &&
-      (aConfig.mime_type.EqualsLiteral("video/avc") ||
-       aConfig.mime_type.EqualsLiteral("video/mp4"))) {
+  if (aConfig.IsVideo() &&
+      (aConfig.mMimeType.EqualsLiteral("video/avc") ||
+       aConfig.mMimeType.EqualsLiteral("video/mp4"))) {
     return kNeedAnnexB;
   } else {
     return kNeedNone;
   }
 }
 
 static bool
 ClassesRootRegKeyExists(const nsAString& aRegKeyPath)
--- a/dom/media/fmp4/wmf/WMFDecoderModule.h
+++ b/dom/media/fmp4/wmf/WMFDecoderModule.h
@@ -15,50 +15,50 @@ class WMFDecoderModule : public Platform
 public:
   WMFDecoderModule();
   virtual ~WMFDecoderModule();
 
   // Initializes the module, loads required dynamic libraries, etc.
   virtual nsresult Startup() override;
 
   virtual already_AddRefed<MediaDataDecoder>
-  CreateVideoDecoder(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableMediaTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   virtual already_AddRefed<MediaDataDecoder>
-  CreateAudioDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
+  CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableMediaTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override;
 
   bool SupportsMimeType(const nsACString& aMimeType) override;
 
   virtual void DisableHardwareAcceleration() override
   {
     sDXVAEnabled = false;
   }
 
-  virtual bool SupportsSharedDecoders(const mp4_demuxer::VideoDecoderConfig& aConfig) const override;
+  virtual bool SupportsSharedDecoders(const VideoInfo& aConfig) const override;
 
   virtual ConversionRequired
-  DecoderNeedsConversion(const mp4_demuxer::TrackConfig& aConfig) const override;
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
 
   // Accessors that report whether we have the required MFTs available
   // on the system to play various codecs. Windows Vista doesn't have the
   // H.264/AAC decoders if the "Platform Update Supplement for Windows Vista"
   // is not installed.
   static bool HasAAC();
   static bool HasH264();
 
   // Called on main thread.
   static void Init();
 private:
-  bool ShouldUseDXVA(const mp4_demuxer::VideoDecoderConfig& aConfig) const;
+  bool ShouldUseDXVA(const VideoInfo& aConfig) const;
 
   static bool sIsWMFEnabled;
   static bool sDXVAEnabled;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/fmp4/wmf/WMFVideoMFTManager.cpp
@@ -9,22 +9,22 @@
 #include "MediaDecoderReader.h"
 #include "WMFUtils.h"
 #include "ImageContainer.h"
 #include "VideoUtils.h"
 #include "DXVA2Manager.h"
 #include "nsThreadUtils.h"
 #include "Layers.h"
 #include "mozilla/layers/LayersTypes.h"
-#include "mp4_demuxer/AnnexB.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "prlog.h"
 #include "gfx2DGlue.h"
 #include "gfxWindowsPlatform.h"
 #include "IMFYCbCrImage.h"
+#include "mozilla/WindowsVersion.h"
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* GetDemuxerLog();
 #define LOG(...) PR_LOG(GetDemuxerLog(), PR_LOG_DEBUG, (__VA_ARGS__))
 #else
 #define LOG(...)
 #endif
 
@@ -64,37 +64,37 @@ const CLSID CLSID_WebmMfVp9Dec =
   0x1979,
   0x4fcd,
   {0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe}
 };
 
 namespace mozilla {
 
 WMFVideoMFTManager::WMFVideoMFTManager(
-                            const mp4_demuxer::VideoDecoderConfig& aConfig,
+                            const VideoInfo& aConfig,
                             mozilla::layers::LayersBackend aLayersBackend,
                             mozilla::layers::ImageContainer* aImageContainer,
                             bool aDXVAEnabled)
   : mImageContainer(aImageContainer)
   , mDXVAEnabled(aDXVAEnabled)
   , mLayersBackend(aLayersBackend)
   // mVideoStride, mVideoWidth, mVideoHeight, mUseHwAccel are initialized in
   // Init().
 {
   NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread.");
   MOZ_ASSERT(mImageContainer);
   MOZ_COUNT_CTOR(WMFVideoMFTManager);
 
   // Need additional checks/params to check vp8/vp9
-  if (aConfig.mime_type.EqualsLiteral("video/mp4") ||
-      aConfig.mime_type.EqualsLiteral("video/avc")) {
+  if (aConfig.mMimeType.EqualsLiteral("video/mp4") ||
+      aConfig.mMimeType.EqualsLiteral("video/avc")) {
     mStreamType = H264;
-  } else if (aConfig.mime_type.EqualsLiteral("video/webm; codecs=vp8")) {
+  } else if (aConfig.mMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
     mStreamType = VP8;
-  } else if (aConfig.mime_type.EqualsLiteral("video/webm; codecs=vp9")) {
+  } else if (aConfig.mMimeType.EqualsLiteral("video/webm; codecs=vp9")) {
     mStreamType = VP9;
   } else {
     mStreamType = Unknown;
   }
 }
 
 WMFVideoMFTManager::~WMFVideoMFTManager()
 {
@@ -126,22 +126,32 @@ WMFVideoMFTManager::GetMediaSubtypeGUID(
     case VP8: return MFVideoFormat_VP80;
     case VP9: return MFVideoFormat_VP90;
     default: return GUID_NULL;
   };
 }
 
 class CreateDXVAManagerEvent : public nsRunnable {
 public:
+  CreateDXVAManagerEvent(LayersBackend aBackend)
+    : mBackend(aBackend)
+  {}
+
   NS_IMETHOD Run() {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
-    mDXVA2Manager = DXVA2Manager::Create();
+    if (mBackend == LayersBackend::LAYERS_D3D11 &&
+        IsWin8OrLater()) {
+      mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA();
+    } else {
+      mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA();
+    }
     return NS_OK;
   }
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
+  LayersBackend mBackend;
 };
 
 bool
 WMFVideoMFTManager::InitializeDXVA()
 {
   MOZ_ASSERT(!mDXVA2Manager);
 
   // If we use DXVA but aren't running with a D3D layer manager then the
@@ -150,17 +160,17 @@ WMFVideoMFTManager::InitializeDXVA()
   if (!mDXVAEnabled ||
       (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
        mLayersBackend != LayersBackend::LAYERS_D3D10 &&
        mLayersBackend != LayersBackend::LAYERS_D3D11)) {
     return false;
   }
 
   // The DXVA manager must be created on the main thread.
-  nsRefPtr<CreateDXVAManagerEvent> event(new CreateDXVAManagerEvent());
+  nsRefPtr<CreateDXVAManagerEvent> event(new CreateDXVAManagerEvent(mLayersBackend));
   NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
   mDXVA2Manager = event->mDXVA2Manager;
 
   return mDXVA2Manager != nullptr;
 }
 
 TemporaryRef<MFTDecoder>
 WMFVideoMFTManager::Init()
@@ -285,19 +295,23 @@ WMFVideoMFTManager::ConfigureVideoFrameG
   nsIntSize frameSize = nsIntSize(width, height);
   nsIntSize displaySize = nsIntSize(pictureRegion.width, pictureRegion.height);
   ScaleDisplayByAspectRatio(displaySize, float(aspectNum) / float(aspectDenom));
   if (!IsValidVideoRegion(frameSize, pictureRegion, displaySize)) {
     // Video track's frame sizes will overflow. Ignore the video track.
     return E_FAIL;
   }
 
+  if (mDXVA2Manager) {
+    hr = mDXVA2Manager->ConfigureForSize(width, height);
+    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  }
+
   // Success! Save state.
   mVideoInfo.mDisplay = displaySize;
-  mVideoInfo.mHasVideo = true;
   GetDefaultStride(mediaType, &mVideoStride);
   mVideoWidth = width;
   mVideoHeight = height;
   mPictureRegion = pictureRegion;
 
   LOG("WMFVideoMFTManager frame geometry frame=(%u,%u) stride=%u picture=(%d, %d, %d, %d) display=(%d,%d) PAR=%d:%d",
       width, height,
       mVideoStride,
--- a/dom/media/fmp4/wmf/WMFVideoMFTManager.h
+++ b/dom/media/fmp4/wmf/WMFVideoMFTManager.h
@@ -15,17 +15,17 @@
 #include "mozilla/RefPtr.h"
 
 namespace mozilla {
 
 class DXVA2Manager;
 
 class WMFVideoMFTManager : public MFTManager {
 public:
-  WMFVideoMFTManager(const mp4_demuxer::VideoDecoderConfig& aConfig,
+  WMFVideoMFTManager(const VideoInfo& aConfig,
                      mozilla::layers::LayersBackend aLayersBackend,
                      mozilla::layers::ImageContainer* aImageContainer,
                      bool aDXVAEnabled);
   ~WMFVideoMFTManager();
 
   virtual TemporaryRef<MFTDecoder> Init() override;
 
   virtual HRESULT Input(MediaRawData* aSample) override;
--- a/dom/media/fmp4/wrappers/H264Converter.cpp
+++ b/dom/media/fmp4/wrappers/H264Converter.cpp
@@ -2,30 +2,25 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "H264Converter.h"
 #include "ImageContainer.h"
 #include "MediaTaskQueue.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "mp4_demuxer/AnnexB.h"
 #include "mp4_demuxer/H264.h"
 
 namespace mozilla
 {
 
-  // H264 AnnexB or AVCC handler
-#include "mp4_demuxer/DecoderData.h"
-#include "mp4_demuxer/AnnexB.h"
-#include "mp4_demuxer/H264.h"
-
 H264Converter::H264Converter(PlatformDecoderModule* aPDM,
-                             const mp4_demuxer::VideoDecoderConfig& aConfig,
+                             const VideoInfo& aConfig,
                              layers::LayersBackend aLayersBackend,
                              layers::ImageContainer* aImageContainer,
                              FlushableMediaTaskQueue* aVideoTaskQueue,
                              MediaDataDecoderCallback* aCallback)
   : mPDM(aPDM)
   , mCurrentConfig(aConfig)
   , mLayersBackend(aLayersBackend)
   , mImageContainer(aImageContainer)
@@ -74,17 +69,17 @@ H264Converter::Input(MediaRawData* aSamp
       // Ignore for the time being, the MediaRawData will be dropped.
       return NS_OK;
     }
   } else {
     rv = CheckForSPSChange(aSample);
   }
   NS_ENSURE_SUCCESS(rv, rv);
 
-  aSample->mExtraData = mCurrentConfig.extra_data;
+  aSample->mExtraData = mCurrentConfig.mExtraData;
 
   return mDecoder->Input(aSample);
 }
 
 nsresult
 H264Converter::Flush()
 {
   if (mDecoder) {
@@ -129,21 +124,21 @@ H264Converter::IsHardwareAccelerated() c
     return mDecoder->IsHardwareAccelerated();
   }
   return MediaDataDecoder::IsHardwareAccelerated();
 }
 
 nsresult
 H264Converter::CreateDecoder()
 {
-  if (mNeedAVCC && !mp4_demuxer::AnnexB::HasSPS(mCurrentConfig.extra_data)) {
+  if (mNeedAVCC && !mp4_demuxer::AnnexB::HasSPS(mCurrentConfig.mExtraData)) {
     // nothing found yet, will try again later
     return NS_ERROR_NOT_INITIALIZED;
   }
-  UpdateConfigFromExtraData(mCurrentConfig.extra_data);
+  UpdateConfigFromExtraData(mCurrentConfig.mExtraData);
 
   mDecoder = mPDM->CreateVideoDecoder(mCurrentConfig,
                                       mLayersBackend,
                                       mImageContainer,
                                       mVideoTaskQueue,
                                       mCallback);
   if (!mDecoder) {
     mLastError = NS_ERROR_FAILURE;
@@ -169,17 +164,17 @@ H264Converter::CreateDecoderAndInit(Medi
 
 nsresult
 H264Converter::CheckForSPSChange(MediaRawData* aSample)
 {
   nsRefPtr<DataBuffer> extra_data =
     mp4_demuxer::AnnexB::ExtractExtraData(aSample);
   if (!mp4_demuxer::AnnexB::HasSPS(extra_data) ||
       mp4_demuxer::AnnexB::CompareExtraData(extra_data,
-                                            mCurrentConfig.extra_data)) {
+                                            mCurrentConfig.mExtraData)) {
         return NS_OK;
       }
   if (!mNeedAVCC) {
     UpdateConfigFromExtraData(extra_data);
     mDecoder->ConfigurationChanged(mCurrentConfig);
     return NS_OK;
   }
   // The SPS has changed, signal to flush the current decoder and create a
@@ -191,25 +186,25 @@ H264Converter::CheckForSPSChange(MediaRa
 
 void
 H264Converter::UpdateConfigFromExtraData(DataBuffer* aExtraData)
 {
   mp4_demuxer::SPSData spsdata;
   if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata) &&
       spsdata.pic_width > 0 && spsdata.pic_height > 0) {
     mp4_demuxer::H264::EnsureSPSIsSane(spsdata);
-    mCurrentConfig.image_width = spsdata.pic_width;
-    mCurrentConfig.image_height = spsdata.pic_height;
-    mCurrentConfig.display_width = spsdata.display_width;
-    mCurrentConfig.display_height = spsdata.display_height;
+    mCurrentConfig.mImage.width = spsdata.pic_width;
+    mCurrentConfig.mImage.height = spsdata.pic_height;
+    mCurrentConfig.mDisplay.width = spsdata.display_width;
+    mCurrentConfig.mDisplay.height = spsdata.display_height;
   }
-  mCurrentConfig.extra_data = aExtraData;
+  mCurrentConfig.mExtraData = aExtraData;
 }
 
 /* static */
 bool
-H264Converter::IsH264(const mp4_demuxer::TrackConfig& aConfig)
+H264Converter::IsH264(const TrackInfo& aConfig)
 {
-  return aConfig.mime_type.EqualsLiteral("video/avc") ||
-    aConfig.mime_type.EqualsLiteral("video/mp4");
+  return aConfig.mMimeType.EqualsLiteral("video/avc") ||
+    aConfig.mMimeType.EqualsLiteral("video/mp4");
 }
 
 } // namespace mozilla
--- a/dom/media/fmp4/wrappers/H264Converter.h
+++ b/dom/media/fmp4/wrappers/H264Converter.h
@@ -17,45 +17,45 @@ namespace mozilla {
 // provided in the init segment (e.g. AVC3 or Annex B)
 // H264Converter will monitor the input data, and will delay creation of the
 // MediaDataDecoder until a SPS and PPS NALs have been extracted.
 
 class H264Converter : public MediaDataDecoder {
 public:
 
   H264Converter(PlatformDecoderModule* aPDM,
-                const mp4_demuxer::VideoDecoderConfig& aConfig,
+                const VideoInfo& aConfig,
                 layers::LayersBackend aLayersBackend,
                 layers::ImageContainer* aImageContainer,
                 FlushableMediaTaskQueue* aVideoTaskQueue,
                 MediaDataDecoderCallback* aCallback);
   virtual ~H264Converter();
 
   virtual nsresult Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
   virtual bool IsWaitingMediaResources() override;
   virtual bool IsHardwareAccelerated() const override;
 
   // Return true if mimetype is H.264.
-  static bool IsH264(const mp4_demuxer::TrackConfig& aConfig);
+  static bool IsH264(const TrackInfo& aConfig);
 
 private:
   // Will create the required MediaDataDecoder if need AVCC and we have a SPS NAL.
   // Returns NS_ERROR_FAILURE if error is permanent and can't be recovered and
   // will set mError accordingly.
   nsresult CreateDecoder();
   nsresult CreateDecoderAndInit(MediaRawData* aSample);
   nsresult CheckForSPSChange(MediaRawData* aSample);
   void UpdateConfigFromExtraData(DataBuffer* aExtraData);
 
   nsRefPtr<PlatformDecoderModule> mPDM;
-  mp4_demuxer::VideoDecoderConfig mCurrentConfig;
+  VideoInfo mCurrentConfig;
   layers::LayersBackend mLayersBackend;
   nsRefPtr<layers::ImageContainer> mImageContainer;
   nsRefPtr<FlushableMediaTaskQueue> mVideoTaskQueue;
   MediaDataDecoderCallback* mCallback;
   nsRefPtr<MediaDataDecoder> mDecoder;
   bool mNeedAVCC;
   nsresult mLastError;
 };
--- a/dom/media/gstreamer/GStreamerFormatHelper.cpp
+++ b/dom/media/gstreamer/GStreamerFormatHelper.cpp
@@ -274,17 +274,21 @@ static bool SupportsCaps(GstElementFacto
       continue;
     }
 
     GstCaps *caps = gst_static_caps_get(&templ->static_caps);
     if (!caps) {
       continue;
     }
 
-    if (gst_caps_can_intersect(gst_static_caps_get(&templ->static_caps), aCaps)) {
+    bool supported = gst_caps_can_intersect(caps, aCaps);
+
+    gst_caps_unref(caps);
+
+    if (supported) {
       return true;
     }
   }
 
   return false;
 }
 
 bool GStreamerFormatHelper::HaveElementsToProcessCaps(GstCaps* aCaps) {
@@ -302,21 +306,21 @@ bool GStreamerFormatHelper::HaveElements
     bool found = false;
     for (GList *elem = factories; elem; elem = elem->next) {
       if (SupportsCaps(GST_ELEMENT_FACTORY_CAST(elem->data), caps)) {
         found = true;
         break;
       }
     }
 
+    gst_caps_unref(caps);
+
     if (!found) {
       return false;
     }
-
-    gst_caps_unref(caps);
   }
 
   return true;
 }
 
 bool GStreamerFormatHelper::CanHandleContainerCaps(GstCaps* aCaps)
 {
   NS_ASSERTION(sLoadOK, "GStreamer library not linked");
--- a/dom/media/gstreamer/GStreamerReader.cpp
+++ b/dom/media/gstreamer/GStreamerReader.cpp
@@ -490,19 +490,23 @@ nsresult GStreamerReader::ReadMetadata(M
       LOG(PR_LOG_DEBUG, "have duration %" GST_TIME_FORMAT, GST_TIME_ARGS(duration));
       duration = GST_TIME_AS_USECONDS (duration);
       mDecoder->SetMediaDuration(duration);
     }
   }
 
   int n_video = 0, n_audio = 0;
   g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
-  mInfo.mVideo.mHasVideo = n_video != 0;
-  mInfo.mAudio.mHasAudio = n_audio != 0;
 
+  if (!n_video) {
+    mInfo.mVideo = VideoInfo();
+  }
+  if (!n_audio) {
+    mInfo.mAudio = AudioInfo();
+  }
   *aInfo = mInfo;
 
   *aTags = nullptr;
 
   // Watch the pipeline for fatal errors
 #if GST_VERSION_MAJOR >= 1
   gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this, nullptr);
 #else
@@ -1048,17 +1052,17 @@ gboolean GStreamerReader::SeekData(GstAp
   } else {
     MOZ_ASSERT(aOffset == static_cast<guint64>(resource->Tell()));
   }
 
   return NS_SUCCEEDED(rv);
 }
 
 GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink* aSink,
-                                              gpointer aUserData)
+                                            gpointer aUserData)
 {
   GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   if (aSink == reader->mVideoAppSink)
     reader->VideoPreroll();
   else
     reader->AudioPreroll();
   return GST_FLOW_OK;
@@ -1077,17 +1081,16 @@ void GStreamerReader::AudioPreroll()
   GstStructure* s = gst_caps_get_structure(caps, 0);
   mInfo.mAudio.mRate = mInfo.mAudio.mChannels = 0;
   gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudio.mRate);
   gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudio.mChannels);
   NS_ASSERTION(mInfo.mAudio.mRate != 0, ("audio rate is zero"));
   NS_ASSERTION(mInfo.mAudio.mChannels != 0, ("audio channels is zero"));
   NS_ASSERTION(mInfo.mAudio.mChannels > 0 && mInfo.mAudio.mChannels <= MAX_CHANNELS,
       "invalid audio channels number");
-  mInfo.mAudio.mHasAudio = true;
   gst_caps_unref(caps);
   gst_object_unref(sinkpad);
 }
 
 void GStreamerReader::VideoPreroll()
 {
   /* The first video buffer has reached the video sink. Get width and height */
   LOG(PR_LOG_DEBUG, "Video preroll");
@@ -1118,17 +1121,16 @@ void GStreamerReader::VideoPreroll()
   nsIntSize displaySize = nsIntSize(mPicture.width, mPicture.height);
   ScaleDisplayByAspectRatio(displaySize, float(PARNumerator) / float(PARDenominator));
 
   // If video frame size is overflow, stop playing.
   if (IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
     GstStructure* structure = gst_caps_get_structure(caps, 0);
     gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
     mInfo.mVideo.mDisplay = displaySize;
-    mInfo.mVideo.mHasVideo = true;
   } else {
     LOG(PR_LOG_DEBUG, "invalid video region");
     Eos();
   }
   gst_caps_unref(caps);
   gst_object_unref(sinkpad);
 }
 
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -59,17 +59,17 @@ TEST(MP4Demuxer, Seek)
     }
     d->SeekVideo(samples[i]->mTime);
     sample = d->DemuxVideoSample();
     EXPECT_EQ(keyFrame, sample->mTimecode);
   }
 }
 
 static nsCString
-ToCryptoString(CryptoSample& aCrypto)
+ToCryptoString(const CryptoSample& aCrypto)
 {
   nsCString res;
   if (aCrypto.mValid) {
     res.AppendPrintf("%d %d ", aCrypto.mMode, aCrypto.mIVSize);
     for (size_t i = 0; i < aCrypto.mKeyId.Length(); i++) {
       res.AppendPrintf("%02x", aCrypto.mKeyId[i]);
     }
     res.Append(" ");
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -106,26 +106,30 @@ static const nsString GetKind(const nsCS
   } else if (aRole.Find("video/captioned") != -1) {
     return NS_LITERAL_STRING("captions");
   } else if (aRole.Find("video/subtitled") != -1) {
     return NS_LITERAL_STRING("subtitles");
   }
   return EmptyString();
 }
 
-static void InitTrack(MessageField* aMsgInfo, TrackInfo* aInfo, bool aEnable)
+static void InitTrack(TrackInfo::TrackType aTrackType,
+                      MessageField* aMsgInfo,
+                      TrackInfo* aInfo,
+                      bool aEnable)
 {
   MOZ_ASSERT(aMsgInfo);
   MOZ_ASSERT(aInfo);
 
   nsCString* sName = aMsgInfo->mValuesStore.Get(eName);
   nsCString* sRole = aMsgInfo->mValuesStore.Get(eRole);
   nsCString* sTitle = aMsgInfo->mValuesStore.Get(eTitle);
   nsCString* sLanguage = aMsgInfo->mValuesStore.Get(eLanguage);
-  aInfo->Init(sName? NS_ConvertUTF8toUTF16(*sName):EmptyString(),
+  aInfo->Init(aTrackType,
+              sName? NS_ConvertUTF8toUTF16(*sName):EmptyString(),
               sRole? GetKind(*sRole):EmptyString(),
               sTitle? NS_ConvertUTF8toUTF16(*sTitle):EmptyString(),
               sLanguage? NS_ConvertUTF8toUTF16(*sLanguage):EmptyString(),
               aEnable);
 }
 
 OggReader::OggReader(AbstractMediaDecoder* aDecoder)
   : MediaDecoderReader(aDecoder),
@@ -317,54 +321,62 @@ void OggReader::SetupMediaTracksInfo(con
 
     if (codecState->GetType() == OggCodecState::TYPE_THEORA) {
       TheoraState* theoraState = static_cast<TheoraState*>(codecState);
       if (!(mTheoraState && mTheoraState->mSerial == theoraState->mSerial)) {
         continue;
       }
 
       if (msgInfo) {
-        InitTrack(msgInfo, &mInfo.mVideo.mTrackInfo, mTheoraState == theoraState);
+        InitTrack(TrackInfo::kVideoTrack,
+                  msgInfo,
+                  &mInfo.mVideo,
+                  mTheoraState == theoraState);
       }
 
       nsIntRect picture = nsIntRect(theoraState->mInfo.pic_x,
                                     theoraState->mInfo.pic_y,
                                     theoraState->mInfo.pic_width,
                                     theoraState->mInfo.pic_height);
       nsIntSize displaySize = nsIntSize(theoraState->mInfo.pic_width,
                                         theoraState->mInfo.pic_height);
       nsIntSize frameSize(theoraState->mInfo.frame_width,
                           theoraState->mInfo.frame_height);
       ScaleDisplayByAspectRatio(displaySize, theoraState->mPixelAspectRatio);
-      mInfo.mVideo.mDisplay = displaySize;
-      mInfo.mVideo.mHasVideo = IsValidVideoRegion(frameSize, picture, displaySize)? true:false;
+      if (IsValidVideoRegion(frameSize, picture, displaySize)) {
+        mInfo.mVideo.mDisplay = displaySize;
+      }
     } else if (codecState->GetType() == OggCodecState::TYPE_VORBIS) {
       VorbisState* vorbisState = static_cast<VorbisState*>(codecState);
       if (!(mVorbisState && mVorbisState->mSerial == vorbisState->mSerial)) {
         continue;
       }
 
       if (msgInfo) {
-        InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, mVorbisState == vorbisState);
+        InitTrack(TrackInfo::kAudioTrack,
+                  msgInfo,
+                  &mInfo.mAudio,
+                  mVorbisState == vorbisState);
       }
 
-      mInfo.mAudio.mHasAudio = true;
       mInfo.mAudio.mRate = vorbisState->mInfo.rate;
       mInfo.mAudio.mChannels = vorbisState->mInfo.channels;
     } else if (codecState->GetType() == OggCodecState::TYPE_OPUS) {
       OpusState* opusState = static_cast<OpusState*>(codecState);
       if (!(mOpusState && mOpusState->mSerial == opusState->mSerial)) {
         continue;
       }
 
       if (msgInfo) {
-        InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, mOpusState == opusState);
+        InitTrack(TrackInfo::kAudioTrack,
+                  msgInfo,
+                  &mInfo.mAudio,
+                  mOpusState == opusState);
       }
 
-      mInfo.mAudio.mHasAudio = true;
       mInfo.mAudio.mRate = opusState->mRate;
       mInfo.mAudio.mChannels = opusState->mChannels;
     }
   }
 }
 
 nsresult OggReader::ReadMetadata(MediaInfo* aInfo,
                                  MetadataTags** aTags)
@@ -779,46 +791,44 @@ bool OggReader::ReadOggChain()
   if ((newVorbisState && ReadHeaders(newVorbisState)) &&
       (mVorbisState->mInfo.rate == newVorbisState->mInfo.rate) &&
       (mVorbisState->mInfo.channels == newVorbisState->mInfo.channels)) {
 
     SetupTargetVorbis(newVorbisState);
     LOG(PR_LOG_DEBUG, ("New vorbis ogg link, serial=%d\n", mVorbisSerial));
 
     if (msgInfo) {
-      InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, true);
+      InitTrack(TrackInfo::kAudioTrack, msgInfo, &mInfo.mAudio, true);
     }
     mInfo.mAudio.mRate = newVorbisState->mInfo.rate;
     mInfo.mAudio.mChannels = newVorbisState->mInfo.channels;
 
     chained = true;
     tags = newVorbisState->GetTags();
   }
 
   if ((newOpusState && ReadHeaders(newOpusState)) &&
       (mOpusState->mRate == newOpusState->mRate) &&
       (mOpusState->mChannels == newOpusState->mChannels)) {
 
     SetupTargetOpus(newOpusState);
 
     if (msgInfo) {
-      InitTrack(msgInfo, &mInfo.mAudio.mTrackInfo, true);
+      InitTrack(TrackInfo::kAudioTrack, msgInfo, &mInfo.mAudio, true);
     }
     mInfo.mAudio.mRate = newOpusState->mRate;
     mInfo.mAudio.mChannels = newOpusState->mChannels;
 
     chained = true;
     tags = newOpusState->GetTags();
   }
 
   if (chained) {
     SetChained(true);
     {
-      mInfo.mAudio.mHasAudio = HasAudio();
-      mInfo.mVideo.mHasVideo = HasVideo();
       nsAutoPtr<MediaInfo> info(new MediaInfo());
       *info = mInfo;
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       mDecoder->QueueMetadata((mDecodedAudioFrames * USECS_PER_S) / mInfo.mAudio.mRate,
                               info, tags);
     }
     return true;
   }
--- a/dom/media/omx/MediaCodecReader.cpp
+++ b/dom/media/omx/MediaCodecReader.cpp
@@ -495,23 +495,23 @@ MediaCodecReader::DecodeVideoFrameTask(i
   } else if (VideoQueue().AtEndOfStream()) {
     mVideoTrack.mVideoPromise.Reject(END_OF_STREAM, __func__);
   }
 }
 
 bool
 MediaCodecReader::HasAudio()
 {
-  return mInfo.mAudio.mHasAudio;
+  return mInfo.HasAudio();
 }
 
 bool
 MediaCodecReader::HasVideo()
 {
-  return mInfo.mVideo.mHasVideo;
+  return mInfo.HasVideo();
 }
 
 void
 MediaCodecReader::NotifyDataArrived(const char* aBuffer,
                                     uint32_t aLength,
                                     int64_t aOffset)
 {
   MonitorAutoLock monLock(mParserMonitor);
@@ -1551,17 +1551,16 @@ MediaCodecReader::UpdateAudioInfo()
   int32_t codec_sample_rate = 0;
   if (!audioCodecFormat->findString("mime", &codec_mime) ||
       !audioCodecFormat->findInt32("channel-count", &codec_channel_count) ||
       !audioCodecFormat->findInt32("sample-rate", &codec_sample_rate)) {
     return false;
   }
 
   // Update AudioInfo
-  mInfo.mAudio.mHasAudio = true;
   mInfo.mAudio.mChannels = codec_channel_count;
   mInfo.mAudio.mRate = codec_sample_rate;
 
   return true;
 }
 
 bool
 MediaCodecReader::UpdateVideoInfo()
@@ -1658,17 +1657,16 @@ MediaCodecReader::UpdateVideoInfo()
                               mVideoTrack.mFrameSize.height;
     relative_picture_rect.width = (picture_rect.width * mVideoTrack.mWidth) /
                                   mVideoTrack.mFrameSize.width;
     relative_picture_rect.height = (picture_rect.height * mVideoTrack.mHeight) /
                                    mVideoTrack.mFrameSize.height;
   }
 
   // Update VideoInfo
-  mInfo.mVideo.mHasVideo = true;
   mVideoTrack.mPictureRect = picture_rect;
   mInfo.mVideo.mDisplay = display_size;
   mVideoTrack.mRelativePictureRect = relative_picture_rect;
 
   return true;
 }
 
 status_t
--- a/dom/media/omx/MediaOmxReader.cpp
+++ b/dom/media/omx/MediaOmxReader.cpp
@@ -319,32 +319,32 @@ nsresult MediaOmxReader::ReadMetadata(Me
     // that our video frame creation code doesn't overflow.
     nsIntSize displaySize(displayWidth, displayHeight);
     nsIntSize frameSize(width, height);
     if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
       return NS_ERROR_FAILURE;
     }
 
     // Video track's frame sizes will not overflow. Activate the video track.
-    mHasVideo = mInfo.mVideo.mHasVideo = true;
+    mHasVideo = true;
     mInfo.mVideo.mDisplay = displaySize;
     mPicture = pictureRect;
     mInitialFrame = frameSize;
     VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
     if (container) {
       container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                  nullptr,
                                  mozilla::TimeStamp::Now());
     }
   }
 
   if (mOmxDecoder->HasAudio()) {
     int32_t numChannels, sampleRate;
     mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
-    mHasAudio = mInfo.mAudio.mHasAudio = true;
+    mHasAudio = true;
     mInfo.mAudio.mChannels = numChannels;
     mInfo.mAudio.mRate = sampleRate;
   }
 
  *aInfo = mInfo;
 
 #ifdef MOZ_AUDIO_OFFLOAD
   CheckAudioOffload();
--- a/dom/media/raw/RawReader.cpp
+++ b/dom/media/raw/RawReader.cpp
@@ -70,17 +70,16 @@ nsresult RawReader::ReadMetadata(MediaIn
   ScaleDisplayByAspectRatio(display, pixelAspectRatio);
   mPicture = nsIntRect(0, 0, mMetadata.frameWidth, mMetadata.frameHeight);
   nsIntSize frameSize(mMetadata.frameWidth, mMetadata.frameHeight);
   if (!IsValidVideoRegion(frameSize, mPicture, display)) {
     // Video track's frame sizes will overflow. Fail.
     return NS_ERROR_FAILURE;
   }
 
-  mInfo.mVideo.mHasVideo = true;
   mInfo.mVideo.mDisplay = display;
 
   mFrameRate = static_cast<float>(mMetadata.framerateNumerator) /
                mMetadata.framerateDenominator;
 
   // Make some sanity checks
   if (mFrameRate > 45 ||
       mFrameRate == 0 ||
--- a/dom/media/test/eme.js
+++ b/dom/media/test/eme.js
@@ -48,36 +48,16 @@ function HexToBase64(hex)
 {
   var bin = "";
   for (var i = 0; i < hex.length; i += 2) {
     bin += String.fromCharCode(parseInt(hex.substr(i, 2), 16));
   }
   return window.btoa(bin).replace(/=/g, "").replace(/\+/g, "-").replace(/\//g, "_");
 }
 
-function TimeStamp(token) {
-  function pad(x) {
-    return (x < 10) ? "0" + x : x;
-  }
-  var now = new Date();
-  var ms = now.getMilliseconds();
-  var time = "[" +
-             pad(now.getHours()) + ":" +
-             pad(now.getMinutes()) + ":" +
-             pad(now.getSeconds()) + "." +
-             ms +
-             "]" +
-             (ms < 10 ? "  " : (ms < 100 ? " " : ""));
-  return token ? (time + " " + token) : time;
-}
-
-function Log(token, msg) {
-  info(TimeStamp(token) + " " + msg);
-}
-
 function TimeRangesToString(trs)
 {
   var l = trs.length;
   if (l === 0) { return "-"; }
   var s = "";
   var i = 0;
   for (;;) {
     s += trs.start(i) + "-" + trs.end(i);
--- a/dom/media/test/manifest.js
+++ b/dom/media/test/manifest.js
@@ -107,16 +107,20 @@ var gMediaRecorderTests = [
   { name:"detodos.opus", type:"audio/ogg; codecs=opus", duration:2.9135 }
 ];
 
 // These are files that we want to make sure we can play through.  We can
 // also check metadata.  Put files of the same type together in this list so if
 // something crashes we have some idea of which backend is responsible.
 // Used by test_playback, which expects no error event and one ended event.
 var gPlayTests = [
+  // Test playback of a WebM file with vp9 video
+  //{ name:"vp9.webm", type:"video/webm", duration:4 },
+  { name:"vp9cake.webm", type:"video/webm", duration:7.966 },
+
   // 8-bit samples
   { name:"r11025_u8_c1.wav", type:"audio/x-wav", duration:1.0 },
   // 8-bit samples, file is truncated
   { name:"r11025_u8_c1_trunc.wav", type:"audio/x-wav", duration:1.8 },
   // file has trailing non-PCM data
   { name:"r11025_s16_c1_trailing.wav", type:"audio/x-wav", duration:1.0 },
   // file with list chunk
   { name:"r16000_u8_c1_list.wav", type:"audio/x-wav", duration:4.2 },
@@ -221,20 +225,16 @@ var gPlayTests = [
   // A truncated VBR MP3 with just enough frames to keep most decoders happy.
   // The Xing header reports the length of the file to be around 10 seconds, but
   // there is really only one second worth of data. We want MP3FrameParser to
   // trust the header, so this should be reported as 10 seconds.
   { name:"vbr-head.mp3", type:"audio/mpeg", duration:10.00 },
 
   // Invalid file
   { name:"bogus.duh", type:"bogus/duh", duration:Number.NaN },
-
-  // Test playback of a WebM file with vp9 video
-  //{ name:"vp9.webm", type:"video/webm", duration:4 },
-  { name:"vp9cake.webm", type:"video/webm", duration:7.966 }
 ];
 
 // A file for each type we can support.
 var gSnifferTests = [
   { name:"big.wav", type:"audio/x-wav", duration:9.278981, size:102444 },
   { name:"320x240.ogv", type:"video/ogg", width:320, height:240, duration:0.233, size:28942 },
   { name:"seek.webm", type:"video/webm", duration:3.966, size:215529 },
   { name:"gizmo.mp4", type:"video/mp4", duration:5.56, size:383631 },
@@ -829,16 +829,36 @@ function once(target, name, cb) {
     });
   });
   if (cb) {
     p.then(cb);
   }
   return p;
 }
 
+function TimeStamp(token) {
+  function pad(x) {
+    return (x < 10) ? "0" + x : x;
+  }
+  var now = new Date();
+  var ms = now.getMilliseconds();
+  var time = "[" +
+             pad(now.getHours()) + ":" +
+             pad(now.getMinutes()) + ":" +
+             pad(now.getSeconds()) + "." +
+             ms +
+             "]" +
+             (ms < 10 ? "  " : (ms < 100 ? " " : ""));
+  return token ? (time + " " + token) : time;
+}
+
+function Log(token, msg) {
+  info(TimeStamp(token) + " " + msg);
+}
+
 // Number of tests to run in parallel.
 var PARALLEL_TESTS = 2;
 
 // Prefs to set before running tests.  Use this to improve coverage of
 // conditions that might not otherwise be encountered on the test data.
 var gTestPrefs = [
   ['media.recorder.max_memory', 1024],
   ["media.preload.default", 2], // default preload = metadata
--- a/dom/media/test/test_playback.html
+++ b/dom/media/test/test_playback.html
@@ -5,19 +5,25 @@
   <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-//longer timeout for sometimes B2G emulator runs very slowly
-if (SpecialPowers.Services.appinfo.name == "B2G") {
+function isSlowPlatform() {
+  return SpecialPowers.Services.appinfo.name == "B2G" ||
+         navigator.userAgent.indexOf("Mobile") != -1 && androidVersion == 10;
+}
+
+// longer timeout for slow platforms
+if (isSlowPlatform()) {
   SimpleTest.requestLongerTimeout(3);
+  SimpleTest.requestCompleteLog();
 }
 
 var manager = new MediaTestManager;
 
 function startTest(test, token) {
   var v = document.createElement('video');
   v.preload = "metadata";
   v.token = token;
@@ -112,24 +118,24 @@ function startTest(test, token) {
 
   // We should get "ended" and "suspend" events for every resource
   v.addEventListener("ended", checkEnded, false);
   v.addEventListener("suspend", checkSuspended, false);
 
   document.body.appendChild(v);
   v.play();
 
-  if (test.name == "vp9cake.webm") {
-    // Log events for debugging.
+  // Debug timeouts on slow platforms.
+  if (isSlowPlatform()) {
     var events = ["suspend", "play", "canplay", "canplaythrough", "loadstart", "loadedmetadata",
                   "loadeddata", "playing", "ended", "error", "stalled", "emptied", "abort",
                   "waiting", "pause"];
     function logEvent(e) {
       var v = e.target;
-      info(e.target.token + ": got " + e.type);
+      Log(e.target.token, "got " + e.type);
     }
     events.forEach(function(e) {
       v.addEventListener(e, logEvent, false);
     });
   }
 }
 
 manager.runTests(gPlayTests, startTest);
--- a/dom/media/wave/WaveReader.cpp
+++ b/dom/media/wave/WaveReader.cpp
@@ -137,17 +137,16 @@ nsresult WaveReader::ReadMetadata(MediaI
 
   nsAutoPtr<dom::HTMLMediaElement::MetadataTags> tags;
 
   bool loadAllChunks = LoadAllChunks(tags);
   if (!loadAllChunks) {
     return NS_ERROR_FAILURE;
   }
 
-  mInfo.mAudio.mHasAudio = true;
   mInfo.mAudio.mRate = mSampleRate;
   mInfo.mAudio.mChannels = mChannels;
 
   *aInfo = mInfo;
 
   *aTags = tags.forget();
 
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
--- a/dom/media/webm/IntelWebMVideoDecoder.cpp
+++ b/dom/media/webm/IntelWebMVideoDecoder.cpp
@@ -114,34 +114,34 @@ IntelWebMVideoDecoder::IsSupportedVideoM
 nsresult
 IntelWebMVideoDecoder::Init(unsigned int aWidth, unsigned int aHeight)
 {
   mPlatform = PlatformDecoderModule::Create();
   if (!mPlatform) {
     return NS_ERROR_FAILURE;
   }
 
-  mDecoderConfig = new VideoDecoderConfig();
-  mDecoderConfig->duration = 0;
-  mDecoderConfig->display_width = aWidth;
-  mDecoderConfig->display_height = aHeight;
+  mDecoderConfig = new VideoInfo();
+  mDecoderConfig->mDuration = 0;
+  mDecoderConfig->mDisplay.width = aWidth;
+  mDecoderConfig->mDisplay.height = aHeight;
 
   switch (mReader->GetVideoCodec()) {
   case NESTEGG_CODEC_VP8:
-    mDecoderConfig->mime_type = "video/webm; codecs=vp8";
+    mDecoderConfig->mMimeType = "video/webm; codecs=vp8";
     break;
   case NESTEGG_CODEC_VP9:
-    mDecoderConfig->mime_type = "video/webm; codecs=vp9";
+    mDecoderConfig->mMimeType = "video/webm; codecs=vp9";
     break;
   default:
     return NS_ERROR_FAILURE;
   }
 
-  const VideoDecoderConfig& video = *mDecoderConfig;
-  if (!IsSupportedVideoMimeType(video.mime_type)) {
+  const VideoInfo& video = *mDecoderConfig;
+  if (!IsSupportedVideoMimeType(video.mMimeType)) {
     return NS_ERROR_FAILURE;
   }
   mMediaDataDecoder =
     mPlatform->CreateDecoder(video,
                              mTaskQueue,
                              this,
                              mReader->GetLayersBackendType(),
                              mReader->GetDecoder()->GetImageContainer());
--- a/dom/media/webm/IntelWebMVideoDecoder.h
+++ b/dom/media/webm/IntelWebMVideoDecoder.h
@@ -9,17 +9,17 @@
 #include <stdint.h>
 
 #include "WebMReader.h"
 #include "nsAutoPtr.h"
 #include "PlatformDecoderModule.h"
 #include "mozilla/Monitor.h"
 
 #include "mp4_demuxer/mp4_demuxer.h"
-#include "mp4_demuxer/DecoderData.h"
+#include "MediaInfo.h"
 #include "MediaData.h"
 
 class MediaTaskQueue;
 
 namespace mozilla {
 
 class VP8Sample;
 
@@ -65,17 +65,17 @@ private:
 
   // TaskQueue on which decoder can choose to decode.
   // Only non-null up until the decoder is created.
   nsRefPtr<FlushableMediaTaskQueue> mTaskQueue;
 
   // Monitor that protects all non-threadsafe state; the primitives
   // that follow.
   Monitor mMonitor;
-  nsAutoPtr<mp4_demuxer::VideoDecoderConfig> mDecoderConfig;
+  nsAutoPtr<VideoInfo> mDecoderConfig;
 
   VP8SampleQueue mSampleQueue;
   nsRefPtr<VP8Sample> mQueuedVideoSample;
   uint64_t mNumSamplesInput;
   uint64_t mNumSamplesOutput;
   uint64_t mLastReportedNumDecodedFrames;
   uint32_t mDecodeAhead;
 
--- a/dom/media/webm/WebMReader.cpp
+++ b/dom/media/webm/WebMReader.cpp
@@ -428,17 +428,16 @@ nsresult WebMReader::ReadMetadata(MediaI
       nsIntSize frameSize(params.width, params.height);
       if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
         // Video track's frame sizes will overflow. Ignore the video track.
         continue;
       }
 
       mVideoTrack = track;
       mHasVideo = true;
-      mInfo.mVideo.mHasVideo = true;
 
       mInfo.mVideo.mDisplay = displaySize;
       mPicture = pictureRect;
       mInitialFrame = frameSize;
 
       switch (params.stereo_mode) {
       case NESTEGG_VIDEO_MONO:
         mInfo.mVideo.mStereoMode = StereoMode::MONO;
@@ -461,17 +460,16 @@ nsresult WebMReader::ReadMetadata(MediaI
       r = nestegg_track_audio_params(mContext, track, &params);
       if (r == -1) {
         Cleanup();
         return NS_ERROR_FAILURE;
       }
 
       mAudioTrack = track;
       mHasAudio = true;
-      mInfo.mAudio.mHasAudio = true;
       mAudioCodec = nestegg_track_codec_id(mContext, track);
       mCodecDelay = params.codec_delay / NS_PER_USEC;
 
       if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
         // Get the Vorbis header data
         unsigned int nheaders = 0;
         r = nestegg_track_codec_data_count(mContext, track, &nheaders);
         if (r == -1 || nheaders != 3) {
--- a/dom/media/wmf/DXVA2Manager.cpp
+++ b/dom/media/wmf/DXVA2Manager.cpp
@@ -1,25 +1,50 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DXVA2Manager.h"
+#include <d3d11.h>
 #include "nsThreadUtils.h"
 #include "ImageContainer.h"
+#include "gfxWindowsPlatform.h"
 #include "D3D9SurfaceImage.h"
+#include "mozilla/layers/D3D11ShareHandleImage.h"
 #include "mozilla/Preferences.h"
+#include "mfapi.h"
+#include "MFTDecoder.h"
+
+const CLSID CLSID_VideoProcessorMFT =
+{
+  0x88753b26,
+  0x5b24,
+  0x49bd,
+  { 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78, 0xc9, 0x82 }
+};
+
+const GUID MF_XVP_PLAYBACK_MODE =
+{
+  0x3c5d293f,
+  0xad67,
+  0x4e29,
+  { 0xaf, 0x12, 0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9 }
+};
+
+DEFINE_GUID(MF_LOW_LATENCY,
+  0x9c27891a, 0xed7a, 0x40e1, 0x88, 0xe8, 0xb2, 0x27, 0x27, 0xa0, 0x24, 0xee);
 
 namespace mozilla {
 
 using layers::Image;
 using layers::ImageContainer;
 using layers::D3D9SurfaceImage;
+using layers::D3D11ShareHandleImage;
 
 class D3D9DXVA2Manager : public DXVA2Manager
 {
 public:
   D3D9DXVA2Manager();
   virtual ~D3D9DXVA2Manager();
 
   HRESULT Init();
@@ -166,17 +191,17 @@ D3D9DXVA2Manager::CopyToImage(IMFSample*
 }
 
 // Count of the number of DXVAManager's we've created. This is also the
 // number of videos we're decoding with DXVA. Use on main thread only.
 static uint32_t sDXVAVideosCount = 0;
 
 /* static */
 DXVA2Manager*
-DXVA2Manager::Create()
+DXVA2Manager::CreateD3D9DXVA()
 {
   MOZ_ASSERT(NS_IsMainThread());
   HRESULT hr;
 
   // DXVA processing takes up a lot of GPU resources, so limit the number of
   // videos we use DXVA with at any one time.
   const uint32_t dxvaLimit =
     Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
@@ -189,16 +214,263 @@ DXVA2Manager::Create()
   if (SUCCEEDED(hr)) {
     return d3d9Manager.forget();
   }
 
   // No hardware accelerated video decoding. :(
   return nullptr;
 }
 
+class D3D11DXVA2Manager : public DXVA2Manager
+{
+public:
+  D3D11DXVA2Manager();
+  virtual ~D3D11DXVA2Manager();
+
+  HRESULT Init();
+
+  IUnknown* GetDXVADeviceManager() override;
+
+  // Copies a region (aRegion) of the video frame stored in aVideoSample
+  // into an image which is returned by aOutImage.
+  HRESULT CopyToImage(IMFSample* aVideoSample,
+                      const nsIntRect& aRegion,
+                      ImageContainer* aContainer,
+                      Image** aOutImage) override;
+
+  virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) override;
+
+private:
+  HRESULT CreateFormatConverter();
+
+  HRESULT CreateOutputSample(RefPtr<IMFSample>& aSample,
+                             RefPtr<ID3D11Texture2D>& aTexture);
+
+  RefPtr<ID3D11Device> mDevice;
+  RefPtr<ID3D11DeviceContext> mContext;
+  RefPtr<IMFDXGIDeviceManager> mDXGIDeviceManager;
+  RefPtr<MFTDecoder> mTransform;
+  uint32_t mWidth;
+  uint32_t mHeight;
+  UINT mDeviceManagerToken;
+};
+
+D3D11DXVA2Manager::D3D11DXVA2Manager()
+  : mWidth(0)
+  , mHeight(0)
+  , mDeviceManagerToken(0)
+{
+}
+
+D3D11DXVA2Manager::~D3D11DXVA2Manager()
+{
+}
+
+IUnknown*
+D3D11DXVA2Manager::GetDXVADeviceManager()
+{
+  MutexAutoLock lock(mLock);
+  return mDXGIDeviceManager;
+}
+
+HRESULT
+D3D11DXVA2Manager::Init()
+{
+  HRESULT hr;
+
+  mDevice = gfxWindowsPlatform::GetPlatform()->CreateD3D11DecoderDevice();
+  NS_ENSURE_TRUE(mDevice, E_FAIL);
+
+  mDevice->GetImmediateContext(byRef(mContext));
+  NS_ENSURE_TRUE(mContext, E_FAIL);
+
+  hr = wmf::MFCreateDXGIDeviceManager(&mDeviceManagerToken, byRef(mDXGIDeviceManager));
+  NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
+
+  hr = mDXGIDeviceManager->ResetDevice(mDevice, mDeviceManagerToken);
+  NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
+
+  mTransform = new MFTDecoder();
+  hr = mTransform->Create(CLSID_VideoProcessorMFT);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = mTransform->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, ULONG_PTR(mDXGIDeviceManager.get()));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::CreateOutputSample(RefPtr<IMFSample>& aSample, RefPtr<ID3D11Texture2D>& aTexture)
+{
+  RefPtr<IMFSample> sample;
+  HRESULT hr = wmf::MFCreateSample(byRef(sample));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  D3D11_TEXTURE2D_DESC desc;
+  desc.Width = mWidth;
+  desc.Height = mHeight;
+  desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+  desc.MipLevels = 1;
+  desc.ArraySize = 1;
+  desc.SampleDesc.Count = 1;
+  desc.SampleDesc.Quality = 0;
+  desc.Usage = D3D11_USAGE_DEFAULT;
+  desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+  desc.CPUAccessFlags = 0;
+  desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+
+  RefPtr<ID3D11Texture2D> texture;
+  hr = mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IMFMediaBuffer> buffer;
+  hr = wmf::MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), texture, 0, FALSE, byRef(buffer));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  sample->AddBuffer(buffer);
+
+  aSample = sample;
+  aTexture = texture;
+  return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::CopyToImage(IMFSample* aVideoSample,
+                               const nsIntRect& aRegion,
+                               ImageContainer* aContainer,
+                               Image** aOutImage)
+{
+  NS_ENSURE_TRUE(aVideoSample, E_POINTER);
+  NS_ENSURE_TRUE(aContainer, E_POINTER);
+  NS_ENSURE_TRUE(aOutImage, E_POINTER);
+
+  // Our video frame is stored in a non-sharable ID3D11Texture2D. We need
+  // to create a copy of that frame as a sharable resource, save its share
+  // handle, and put that handle into the rendering pipeline.
+
+  HRESULT hr = mTransform->Input(aVideoSample);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IMFSample> sample;
+  RefPtr<ID3D11Texture2D> texture;
+  hr = CreateOutputSample(sample, texture);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IDXGIKeyedMutex> keyedMutex;
+  hr = texture->QueryInterface(static_cast<IDXGIKeyedMutex**>(byRef(keyedMutex)));
+  NS_ENSURE_TRUE(SUCCEEDED(hr) && keyedMutex, hr);
+
+  hr = keyedMutex->AcquireSync(0, INFINITE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = mTransform->Output(&sample);
+
+  keyedMutex->ReleaseSync(0);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  ImageFormat format = ImageFormat::D3D11_SHARE_HANDLE_TEXTURE;
+  nsRefPtr<Image> image(aContainer->CreateImage(format));
+  NS_ENSURE_TRUE(image, E_FAIL);
+  NS_ASSERTION(image->GetFormat() == ImageFormat::D3D11_SHARE_HANDLE_TEXTURE,
+               "Wrong format?");
+
+  D3D11ShareHandleImage* videoImage = static_cast<D3D11ShareHandleImage*>(image.get());
+  hr = videoImage->SetData(D3D11ShareHandleImage::Data(texture, mDevice, mContext, aRegion));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  image.forget(aOutImage);
+
+  return S_OK;
+}
+
+HRESULT ConfigureOutput(IMFMediaType* aOutput, void* aData)
+{
+  HRESULT hr = aOutput->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = aOutput->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  gfx::IntSize* size = reinterpret_cast<gfx::IntSize*>(aData);
+  hr = MFSetAttributeSize(aOutput, MF_MT_FRAME_SIZE, size->width, size->height);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::ConfigureForSize(uint32_t aWidth, uint32_t aHeight)
+{
+  mWidth = aWidth;
+  mHeight = aHeight;
+
+  RefPtr<IMFMediaType> inputType;
+  HRESULT hr = wmf::MFCreateMediaType(byRef(inputType));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IMFAttributes> attr = mTransform->GetAttributes();
+
+  hr = attr->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = attr->SetUINT32(MF_LOW_LATENCY, FALSE);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, aWidth, aHeight);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IMFMediaType> outputType;
+  hr = wmf::MFCreateMediaType(byRef(outputType));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  gfx::IntSize size(mWidth, mHeight);
+  hr = mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  return S_OK;
+}
+
+/* static */
+DXVA2Manager*
+DXVA2Manager::CreateD3D11DXVA()
+{
+  // DXVA processing takes up a lot of GPU resources, so limit the number of
+  // videos we use DXVA with at any one time.
+  const uint32_t dxvaLimit =
+    Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
+  if (sDXVAVideosCount == dxvaLimit) {
+    return nullptr;
+  }
+
+  nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
+  HRESULT hr = manager->Init();
+  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+  return manager.forget();
+}
+
 DXVA2Manager::DXVA2Manager()
   : mLock("DXVA2Manager")
 {
   MOZ_ASSERT(NS_IsMainThread());
   ++sDXVAVideosCount;
 }
 
 DXVA2Manager::~DXVA2Manager()
--- a/dom/media/wmf/DXVA2Manager.h
+++ b/dom/media/wmf/DXVA2Manager.h
@@ -17,33 +17,35 @@ namespace mozilla {
 namespace layers {
 class Image;
 class ImageContainer;
 }
 
 class DXVA2Manager {
 public:
 
-  // Creates and initializes a DXVA2Manager. Currently we always use D3D9Ex
-  // to access DXVA, but via this interface we can seamlessly support D3D11
-  // DXVA integration if need be.
-  static DXVA2Manager* Create();
+  // Creates and initializes a DXVA2Manager. We can use DXVA2 via either
+  // D3D9Ex or D3D11.
+  static DXVA2Manager* CreateD3D9DXVA();
+  static DXVA2Manager* CreateD3D11DXVA();
 
   // Returns a pointer to the D3D device manager responsible for managing the
   // device we're using for hardware accelerated video decoding. If we're using
-  // D3D9, this is an IDirect3DDeviceManager9. It is safe to call this on any
-  // thread.
+  // D3D9Ex, this is an IDirect3DDeviceManager9. For D3D11 this is an
+  // IMFDXGIDeviceManager. It is safe to call this on any thread.
   virtual IUnknown* GetDXVADeviceManager() = 0;
 
   // Creates an Image for the video frame stored in aVideoSample.
   virtual HRESULT CopyToImage(IMFSample* aVideoSample,
                               const nsIntRect& aRegion,
                               layers::ImageContainer* aContainer,
                               layers::Image** aOutImage) = 0;
 
+  virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) { return S_OK; }
+
   virtual ~DXVA2Manager();
 
 protected:
   Mutex mLock;
   DXVA2Manager();
 };
 
 } // namespace mozilla
--- a/dom/media/wmf/WMF.h
+++ b/dom/media/wmf/WMF.h
@@ -118,22 +118,31 @@ HRESULT MFTEnumEx(GUID guidCategory,
 HRESULT MFGetService(IUnknown *punkObject,
                      REFGUID guidService,
                      REFIID riid,
                      LPVOID *ppvObject);
 
 HRESULT DXVA2CreateDirect3DDeviceManager9(UINT *pResetToken,
                                           IDirect3DDeviceManager9 **ppDXVAManager);
 
+
+HRESULT MFCreateDXGIDeviceManager(UINT *pResetToken, IMFDXGIDeviceManager **ppDXVAManager);
+
 HRESULT MFCreateSample(IMFSample **ppIMFSample);
 
 HRESULT MFCreateAlignedMemoryBuffer(DWORD cbMaxLength,
                                     DWORD fAlignmentFlags,
                                     IMFMediaBuffer **ppBuffer);
 
+HRESULT MFCreateDXGISurfaceBuffer(REFIID riid,
+                                  IUnknown *punkSurface,
+                                  UINT uSubresourceIndex,
+                                  BOOL fButtomUpWhenLinear,
+                                  IMFMediaBuffer **ppBuffer);
+
 } // end namespace wmf
 } // end namespace mozilla
 
 
 
 #pragma pop_macro("WINVER")
 
 #endif
--- a/dom/media/wmf/WMFReader.cpp
+++ b/dom/media/wmf/WMFReader.cpp
@@ -108,17 +108,17 @@ WMFReader::InitializeDXVA()
 
   LayersBackend backend = layerManager->GetCompositorBackendType();
   if (backend != LayersBackend::LAYERS_D3D9 &&
       backend != LayersBackend::LAYERS_D3D10 &&
       backend != LayersBackend::LAYERS_D3D11) {
     return false;
   }
 
-  mDXVA2Manager = DXVA2Manager::Create();
+  mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA();
 
   return mDXVA2Manager != nullptr;
 }
 
 nsresult
 WMFReader::Init(MediaDecoderReader* aCloneDonor)
 {
   NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
@@ -361,17 +361,17 @@ WMFReader::ConfigureVideoDecoder()
 
   if (FAILED(ConfigureVideoFrameGeometry(mediaType))) {
     NS_WARNING("Failed configured video frame dimensions");
     return hr;
   }
 
   DECODER_LOG("Successfully configured video stream");
 
-  mHasVideo = mInfo.mVideo.mHasVideo = true;
+  mHasVideo = true;
 
   return S_OK;
 }
 
 void
 WMFReader::GetSupportedAudioCodecs(const GUID** aCodecs, uint32_t* aNumCodecs)
 {
   MOZ_ASSERT(aCodecs);
@@ -430,17 +430,17 @@ WMFReader::ConfigureAudioDecoder()
   }
 
   mAudioRate = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_SAMPLES_PER_SECOND, 0);
   mAudioChannels = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_NUM_CHANNELS, 0);
   mAudioBytesPerSample = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_BITS_PER_SAMPLE, 16) / 8;
 
   mInfo.mAudio.mChannels = mAudioChannels;
   mInfo.mAudio.mRate = mAudioRate;
-  mHasAudio = mInfo.mAudio.mHasAudio = true;
+  mHasAudio = true;
 
   DECODER_LOG("Successfully configured audio stream. rate=%u channels=%u bitsPerSample=%u",
               mAudioRate, mAudioChannels, mAudioBytesPerSample);
 
   return S_OK;
 }
 
 HRESULT
@@ -468,17 +468,17 @@ WMFReader::CreateSourceReader()
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = ConfigureVideoDecoder();
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   hr = ConfigureAudioDecoder();
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  if (mUseHwAccel && mInfo.mVideo.mHasVideo) {
+  if (mUseHwAccel && mInfo.HasVideo()) {
     RefPtr<IMFTransform> videoDecoder;
     hr = mSourceReader->GetServiceForStream(MF_SOURCE_READER_FIRST_VIDEO_STREAM,
                                             GUID_NULL,
                                             IID_IMFTransform,
                                             (void**)(IMFTransform**)(byRef(videoDecoder)));
 
     if (SUCCEEDED(hr)) {
       ULONG_PTR manager = ULONG_PTR(mDXVA2Manager->GetDXVADeviceManager());
--- a/dom/media/wmf/WMFUtils.cpp
+++ b/dom/media/wmf/WMFUtils.cpp
@@ -487,17 +487,17 @@ UnloadDLLs()
     sDLLsLoaded = false;
   }
   return S_OK;
 }
 
 #define ENSURE_FUNCTION_PTR_HELPER(FunctionType, FunctionName, DLL) \
   static FunctionType FunctionName##Ptr = nullptr; \
   if (!FunctionName##Ptr) { \
-    FunctionName##Ptr = (FunctionType) GetProcAddress(GetModuleHandle(#DLL), #FunctionName); \
+    FunctionName##Ptr = (FunctionType) GetProcAddress(GetModuleHandleW(L#DLL), #FunctionName); \
     if (!FunctionName##Ptr) { \
       NS_WARNING("Failed to get GetProcAddress of " #FunctionName " from " #DLL); \
       return E_FAIL; \
     } \
   }
 
 #define ENSURE_FUNCTION_PTR(FunctionName, DLL) \
   ENSURE_FUNCTION_PTR_HELPER(decltype(::FunctionName)*, FunctionName, DLL) \
@@ -671,10 +671,29 @@ HRESULT
 MFCreateAlignedMemoryBuffer(DWORD cbMaxLength,
                             DWORD fAlignmentFlags,
                             IMFMediaBuffer **ppBuffer)
 {
   ENSURE_FUNCTION_PTR(MFCreateAlignedMemoryBuffer, mfplat.dll)
   return (MFCreateAlignedMemoryBufferPtr)(cbMaxLength, fAlignmentFlags, ppBuffer);
 }
 
+HRESULT
+MFCreateDXGIDeviceManager(UINT *pResetToken, IMFDXGIDeviceManager **ppDXVAManager)
+{
+  DECL_FUNCTION_PTR(MFCreateDXGIDeviceManager, UINT*, IMFDXGIDeviceManager**);
+  ENSURE_FUNCTION_PTR(MFCreateDXGIDeviceManager, mfplat.dll)
+  return (MFCreateDXGIDeviceManagerPtr)(pResetToken, ppDXVAManager);
+}
+
+HRESULT
+MFCreateDXGISurfaceBuffer(REFIID riid,
+                          IUnknown *punkSurface,
+                          UINT uSubresourceIndex,
+                          BOOL fButtomUpWhenLinear,
+                          IMFMediaBuffer **ppBuffer)
+{
+  ENSURE_FUNCTION_PTR(MFCreateDXGISurfaceBuffer, mfplat.dll)
+  return (MFCreateDXGISurfaceBufferPtr)(riid, punkSurface, uSubresourceIndex, fButtomUpWhenLinear, ppBuffer);
+}
+
 } // end namespace wmf
 } // end namespace mozilla
--- a/dom/media/wmf/moz.build
+++ b/dom/media/wmf/moz.build
@@ -19,16 +19,18 @@ UNIFIED_SOURCES += [
     'WMFReader.cpp',
     'WMFSourceReaderCallback.cpp',
 ]
 
 SOURCES += [
     'WMFUtils.cpp',
 ]
 
+include('/ipc/chromium/chromium-config.mozbuild')
+
 FAIL_ON_WARNINGS = True
 
 FINAL_LIBRARY = 'xul'
 
 if CONFIG['OS_ARCH'] == 'WINNT':
     DEFINES['NOMINMAX'] = True
 
 CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
--- a/dom/quota/QuotaManager.cpp
+++ b/dom/quota/QuotaManager.cpp
@@ -2242,17 +2242,17 @@ QuotaManager::EnsureOriginIsInitialized(
   // Get directory for this origin and persistence type.
   nsCOMPtr<nsIFile> directory;
   rv = GetDirectoryForOrigin(aPersistenceType, aOrigin,
                              getter_AddRefs(directory));
   NS_ENSURE_SUCCESS(rv, rv);
 
   if (IsTreatedAsPersistent(aPersistenceType, aIsApp)) {
     if (mInitializedOrigins.Contains(OriginKey(aPersistenceType, aOrigin))) {
-      NS_ADDREF(*aDirectory = directory);
+      directory.forget(aDirectory);
       return NS_OK;
     }
   } else if (!mTemporaryStorageInitialized) {
     rv = InitializeRepository(aPersistenceType);
     if (NS_WARN_IF(NS_FAILED(rv))) {
       // We have to cleanup partially initialized quota.
       RemoveQuota();
 
--- a/dom/svg/SVGDocument.cpp
+++ b/dom/svg/SVGDocument.cpp
@@ -63,26 +63,27 @@ SVGDocument::GetRootElement(ErrorResult&
     return nullptr;
   }
   return static_cast<nsSVGElement*>(root);
 }
 
 nsresult
 SVGDocument::InsertChildAt(nsIContent* aKid, uint32_t aIndex, bool aNotify)
 {
-  nsresult rv = XMLDocument::InsertChildAt(aKid, aIndex, aNotify);
-
-  if (NS_SUCCEEDED(rv) && aKid->IsElement() && !aKid->IsSVGElement()) {
+  if (aKid->IsElement() && !aKid->IsSVGElement()) {
     // We can get here when well formed XML with a non-SVG root element is
     // served with the SVG MIME type, for example. In that case we need to load
-    // the non-SVG UA sheets or else we can get bugs like bug 1016145.
+    // the non-SVG UA sheets or else we can get bugs like bug 1016145.  Note
+    // that we have to do this _before_ the XMLDocument::InsertChildAt call,
+    // since that can try to construct frames, and we need to have the sheets
+    // loaded by then.
     EnsureNonSVGUserAgentStyleSheetsLoaded();
   }
 
-  return rv;
+  return XMLDocument::InsertChildAt(aKid, aIndex, aNotify);
 }
 
 nsresult
 SVGDocument::Clone(mozilla::dom::NodeInfo *aNodeInfo, nsINode **aResult) const
 {
   NS_ASSERTION(aNodeInfo->NodeInfoManager() == mNodeInfoManager,
                "Can't import this document into another document!");
 
new file mode 100644
--- /dev/null
+++ b/dom/svg/crashtests/1035248-1.svg
@@ -0,0 +1,18 @@
+<svg xmlns="http://www.w3.org/2000/svg">
+<script>
+
+function boom()
+{
+    var outer = document.createElementNS("http://www.w3.org/1999/xhtml", "div");
+    var inner = document.createElementNS("http://www.w3.org/1999/xhtml", "div");
+    inner.setAttributeNS(null, "style", "display: table-row-group;");
+    outer.appendChild(inner);
+
+    document.removeChild(document.documentElement);
+    document.appendChild(outer);
+}
+
+window.addEventListener("load", boom, false);
+
+</script>
+</svg>
new file mode 100644
--- /dev/null
+++ b/dom/svg/crashtests/1035248-2.svg
@@ -0,0 +1,16 @@
+<svg xmlns="http://www.w3.org/2000/svg">
+
+<script>
+
+window.addEventListener("load", function() {
+    var div = document.createElementNS('http://www.w3.org/1999/xhtml', 'div');
+    var tr = document.createElementNS('http://www.w3.org/1999/xhtml', 'tr');
+    tr.style.display = "table-row";
+    document.removeChild(document.documentElement);
+    div.appendChild(tr);
+    document.appendChild(div);
+}, false);
+
+</script>
+
+</svg>
--- a/dom/svg/crashtests/crashtests.list
+++ b/dom/svg/crashtests/crashtests.list
@@ -68,8 +68,10 @@ load 842463-1.html
 load 847138-1.svg
 load 864509.svg
 load 880544-1.svg
 load 880544-2.svg
 load 880544-3.svg
 load 880544-4.svg
 load 880544-5.svg
 load 898915-1.svg
+load 1035248-1.svg
+load 1035248-2.svg
--- a/dom/system/gonk/nsVolumeService.cpp
+++ b/dom/system/gonk/nsVolumeService.cpp
@@ -234,17 +234,17 @@ nsVolumeService::GetVolumeNames(nsIArray
 
     rv = isupportsString->SetData(vol->Name());
     NS_ENSURE_SUCCESS(rv, rv);
 
     rv = volNames->AppendElement(isupportsString, false);
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
-  NS_ADDREF(*aVolNames = volNames);
+  volNames.forget(aVolNames);
   return NS_OK;
 }
 
 void
 nsVolumeService::GetVolumesForIPC(nsTArray<VolumeInfo>* aResult)
 {
   MOZ_ASSERT(XRE_GetProcessType() == GeckoProcessType_Default);
   MOZ_ASSERT(NS_IsMainThread());
--- a/dom/xul/XULDocument.cpp
+++ b/dom/xul/XULDocument.cpp
@@ -2036,18 +2036,17 @@ XULDocument::PrepareToLoadPrototype(nsIU
 
     parser->SetCommand(nsCRT::strcmp(aCommand, "view-source") ? eViewNormal :
                        eViewSource);
 
     parser->SetDocumentCharset(NS_LITERAL_CSTRING("UTF-8"),
                                kCharsetFromDocTypeDefault);
     parser->SetContentSink(sink); // grabs a reference to the parser
 
-    *aResult = parser;
-    NS_ADDREF(*aResult);
+    parser.forget(aResult);
     return NS_OK;
 }
 
 
 nsresult
 XULDocument::ApplyPersistentAttributes()
 {
     // For non-chrome documents, persistance is simply broken
@@ -3656,17 +3655,17 @@ XULDocument::CreateOverlayElement(nsXULP
         new OverlayForwardReference(this, element);
     if (! fwdref)
         return NS_ERROR_OUT_OF_MEMORY;
 
     // transferring ownership to ya...
     rv = AddForwardReference(fwdref);
     if (NS_FAILED(rv)) return rv;
 
-    NS_ADDREF(*aResult = element);
+    element.forget(aResult);
     return NS_OK;
 }
 
 nsresult
 XULDocument::AddAttributes(nsXULPrototypeElement* aPrototype,
                            nsIContent* aElement)
 {
     nsresult rv;
--- a/dom/xul/nsXULControllers.cpp
+++ b/dom/xul/nsXULControllers.cpp
@@ -102,18 +102,17 @@ nsXULControllers::GetControllerForComman
     {
       nsCOMPtr<nsIController> controller;
       controllerData->GetController(getter_AddRefs(controller));
       if (controller)
       {
         bool supportsCommand;
         controller->SupportsCommand(aCommand, &supportsCommand);
         if (supportsCommand) {
-          *_retval = controller;
-          NS_ADDREF(*_retval);
+          controller.forget(_retval);
           return NS_OK;
         }
       }
     }
   }
   
   return NS_OK;
 }
--- a/dom/xul/nsXULPrototypeCache.cpp
+++ b/dom/xul/nsXULPrototypeCache.cpp
@@ -367,17 +367,17 @@ nsXULPrototypeCache::GetInputStream(nsIU
         return NS_ERROR_NOT_AVAILABLE;
 
     rv = NewObjectInputStreamFromBuffer(buf, len, getter_AddRefs(ois));
     NS_ENSURE_SUCCESS(rv, rv);
     buf.forget();
 
     mInputStreamTable.Put(uri, ois);
     
-    NS_ADDREF(*stream = ois);
+    ois.forget(stream);
     return NS_OK;
 }
 
 nsresult
 nsXULPrototypeCache::FinishInputStream(nsIURI* uri) {
     mInputStreamTable.Remove(uri);
     return NS_OK;
 }
@@ -397,17 +397,17 @@ nsXULPrototypeCache::GetOutputStream(nsI
         objectOutput->SetOutputStream(outputStream);
     } else {
         rv = NewObjectOutputWrappedStorageStream(getter_AddRefs(objectOutput), 
                                                  getter_AddRefs(storageStream),
                                                  false);
         NS_ENSURE_SUCCESS(rv, rv);
         mOutputStreamTable.Put(uri, storageStream);
     }
-    NS_ADDREF(*stream = objectOutput);
+    objectOutput.forget(stream);
     return NS_OK;
 }
 
 nsresult
 nsXULPrototypeCache::FinishOutputStream(nsIURI* uri) 
 {
     nsresult rv;
     StartupCache* sc = StartupCache::GetSingleton();
--- a/dom/xul/nsXULPrototypeDocument.cpp
+++ b/dom/xul/nsXULPrototypeDocument.cpp
@@ -85,29 +85,26 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
 NS_INTERFACE_MAP_END
 
 NS_IMPL_CYCLE_COLLECTING_ADDREF(nsXULPrototypeDocument)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(nsXULPrototypeDocument)
 
 NS_IMETHODIMP
 NS_NewXULPrototypeDocument(nsXULPrototypeDocument** aResult)
 {
-    *aResult = new nsXULPrototypeDocument();
-    if (! *aResult)
-        return NS_ERROR_OUT_OF_MEMORY;
+    *aResult = nullptr;
+    nsRefPtr<nsXULPrototypeDocument> doc =
+      new nsXULPrototypeDocument();
 
-    nsresult rv;
-    rv = (*aResult)->Init();
+    nsresult rv = doc->Init();
     if (NS_FAILED(rv)) {
-        delete *aResult;
-        *aResult = nullptr;
         return rv;
     }
 
-    NS_ADDREF(*aResult);
+    doc.forget(aResult);
     return rv;
 }
 
 //----------------------------------------------------------------------
 //
 // nsISerializable methods
 //
 
--- a/dom/xul/templates/nsXULContentBuilder.cpp
+++ b/dom/xul/templates/nsXULContentBuilder.cpp
@@ -1245,18 +1245,17 @@ nsXULContentBuilder::EnsureElementHasGen
         if (NS_FAILED(rv))
             return rv;
 
         // XXX Note that the notification ensures we won't batch insertions! This could be bad! - Dave
         rv = parent->AppendChildTo(element, aNotify);
         if (NS_FAILED(rv))
             return rv;
 
-        *result = element;
-        NS_ADDREF(*result);
+        element.forget(result);
         return NS_ELEMENT_GOT_CREATED;
     }
     else {
         return NS_ELEMENT_WAS_THERE;
     }
 }
 
 bool
--- a/dom/xul/templates/nsXULTemplateQueryProcessorRDF.cpp
+++ b/dom/xul/templates/nsXULTemplateQueryProcessorRDF.cpp
@@ -420,18 +420,17 @@ nsXULTemplateQueryProcessorRDF::CompileQ
     }
 
     rv = lastnode->AddChild(instnode);
     if (NS_FAILED(rv))
         return rv;
 
     mQueries.AppendElement(query);
 
-    *_retval = query;
-    NS_ADDREF(*_retval);
+    query.forget(_retval);
 
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorRDF::GenerateResults(nsISupports* aDatasource,
                                                 nsIXULTemplateResult* aRef,
                                                 nsISupports* aQuery,
@@ -579,22 +578,21 @@ nsXULTemplateQueryProcessorRDF::Translat
     // make sure the RDF service is set up
     nsresult rv = InitGlobals();
     if (NS_FAILED(rv))
         return rv;
 
     nsCOMPtr<nsIRDFResource> uri;
     gRDFService->GetUnicodeResource(aRefString, getter_AddRefs(uri));
 
-    nsXULTemplateResultRDF* refresult = new nsXULTemplateResultRDF(uri);
+    nsRefPtr<nsXULTemplateResultRDF> refresult = new nsXULTemplateResultRDF(uri);
     if (! refresult)
         return NS_ERROR_OUT_OF_MEMORY;
 
-    *aRef = refresult;
-    NS_ADDREF(*aRef);
+    refresult.forget(aRef);
 
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorRDF::CompareResults(nsIXULTemplateResult* aLeft,
                                                nsIXULTemplateResult* aRight,
                                                nsIAtom* aVar,
--- a/dom/xul/templates/nsXULTemplateQueryProcessorStorage.cpp
+++ b/dom/xul/templates/nsXULTemplateQueryProcessorStorage.cpp
@@ -235,17 +235,17 @@ nsXULTemplateQueryProcessorStorage::GetD
     // ok now we have an URI of a sqlite file
     nsCOMPtr<mozIStorageConnection> connection;
     rv = storage->OpenDatabase(databaseFile, getter_AddRefs(connection));
     if (NS_FAILED(rv)) {
         nsXULContentUtils::LogTemplateError(ERROR_TEMPLATE_STORAGE_CANNOT_OPEN_DATABASE);
         return rv;
     }
 
-    NS_ADDREF(*aReturn = connection);
+    connection.forget(aReturn);
     return NS_OK;
 }
 
 
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorStorage::InitializeForBuilding(nsISupports* aDatasource,
                                                           nsIXULTemplateBuilder* aBuilder,
--- a/dom/xul/templates/nsXULTemplateQueryProcessorXML.cpp
+++ b/dom/xul/templates/nsXULTemplateQueryProcessorXML.cpp
@@ -280,18 +280,17 @@ nsXULTemplateQueryProcessorXML::CompileQ
 
                 nsCOMPtr<nsIAtom> varatom = do_GetAtom(var);
 
                 query->AddBinding(varatom, Move(compiledexpr));
             }
         }
     }
 
-    *_retval = query;
-    NS_ADDREF(*_retval);
+    query.forget(_retval);
 
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorXML::GenerateResults(nsISupports* aDatasource,
                                                 nsIXULTemplateResult* aRef,
                                                 nsISupports* aQuery,
@@ -322,22 +321,21 @@ nsXULTemplateQueryProcessorXML::Generate
     ErrorResult rv;
     nsRefPtr<XPathResult> exprresults =
         expr->Evaluate(*context, XPathResult::ORDERED_NODE_SNAPSHOT_TYPE,
                        nullptr, rv);
     if (rv.Failed()) {
         return rv.ErrorCode();
     }
 
-    nsXULTemplateResultSetXML* results =
+    nsRefPtr<nsXULTemplateResultSetXML> results =
         new nsXULTemplateResultSetXML(xmlquery, exprresults.forget(),
                                       xmlquery->GetBindingSet());
 
-    *aResults = results;
-    NS_ADDREF(*aResults);
+    results.forget(aResults);
 
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorXML::AddBinding(nsIDOMNode* aRuleNode,
                                            nsIAtom* aVar,
                                            nsIAtom* aRef,
@@ -381,18 +379,18 @@ nsXULTemplateQueryProcessorXML::Translat
         rootElement = doc->GetRootElement();
     else
         rootElement = do_QueryInterface(aDatasource);
 
     // if no root element, just return. The document may not have loaded yet
     if (!rootElement)
         return NS_OK;
     
-    *aRef = new nsXULTemplateResultXML(nullptr, rootElement, nullptr);
-    NS_ADDREF(*aRef);
+    nsRefPtr<nsXULTemplateResultXML> result = new nsXULTemplateResultXML(nullptr, rootElement, nullptr);
+    result.forget(aRef);
 
     return NS_OK;
 }
 
 
 NS_IMETHODIMP
 nsXULTemplateQueryProcessorXML::CompareResults(nsIXULTemplateResult* aLeft,
                                                nsIXULTemplateResult* aRight,
--- a/dom/xul/templates/nsXULTemplateResultSetRDF.cpp
+++ b/dom/xul/templates/nsXULTemplateResultSetRDF.cpp
@@ -71,13 +71,12 @@ nsXULTemplateResultSetRDF::GetNext(nsISu
         return NS_ERROR_OUT_OF_MEMORY;
 
     // add the supporting memory elements to the processor's map. These are
     // used to remove the results when an assertion is removed from the graph
     mProcessor->AddMemoryElements(mCurrent->mInstantiation, nextresult);
 
     mCheckedNext = false;
 
-    *aResult = nextresult;
-    NS_ADDREF(*aResult);
+    nextresult.forget(aResult);
 
     return NS_OK;
 }
--- a/embedding/components/printingui/ipc/PrintProgressDialogChild.cpp
+++ b/embedding/components/printingui/ipc/PrintProgressDialogChild.cpp
@@ -13,24 +13,24 @@ using mozilla::unused;
 
 namespace mozilla {
 namespace embedding {
 
 NS_IMPL_ISUPPORTS(PrintProgressDialogChild,
                   nsIWebProgressListener,
                   nsIPrintProgressParams)
 
-MOZ_IMPLICIT PrintProgressDialogChild::PrintProgressDialogChild(
+PrintProgressDialogChild::PrintProgressDialogChild(
   nsIObserver* aOpenObserver) :
   mOpenObserver(aOpenObserver)
 {
   MOZ_COUNT_CTOR(PrintProgressDialogChild);
 }
 
-MOZ_IMPLICIT PrintProgressDialogChild::~PrintProgressDialogChild()
+PrintProgressDialogChild::~PrintProgressDialogChild()
 {
   // When the printing engine stops supplying information about printing
   // progress, it'll drop references to us and destroy us. We need to signal
   // the parent to decrement its refcount, as well as prevent it from attempting
   // to contact us further.
   unused << Send__delete__(this);
   MOZ_COUNT_DTOR(PrintProgressDialogChild);
 }
--- a/embedding/components/printingui/ipc/PrintProgressDialogParent.cpp
+++ b/embedding/components/printingui/ipc/PrintProgressDialogParent.cpp
@@ -9,23 +9,23 @@
 
 using mozilla::unused;
 
 namespace mozilla {
 namespace embedding {
 
 NS_IMPL_ISUPPORTS(PrintProgressDialogParent, nsIObserver)
 
-MOZ_IMPLICIT PrintProgressDialogParent::PrintProgressDialogParent() :
+PrintProgressDialogParent::PrintProgressDialogParent() :
   mActive(true)
 {
   MOZ_COUNT_CTOR(PrintProgressDialogParent);
 }
 
-MOZ_IMPLICIT PrintProgressDialogParent::~PrintProgressDialogParent()
+PrintProgressDialogParent::~PrintProgressDialogParent()
 {
   MOZ_COUNT_DTOR(PrintProgressDialogParent);
 }
 
 void
 PrintProgressDialogParent::SetWebProgressListener(nsIWebProgressListener* aListener)
 {
   mWebProgressListener = aListener;
--- a/embedding/components/printingui/ipc/PrintSettingsDialogChild.cpp
+++ b/embedding/components/printingui/ipc/PrintSettingsDialogChild.cpp
@@ -4,23 +4,23 @@
 
 #include "PrintSettingsDialogChild.h"
 
 using mozilla::unused;
 
 namespace mozilla {
 namespace embedding {
 
-MOZ_IMPLICIT PrintSettingsDialogChild::PrintSettingsDialogChild()
+PrintSettingsDialogChild::PrintSettingsDialogChild()
 : mReturned(false)
 {
   MOZ_COUNT_CTOR(PrintSettingsDialogChild);
 }
 
-MOZ_IMPLICIT PrintSettingsDialogChild::~PrintSettingsDialogChild()
+PrintSettingsDialogChild::~PrintSettingsDialogChild()
 {
   MOZ_COUNT_DTOR(PrintSettingsDialogChild);
 }
 
 bool
 PrintSettingsDialogChild::Recv__delete__(const nsresult& aResult,
                                          const PrintData& aData)
 {
--- a/embedding/components/printingui/ipc/PrintSettingsDialogParent.cpp
+++ b/embedding/components/printingui/ipc/PrintSettingsDialogParent.cpp
@@ -3,22 +3,22 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "PrintSettingsDialogParent.h"
 
 // C++ file contents
 namespace mozilla {
 namespace embedding {
 
-MOZ_IMPLICIT PrintSettingsDialogParent::PrintSettingsDialogParent()
+PrintSettingsDialogParent::PrintSettingsDialogParent()
 {
   MOZ_COUNT_CTOR(PrintSettingsDialogParent);
 }
 
-MOZ_IMPLICIT PrintSettingsDialogParent::~PrintSettingsDialogParent()
+PrintSettingsDialogParent::~PrintSettingsDialogParent()
 {
   MOZ_COUNT_DTOR(PrintSettingsDialogParent);
 }
 
 void
 PrintSettingsDialogParent::ActorDestroy(ActorDestroyReason aWhy)
 {
 }
--- a/embedding/components/printingui/ipc/PrintingParent.cpp
+++ b/embedding/components/printingui/ipc/PrintingParent.cpp
@@ -209,21 +209,21 @@ PrintingParent::DOMWindowFromBrowserPare
   nsCOMPtr<nsIDOMWindow> parentWin = do_QueryInterface(frame->OwnerDoc()->GetWindow());
   if (!parentWin) {
     return nullptr;
   }
 
   return parentWin;
 }
 
-MOZ_IMPLICIT PrintingParent::PrintingParent()
+PrintingParent::PrintingParent()
 {
     MOZ_COUNT_CTOR(PrintingParent);
 }
 
-MOZ_IMPLICIT PrintingParent::~PrintingParent()
+PrintingParent::~PrintingParent()
 {
     MOZ_COUNT_DTOR(PrintingParent);
 }
 
 } // namespace embedding
 } // namespace mozilla
 
--- a/gfx/2d/BaseRect.h
+++ b/gfx/2d/BaseRect.h
@@ -221,30 +221,16 @@ struct BaseRect {
   {
     x -= aMargin.left;
     y -= aMargin.top;
     width += aMargin.LeftRight();
     height += aMargin.TopBottom();
   }
   void Inflate(const SizeT& aSize) { Inflate(aSize.width, aSize.height); }
 
-  void InflateToMultiple(const SizeT& aMultiple)
-  {
-    T xMost = XMost();
-    T yMost = YMost();
-
-    x = static_cast<T>(floor(x / aMultiple.width)) * aMultiple.width;
-    y = static_cast<T>(floor(y / aMultiple.height)) * aMultiple.height;
-    xMost = static_cast<T>(ceil(x / aMultiple.width)) * aMultiple.width;
-    yMost = static_cast<T>(ceil(y / aMultiple.height)) * aMultiple.height;
-
-    width = xMost - x;
-    height = yMost - y;
-  }
-
   void Deflate(T aD) { Deflate(aD, aD); }
   void Deflate(T aDx, T aDy)
   {
     x += aDx;
     y += aDy;
     width = std::max(T(0), width - 2 * aDx);
     height = std::max(T(0), height - 2 * aDy);
   }
new file mode 100644
--- /dev/null
+++ b/gfx/2d/NumericTools.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MOZILLA_GFX_NUMERICTOOLS_H_
+#define MOZILLA_GFX_NUMERICTOOLS_H_
+
+// Returns the largest multiple of aMultiplier that's <= x.
+// Same as int32_t(floor(double(x) / aMultiplier)) * aMultiplier,
+// but faster.
+static int32_t
+RoundDownToMultiple(int32_t x, int32_t aMultiplier)
+{
+  // We don't use float division + floor because that's hard for the compiler
+  // to optimize.
+  int mod = x % aMultiplier;
+  if (x > 0) {
+    return x - mod;
+  }
+  return mod ? x - aMultiplier - mod : x;
+}
+
+// Returns the smallest multiple of aMultiplier that's >= x.
+// Same as int32_t(ceil(double(x) / aMultiplier)) * aMultiplier,
+// but faster.
+static int32_t
+RoundUpToMultiple(int32_t x, int32_t aMultiplier)
+{
+  int mod = x % aMultiplier;
+  if (x > 0) {
+    return mod ? x + aMultiplier - mod : x;
+  }
+  return x - mod;
+}
+
+#endif /* MOZILLA_GFX_NUMERICTOOLS_H_ */
--- a/gfx/2d/moz.build
+++ b/gfx/2d/moz.build
@@ -21,16 +21,17 @@ EXPORTS.mozilla.gfx += [
     'BorrowedContext.h',
     'Coord.h',
     'DataSurfaceHelpers.h',
     'DrawTargetTiled.h',
     'Filters.h',
     'Helpers.h',
     'Logging.h',
     'Matrix.h',
+    'NumericTools.h',
     'PathHelpers.h',
     'PatternHelpers.h',
     'Point.h',
     'Quaternion.h',
     'Rect.h',
     'Scale.h',
     'ScaleFactor.h',
     'ScaleFactors2D.h',
--- a/gfx/gl/GLContextEGL.h
+++ b/gfx/gl/GLContextEGL.h
@@ -91,24 +91,16 @@ public:
     // hold a reference to the given surface
     // for the lifetime of this context.
     void HoldSurface(gfxASurface *aSurf);
 
     EGLContext GetEGLContext() {
         return mContext;
     }
 
-    EGLSurface GetEGLSurface() {
-        return mSurface;
-    }
-
-    EGLDisplay GetEGLDisplay() {
-        return EGL_DISPLAY();
-    }
-
     bool BindTex2DOffscreen(GLContext *aOffscreen);
     void UnbindTex2DOffscreen(GLContext *aOffscreen);
     void BindOffscreenFramebuffer();
 
     static already_AddRefed<GLContextEGL>
     CreateEGLPixmapOffscreenContext(const gfxIntSize& size);
 
     static already_AddRefed<GLContextEGL>
--- a/gfx/gl/GLContextProviderEGL.cpp
+++ b/gfx/gl/GLContextProviderEGL.cpp
@@ -225,16 +225,27 @@ GLContextEGL::GLContextEGL(
     , mOwnsContext(true)
 {
     // any EGL contexts will always be GLESv2
     SetProfileVersion(ContextProfile::OpenGLES, 200);
 
 #ifdef DEBUG
     printf_stderr("Initializing context %p surface %p on display %p\n", mContext, mSurface, EGL_DISPLAY());
 #endif
+#if defined(MOZ_WIDGET_GONK)
+    if (!mIsOffscreen) {
+        mHwc = HwcComposer2D::GetInstance();
+        MOZ_ASSERT(!mHwc->Initialized());
+
+        if (mHwc->Init(EGL_DISPLAY(), mSurface, this)) {
+            NS_WARNING("HWComposer initialization failed!");
+            mHwc = nullptr;
+        }
+    }
+#endif
 }
 
 GLContextEGL::~GLContextEGL()
 {
     MarkDestroyed();
 
     // Wrapped context should not destroy eglContext/Surface
     if (!mOwnsContext) {
@@ -447,17 +458,26 @@ GLContextEGL::SetupLookupFunction()
 
 bool
 GLContextEGL::SwapBuffers()
 {
     EGLSurface surface = mSurfaceOverride != EGL_NO_SURFACE
                           ? mSurfaceOverride
                           : mSurface;
     if (surface) {
-        return sEGLLibrary.fSwapBuffers(EGL_DISPLAY(), surface);
+#ifdef MOZ_WIDGET_GONK
+        if (!mIsOffscreen) {
+            if (mHwc) {
+                return mHwc->Render(EGL_DISPLAY(), surface);
+            } else {
+                return GetGonkDisplay()->SwapBuffers(EGL_DISPLAY(), surface);
+            }
+        } else
+#endif
+            return sEGLLibrary.fSwapBuffers(EGL_DISPLAY(), surface);
     } else {
         return false;
     }
 }
 
 // hold a reference to the given surface
 // for the lifetime of this context.
 void
new file mode 100644
--- /dev/null
+++ b/gfx/layers/D3D11ShareHandleImage.cpp
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMF.h"
+#include "D3D11ShareHandleImage.h"
+#include "gfxImageSurface.h"
+#include "gfxWindowsPlatform.h"
+#include "mozilla/layers/TextureClient.h"
+#include "mozilla/layers/TextureD3D11.h"
+#include "mozilla/layers/CompositableClient.h"
+#include "mozilla/layers/CompositableForwarder.h"
+#include "d3d11.h"
+
+namespace mozilla {
+namespace layers {
+
+HRESULT
+D3D11ShareHandleImage::SetData(const Data& aData)
+{
+  NS_ENSURE_TRUE(aData.mTexture, E_POINTER);
+  mTexture = aData.mTexture;
+  mPictureRect = aData.mRegion;
+
+  D3D11_TEXTURE2D_DESC frameDesc;
+  mTexture->GetDesc(&frameDesc);
+
+  mFormat = gfx::SurfaceFormat::B8G8R8A8;
+  mSize.width = frameDesc.Width;
+  mSize.height = frameDesc.Height;
+
+  return S_OK;
+}
+
+gfx::IntSize
+D3D11ShareHandleImage::GetSize()
+{
+  return mSize;
+}
+
+TextureClient*
+D3D11ShareHandleImage::GetTextureClient(CompositableClient* aClient)
+{
+  if (!mTextureClient) {
+    RefPtr<TextureClientD3D11> textureClient =
+      new TextureClientD3D11(aClient->GetForwarder(),
+                             mFormat,
+                             TextureFlags::DEFAULT);
+    textureClient->InitWith(mTexture, mSize);
+    mTextureClient = textureClient;
+  }
+  return mTextureClient;
+}
+
+TemporaryRef<gfx::SourceSurface>
+D3D11ShareHandleImage::GetAsSourceSurface()
+{
+  if (!mTexture) {
+    NS_WARNING("Cannot readback from shared texture because no texture is available.");
+    return nullptr;
+  }
+
+  RefPtr<ID3D11Device> device;
+  mTexture->GetDevice(byRef(device));
+
+  RefPtr<IDXGIKeyedMutex> keyedMutex;
+  if (FAILED(mTexture->QueryInterface(static_cast<IDXGIKeyedMutex**>(byRef(keyedMutex))))) {
+    NS_WARNING("Failed to QueryInterface for IDXGIKeyedMutex, strange.");
+    return nullptr;
+  }
+
+  if (FAILED(keyedMutex->AcquireSync(0, 0))) {
+    NS_WARNING("Failed to acquire sync for keyedMutex, plugin failed to release?");
+    return nullptr;
+  }
+
+  D3D11_TEXTURE2D_DESC desc;
+  mTexture->GetDesc(&desc);
+
+  CD3D11_TEXTURE2D_DESC softDesc(desc.Format, desc.Width, desc.Height);
+  softDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+  softDesc.BindFlags = 0;
+  softDesc.MiscFlags = 0;
+  softDesc.MipLevels = 1;
+  softDesc.Usage = D3D11_USAGE_STAGING;
+
+  RefPtr<ID3D11Texture2D> softTexture;
+  HRESULT hr = device->CreateTexture2D(&softDesc,
+                                       NULL,
+                                       static_cast<ID3D11Texture2D**>(byRef(softTexture)));
+
+  if (FAILED(hr)) {
+    NS_WARNING("Failed to create 2D staging texture.");
+    keyedMutex->ReleaseSync(0);
+    return nullptr;
+  }
+
+  RefPtr<ID3D11DeviceContext> context;
+  device->GetImmediateContext(byRef(context));
+  if (!context) {
+    keyedMutex->ReleaseSync(0);
+    return nullptr;
+  }
+
+  context->CopyResource(softTexture, mTexture);
+  keyedMutex->ReleaseSync(0);
+
+  RefPtr<gfx::DataSourceSurface> surface =
+    gfx::Factory::CreateDataSourceSurface(mSize, gfx::SurfaceFormat::B8G8R8X8);
+  if (NS_WARN_IF(!surface)) {
+    return nullptr;
+  }
+
+  gfx::DataSourceSurface::MappedSurface mappedSurface;
+  if (!surface->Map(gfx::DataSourceSurface::WRITE, &mappedSurface)) {
+    return nullptr;
+  }
+
+  D3D11_MAPPED_SUBRESOURCE map;
+  hr = context->Map(softTexture, 0, D3D11_MAP_READ, 0, &map);
+  if (!SUCCEEDED(hr)) {
+    surface->Unmap();
+    return nullptr;
+  }
+
+  for (int y = 0; y < mSize.height; y++) {
+    memcpy(mappedSurface.mData + mappedSurface.mStride * y,
+           (unsigned char*)(map.pData) + map.RowPitch * y,
+           mSize.width * 4);
+  }
+
+  context->Unmap(softTexture, 0);
+  surface->Unmap();
+
+  return surface.forget();
+}
+
+ID3D11Texture2D*
+D3D11ShareHandleImage::GetTexture() const {
+  return mTexture;
+}
+
+} /* layers */
+} /* mozilla */
new file mode 100644
--- /dev/null
+++ b/gfx/layers/D3D11ShareHandleImage.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GFX_D311_SHARE_HANDLE_IMAGE_H
+#define GFX_D311_SHARE_HANDLE_IMAGE_H
+
+#include "mozilla/RefPtr.h"
+#include "ImageContainer.h"
+#include "nsAutoPtr.h"
+#include "d3d11.h"
+#include "mozilla/layers/TextureClient.h"
+
+namespace mozilla {
+namespace layers {
+
+// Image class that wraps a ID3D11Texture2D. This class copies the image
+// passed into SetData(), so that it can be accessed from other D3D devices.
+// This class also manages the synchronization of the copy, to ensure the
+// resource is ready to use.
+class D3D11ShareHandleImage : public Image {
+public:
+
+  struct Data {
+    Data(ID3D11Texture2D* aTexture,
+         ID3D11Device* aDevice,
+         ID3D11DeviceContext* aContext,
+         const nsIntRect& aRegion)
+      : mTexture(aTexture),
+        mDevice(aDevice),
+        mContext(aContext),
+        mRegion(aRegion) {}
+    RefPtr<ID3D11Texture2D> mTexture;
+    RefPtr<ID3D11Device> mDevice;
+    RefPtr<ID3D11DeviceContext> mContext;
+    nsIntRect mRegion;
+  };
+
+  D3D11ShareHandleImage() : Image(NULL, ImageFormat::D3D11_SHARE_HANDLE_TEXTURE), mSize(0, 0) {}
+  virtual ~D3D11ShareHandleImage() {}
+
+  // Copies the surface into a sharable texture's surface, and initializes
+  // the image.
+  HRESULT SetData(const Data& aData);
+
+  gfx::IntSize GetSize() override;
+
+  virtual TemporaryRef<gfx::SourceSurface> GetAsSourceSurface() override;
+
+  virtual TextureClient* GetTextureClient(CompositableClient* aClient) override;
+
+  ID3D11Texture2D* GetTexture() const;
+
+  virtual nsIntRect GetPictureRect() override { return mPictureRect; }
+
+private:
+
+  gfx::IntSize mSize;
+  nsIntRect mPictureRect;
+  RefPtr<ID3D11Texture2D> mTexture;
+  RefPtr<TextureClient> mTextureClient;
+  HANDLE mShareHandle;
+  gfx::SurfaceFormat mFormat;
+
+};
+
+} // namespace layers
+} // namespace mozilla
+
+#endif // GFX_D311_SHARE_HANDLE_IMAGE_H
--- a/gfx/layers/ImageContainer.cpp
+++ b/gfx/layers/ImageContainer.cpp
@@ -31,16 +31,17 @@
 #include "MacIOSurfaceImage.h"
 #endif
 
 #ifdef XP_WIN
 #include "gfxD2DSurface.h"
 #include "gfxWindowsPlatform.h"
 #include <d3d10_1.h>
 #include "D3D9SurfaceImage.h"
+#include "D3D11ShareHandleImage.h"
 #endif
 
 namespace mozilla {
 namespace layers {
 
 using namespace mozilla::ipc;
 using namespace android;
 using namespace mozilla::gfx;
@@ -89,16 +90,20 @@ ImageFactory::CreateImage(ImageFormat aF
   }
 #ifdef XP_MACOSX
   if (aFormat == ImageFormat::MAC_IOSURFACE) {
     img = new MacIOSurfaceImage();
     return img.forget();
   }
 #endif
 #ifdef XP_WIN
+  if (aFormat == ImageFormat::D3D11_SHARE_HANDLE_TEXTURE) {
+    img = new D3D11ShareHandleImage();
+    return img.forget();
+  }
   if (aFormat == ImageFormat::D3D9_RGB32_TEXTURE) {
     img = new D3D9SurfaceImage();
     return img.forget();
   }
 #endif
   return nullptr;
 }
 
--- a/gfx/layers/ImageTypes.h
+++ b/gfx/layers/ImageTypes.h
@@ -76,17 +76,22 @@ enum class ImageFormat {
    * IDirect3DTexture9 in RGB32 layout.
    */
   D3D9_RGB32_TEXTURE,
 
   /**
    * An Image type carries an opaque handle once for each stream.
    * The opaque handle would be a platform specific identifier.
    */
-  OVERLAY_IMAGE
+  OVERLAY_IMAGE,
+
+  /**
+   * A share handle to a ID3D11Texture2D.
+   */
+  D3D11_SHARE_HANDLE_TEXTURE
 };
 
 enum class StereoMode {
   MONO,
   LEFT_RIGHT,
   RIGHT_LEFT,
   BOTTOM_TOP,
   TOP_BOTTOM
--- a/gfx/layers/apz/src/APZCTreeManager.cpp
+++ b/gfx/layers/apz/src/APZCTreeManager.cpp
@@ -875,39 +875,47 @@ APZCTreeManager::ProcessEvent(WidgetInpu
 }
 
 nsEventStatus
 APZCTreeManager::ProcessWheelEvent(WidgetWheelEvent& aEvent,
                                    ScrollableLayerGuid* aOutTargetGuid,
                                    uint64_t* aOutInputBlockId)
 {
   ScrollWheelInput::ScrollMode scrollMode = ScrollWheelInput::SCROLLMODE_INSTANT;
-  if (gfxPrefs::SmoothScrollEnabled() && gfxPrefs::WheelSmoothScrollEnabled()) {
+  if (aEvent.deltaMode == nsIDOMWheelEvent::DOM_DELTA_LINE &&
+      gfxPrefs::SmoothScrollEnabled() && gfxPrefs::WheelSmoothScrollEnabled()) {
     scrollMode = ScrollWheelInput::SCROLLMODE_SMOOTH;
   }
 
   ScreenPoint origin(aEvent.refPoint.x, aEvent.refPoint.y);
   ScrollWheelInput input(aEvent.time, aEvent.timeStamp, 0,
                          scrollMode,
-                         ScrollWheelInput::SCROLLDELTA_LINE,
+                         ScrollWheelInput::DeltaTypeForDeltaMode(aEvent.deltaMode),
                          origin,
                          aEvent.deltaX,
                          aEvent.deltaY);
 
   nsEventStatus status = ReceiveInputEvent(input, aOutTargetGuid, aOutInputBlockId);
   aEvent.refPoint.x = input.mOrigin.x;
   aEvent.refPoint.y = input.mOrigin.y;
   return status;
 }
 
 bool
 APZCTreeManager::WillHandleWheelEvent(WidgetWheelEvent* aEvent)
 {
+  // Only support pixel units on OS X for now because it causes more test
+  // failures when APZ is turned on, and we want to do that on Windows very
+  // soon.
   return EventStateManager::WheelEventIsScrollAction(aEvent) &&
-         aEvent->deltaMode == nsIDOMWheelEvent::DOM_DELTA_LINE &&
+         (aEvent->deltaMode == nsIDOMWheelEvent::DOM_DELTA_LINE
+#ifdef XP_MACOSX
+            || aEvent->deltaMode == nsIDOMWheelEvent::DOM_DELTA_PIXEL
+#endif
+           ) &&
          !EventStateManager::WheelEventNeedsDeltaMultipliers(aEvent);
 }
 
 nsEventStatus
 APZCTreeManager::ReceiveInputEvent(WidgetInputEvent& aEvent,
                                    ScrollableLayerGuid* aOutTargetGuid,
                                    uint64_t* aOutInputBlockId)
 {
--- a/gfx/layers/apz/src/AsyncPanZoomController.cpp
+++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp
@@ -1419,16 +1419,20 @@ AsyncPanZoomController::GetScrollWheelDe
   LayoutDevicePoint delta(aEvent.mDeltaX, aEvent.mDeltaY);
   switch (aEvent.mDeltaType) {
     case ScrollWheelInput::SCROLLDELTA_LINE: {
       LayoutDeviceIntSize scrollAmount = mFrameMetrics.GetLineScrollAmount();
       delta.x *= scrollAmount.width;
       delta.y *= scrollAmount.height;
       break;
     }
+    case ScrollWheelInput::SCROLLDELTA_PIXEL: {
+      // aOutDeltaX is already in CSS pixels.
+      break;
+    }
     default:
       MOZ_ASSERT_UNREACHABLE("unexpected scroll delta type");
   }
 
   if (gfxPrefs::MouseWheelHasRootScrollDeltaOverride()) {
     // Only apply delta multipliers if we're increasing the delta.
     double hfactor = double(gfxPrefs::MouseWheelRootHScrollDeltaFactor()) / 100;
     double vfactor = double(gfxPrefs::MouseWheelRootVScrollDeltaFactor()) / 100;
--- a/gfx/layers/apz/src/Axis.cpp
+++ b/gfx/layers/apz/src/Axis.cpp
@@ -258,17 +258,19 @@ void Axis::StepOverscrollAnimation(doubl
   mVelocity *= pow(double(1 - kSpringFriction), aStepDurationMilliseconds);
   AXIS_LOG("%p|%s sampled overscroll animation, leaving velocity at %f\n",
     mAsyncPanZoomController, Name(), mVelocity);
 
   // At the peak of each oscillation, record new offset and scaling factors for
   // overscroll, to ensure that GetOverscroll always returns a value of the
   // same sign, and that this value is correctly adjusted as the spring is
   // dampened.
-  bool velocitySignChange = (oldVelocity * mVelocity) < 0;
+  // To handle the case where one of the velocity samples is exaclty zero,
+  // consider a sign change to have occurred when the outgoing velocity is zero.
+  bool velocitySignChange = (oldVelocity * mVelocity) < 0 || mVelocity == 0;
   if (mFirstOverscrollAnimationSample == 0.0f) {
     mFirstOverscrollAnimationSample = mOverscroll;
 
     // It's possible to start sampling overscroll with velocity == 0, or
     // velocity in the opposite direction of overscroll, so make sure we
     // correctly record the peak in this case.
     if (mOverscroll != 0 && ((mOverscroll > 0 ? oldVelocity : -oldVelocity) <= 0.0f)) {
       velocitySignChange = true;
--- a/gfx/layers/apz/util/APZCCallbackHelper.cpp
+++ b/gfx/layers/apz/util/APZCCallbackHelper.cpp
@@ -176,25 +176,37 @@ SetDisplayPortMargins(nsIDOMWindowUtils*
   nsRect base(0, 0,
               baseCSS.width * nsPresContext::AppUnitsPerCSSPixel(),
               baseCSS.height * nsPresContext::AppUnitsPerCSSPixel());
   nsLayoutUtils::SetDisplayPortBaseIfNotSet(aContent, base);
 }
 
 void
 APZCCallbackHelper::UpdateRootFrame(nsIDOMWindowUtils* aUtils,
+                                    nsIPresShell* aPresShell,
                                     FrameMetrics& aMetrics)
 {
   // Precondition checks
   MOZ_ASSERT(aUtils);
+  MOZ_ASSERT(aPresShell);
   MOZ_ASSERT(aMetrics.GetUseDisplayPortMargins());
   if (aMetrics.GetScrollId() == FrameMetrics::NULL_SCROLL_ID) {
     return;
   }
 
+  float presShellResolution = nsLayoutUtils::GetResolution(aPresShell);
+
+  // If the pres shell resolution has changed on the content side side
+  // the time this repaint request was fired, consider this request out of date
+  // and drop it; setting a zoom based on the out-of-date resolution can have
+  // the effect of getting us stuck with the stale resolution.
+  if (presShellResolution != aMetrics.GetPresShellResolution()) {
+    return;
+  }
+
   // Set the scroll port size, which determines the scroll range. For example if
   // a 500-pixel document is shown in a 100-pixel frame, the scroll port length would
   // be 100, and gecko would limit the maximum scroll offset to 400 (so as to prevent
   // overscroll). Note that if the content here was zoomed to 2x, the document would
   // be 1000 pixels long but the frame would still be 100 pixels, and so the maximum
   // scroll range would be 900. Therefore this calculation depends on the zoom applied
   // to the content relative to the container.
   // Note that this needs to happen before scrolling the frame (in UpdateFrameCommon),
@@ -202,18 +214,18 @@ APZCCallbackHelper::UpdateRootFrame(nsID
   CSSSize scrollPort = aMetrics.CalculateCompositedSizeInCssPixels();
   aUtils->SetScrollPositionClampingScrollPortSize(scrollPort.width, scrollPort.height);
 
   nsIContent* content = nsLayoutUtils::FindContentFor(aMetrics.GetScrollId());
   ScrollFrame(content, aMetrics);
 
   // The pres shell resolution is updated by the the async zoom since the
   // last paint.
-  float presShellResolution = aMetrics.GetPresShellResolution()
-                            * aMetrics.GetAsyncZoom().scale;
+  presShellResolution = aMetrics.GetPresShellResolution()
+                      * aMetrics.GetAsyncZoom().scale;
   aUtils->SetResolutionAndScaleTo(presShellResolution);
 
   SetDisplayPortMargins(aUtils, content, aMetrics);
 }
 
 void
 APZCCallbackHelper::UpdateSubFrame(nsIContent* aContent,
                                    FrameMetrics& aMetrics)
--- a/gfx/layers/apz/util/APZCCallbackHelper.h
+++ b/gfx/layers/apz/util/APZCCallbackHelper.h
@@ -61,16 +61,17 @@ public:
 
     /* Applies the scroll and zoom parameters from the given FrameMetrics object to
        the root frame corresponding to the given DOMWindowUtils. If tiled thebes
        layers are enabled, this will align the displayport to tile boundaries.
        Setting the scroll position can cause some small adjustments to be made
        to the actual scroll position. aMetrics' display port and scroll position
        will be updated with any modifications made. */
     static void UpdateRootFrame(nsIDOMWindowUtils* aUtils,
+                                nsIPresShell* aPresShell,
                                 FrameMetrics& aMetrics);
 
     /* Applies the scroll parameters from the given FrameMetrics object to the subframe
        corresponding to the given content object. If tiled thebes
        layers are enabled, this will align the displayport to tile boundaries.
        Setting the scroll position can cause some small adjustments to be made
        to the actual scroll position. aMetrics' display port and scroll position
        will be updated with any modifications made. */
--- a/gfx/layers/client/ClientTiledPaintedLayer.cpp
+++ b/gfx/layers/client/ClientTiledPaintedLayer.cpp
@@ -421,17 +421,17 @@ ClientTiledPaintedLayer::RenderLayer()
   // This is handled by PadDrawTargetOutFromRegion in TiledContentClient for mobile
   if (MayResample()) {
     // If we're resampling then bilinear filtering can read up to 1 pixel
     // outside of our texture coords. Make the visible region a single rect,
     // and pad it out by 1 pixel (restricted to tile boundaries) so that
     // we always have valid content or transparent pixels to sample from.
     nsIntRect bounds = neededRegion.GetBounds();
     nsIntRect wholeTiles = bounds;
-    wholeTiles.Inflate(nsIntSize(
+    wholeTiles.InflateToMultiple(nsIntSize(
       gfxPlatform::GetPlatform()->GetTileWidth(),
       gfxPlatform::GetPlatform()->GetTileHeight()));
     nsIntRect padded = bounds;
     padded.Inflate(1);
     padded.IntersectRect(padded, wholeTiles);
     neededRegion = padded;
   }
 #endif
--- a/gfx/layers/composite/LayerManagerComposite.cpp
+++ b/gfx/layers/composite/LayerManagerComposite.cpp
@@ -647,35 +647,37 @@ LayerManagerComposite::Render()
     auto packet = MakeUnique<layerscope::Packet>();
     layerscope::LayersPacket* layersPacket = packet->mutable_layers();
     this->Dump(layersPacket);
     LayerScope::SendLayerDump(Move(packet));
   }
 
   /** Our more efficient but less powerful alter ego, if one is available. */
   nsRefPtr<Composer2D> composer2D;
-  composer2D = mCompositor->GetWidget()->GetComposer2D();
 
-  // We can't use composert2D if we have layer effects
-  if (!mTarget && !haveLayerEffects &&
-      composer2D && composer2D->HasHwc() && composer2D->TryRenderWithHwc(mRoot, mGeometryChanged))
-  {
+  // We can't use composert2D if we have layer effects, so only get it
+  // when we don't have any effects.
+  if (!haveLayerEffects) {
+    composer2D = mCompositor->GetWidget()->GetComposer2D();
+  }
+
+  if (!mTarget && composer2D && composer2D->TryRender(mRoot, mGeometryChanged)) {
     LayerScope::SetHWComposed();
     if (mFPS) {
       double fps = mFPS->mCompositionFps.AddFrameAndGetFps(TimeStamp::Now());
       if (gfxPrefs::LayersDrawFPS()) {
         printf_stderr("HWComposer: FPS is %g\n", fps);
       }
     }
     mCompositor->EndFrameForExternalComposition(Matrix());
     // Reset the invalid region as compositing is done
     mInvalidRegion.SetEmpty();
     mLastFrameMissedHWC = false;
     return;
-  } else if (!mTarget && !haveLayerEffects) {
+  } else if (!mTarget) {
     mLastFrameMissedHWC = !!composer2D;
   }
 
   {
     PROFILER_LABEL("LayerManagerComposite", "PreRender",
       js::ProfileEntry::Category::GRAPHICS);
 
     if (!mCompositor->GetWidget()->PreRender(this)) {
@@ -756,20 +758,16 @@ LayerManagerComposite::Render()
   {
     PROFILER_LABEL("LayerManagerComposite", "EndFrame",
       js::ProfileEntry::Category::GRAPHICS);
 
     mCompositor->EndFrame();
     mCompositor->SetFBAcquireFence(mRoot);
   }
 
-  if (composer2D) {
-    composer2D->Render();
-  }
-
   mCompositor->GetWidget()->PostRender(this);
 
   RecordFrame();
 }
 
 static void
 SubtractTransformedRegion(nsIntRegion& aRegion,
                           const nsIntRegion& aRegionToSubtract,
--- a/gfx/layers/d3d11/TextureD3D11.h
+++ b/gfx/layers/d3d11/TextureD3D11.h
@@ -29,16 +29,22 @@ class TextureClientD3D11 : public Textur
 {
 public:
   TextureClientD3D11(ISurfaceAllocator* aAllocator,
                      gfx::SurfaceFormat aFormat,
                      TextureFlags aFlags);
 
   virtual ~TextureClientD3D11();
 
+  void InitWith(ID3D11Texture2D* aTexture, const gfx::IntSize& aSize)
+  {
+    mTexture = aTexture;
+    mSize = aSize;
+  }
+
   // TextureClient
 
   virtual bool IsAllocated() const override { return mTexture || mTexture10; }
 
   virtual bool Lock(OpenMode aOpenMode) override;
 
   virtual void Unlock() override;
 
--- a/gfx/layers/moz.build
+++ b/gfx/layers/moz.build
@@ -41,16 +41,19 @@ EXPORTS += [
     'opengl/OGLShaderProgram.h',
     'opengl/TexturePoolOGL.h',
     'protobuf/LayerScopePacket.pb.h',
     'ReadbackLayer.h',
     'TiledLayerBuffer.h',
 ]
 
 if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+    SOURCES += [
+	    'D3D11ShareHandleImage.cpp',
+    ]
     UNIFIED_SOURCES += [
         'D3D9SurfaceImage.cpp',
         'IMFYCbCrImage.cpp',
         'TextureDIB.cpp',
     ]
     EXPORTS.mozilla.layers += [
         'TextureDIB.h',
     ]
@@ -123,16 +126,17 @@ EXPORTS.mozilla.layers += [
     'composite/ContentHost.h',
     'composite/ImageHost.h',
     'composite/ImageLayerComposite.h',
     'composite/LayerManagerComposite.h',
     'composite/PaintedLayerComposite.h',
     'composite/TextureHost.h',
     'Compositor.h',
     'CompositorTypes.h',
+    'D3D11ShareHandleImage.h',
     'D3D9SurfaceImage.h',
     'Effects.h',
     'ImageDataSerializer.h',
     'ipc/AsyncTransactionTracker.h',
     'ipc/CompositableForwarder.h',
     'ipc/CompositableTransactionParent.h',
     'ipc/CompositorChild.h',
     'ipc/CompositorParent.h',
--- a/gfx/layers/opengl/Composer2D.h
+++ b/gfx/layers/opengl/Composer2D.h
@@ -47,27 +47,15 @@ public:
   /**
    * Return true if |aRoot| met the implementation's criteria for fast
    * composition and the render was successful.  Return false to fall
    * back on the GPU.
    *
    * Currently, when TryRender() returns true, the entire framebuffer
    * must have been rendered.
    */
-  virtual bool TryRenderWithHwc(Layer* aRoot, bool aGeometryChanged) = 0;
-
-  /**
-   * Return true if Composer2D does composition. Return false if Composer2D
-   * failed the composition.
-   */
-  virtual bool Render() = 0;
-
-  /**
-   * Return true if Composer2D has a fast composition hardware.
-   * Return false if Composer2D does not have a fast composition hardware.
-   */
-  virtual bool HasHwc() = 0;
+  virtual bool TryRender(Layer* aRoot, bool aGeometryChanged) = 0;
 };
 
 } // namespace layers
 } // namespace mozilla
 
 #endif // mozilla_layers_Composer2D_h
--- a/gfx/layers/opengl/CompositorOGL.cpp
+++ b/gfx/layers/opengl/CompositorOGL.cpp
@@ -126,29 +126,23 @@ CompositorOGL::CreateContext()
     caps.bpp16 = gfxPlatform::GetPlatform()->GetOffscreenFormat() == gfxImageFormat::RGB16_565;
 
     bool requireCompatProfile = true;
     context = GLContextProvider::CreateOffscreen(gfxIntSize(mSurfaceSize.width,
                                                             mSurfaceSize.height),
                                                  caps, requireCompatProfile);
   }
 
-  if (!context) {
+  if (!context)
     context = gl::GLContextProvider::CreateForWindow(mWidget);
-  }
 
   if (!context) {
     NS_WARNING("Failed to create CompositorOGL context");
   }
 
-#ifdef MOZ_WIDGET_GONK
-  mWidget->SetNativeData(NS_NATIVE_OPENGL_CONTEXT,
-                         reinterpret_cast<uintptr_t>(context.get()));
-#endif
-
   return context.forget();
 }
 
 void
 CompositorOGL::Destroy()
 {
   if (mTexturePool) {
     mTexturePool->Clear();
--- a/gfx/src/nsRect.h
+++ b/gfx/src/nsRect.h
@@ -9,16 +9,17 @@
 
 #include <stdio.h>                      // for FILE
 #include <stdint.h>                     // for int32_t, int64_t
 #include <algorithm>                    // for min/max
 #include "nsDebug.h"                    // for NS_WARNING
 #include "gfxCore.h"                    // for NS_GFX
 #include "mozilla/Likely.h"             // for MOZ_UNLIKELY
 #include "mozilla/gfx/BaseRect.h"       // for BaseRect
+#include "mozilla/gfx/NumericTools.h"   // for RoundUpToMultiple, RoundDownToMultiple
 #include "nsCoord.h"                    // for nscoord, etc
 #include "nsISupportsImpl.h"            // for MOZ_COUNT_CTOR, etc
 #include "nsPoint.h"                    // for nsIntPoint, nsPoint
 #include "nsSize.h"                     // for nsIntSize, nsSize
 #include "nscore.h"                     // for NS_BUILD_REFCNT_LOGGING
 
 struct nsIntRect;
 struct nsMargin;
@@ -202,16 +203,30 @@ struct NS_GFX nsIntRect :
 
   // Returns a special nsIntRect that's used in some places to signify
   // "all available space".
   static const nsIntRect& GetMaxSizedIntRect() {
     static const nsIntRect r(0, 0, INT32_MAX, INT32_MAX);
     return r;
   }
 
+  void InflateToMultiple(const nsIntSize& aTileSize)
+  {
+    int32_t xMost = XMost();
+    int32_t yMost = YMost();
+
+    x = RoundDownToMultiple(x, aTileSize.width);
+    y = RoundDownToMultiple(y, aTileSize.height);
+    xMost = RoundUpToMultiple(xMost, aTileSize.width);
+    yMost = RoundUpToMultiple(yMost, aTileSize.height);
+
+    width = xMost - x;
+    height = yMost - y;
+  }
+
   // This is here only to keep IPDL-generated code happy. DO NOT USE.
   bool operator==(const nsIntRect& aRect) const
   {
     return IsEqualEdges(aRect);
   }
 };
 
 /*
--- a/gfx/tests/gtest/TestAsyncPanZoomController.cpp
+++ b/gfx/tests/gtest/TestAsyncPanZoomController.cpp
@@ -270,16 +270,18 @@ protected:
     apzc->UpdateZoomConstraints(ZoomConstraints(true, true, CSSToParentLayerScale(0.25f), CSSToParentLayerScale(4.0f)));
   }
 
   void MakeApzcUnzoomable()
   {
     apzc->UpdateZoomConstraints(ZoomConstraints(false, false, CSSToParentLayerScale(1.0f), CSSToParentLayerScale(1.0f)));
   }
 
+  void TestOverscroll();
+
   AsyncPanZoomController::GestureBehavior mGestureBehavior;
   TimeStamp testStartTime;
   nsRefPtr<MockContentControllerDelayed> mcc;
   nsRefPtr<TestAPZCTreeManager> tm;
   nsRefPtr<TestAsyncPanZoomController> apzc;
 };
 
 class APZCGestureDetectorTester : public APZCBasicTester {
@@ -1128,45 +1130,76 @@ TEST_F(APZCBasicTester, PanningTransform
   check.Call("Simple pan");
   ApzcPanNoFling(apzc, time, 50, 25);
   check.Call("Complex pan");
   Pan(apzc, time, 25, 45);
   apzc->AdvanceAnimationsUntilEnd(testStartTime);
   check.Call("Done");
 }
 
-TEST_F(APZCBasicTester, OverScrollPanning) {
-  SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
-
+void APZCBasicTester::TestOverscroll()
+{
   // Pan sufficiently to hit overscroll behavior
   int time = 0;
   int touchStart = 500;
   int touchEnd = 10;
   Pan(apzc, time, touchStart, touchEnd);
   EXPECT_TRUE(apzc->IsOverscrolled());
 
   // Check that we recover from overscroll via an animation.
   const TimeDuration increment = TimeDuration::FromMilliseconds(1);
   bool recoveredFromOverscroll = false;
   ParentLayerPoint pointOut;
   ViewTransform viewTransformOut;
   while (apzc->SampleContentTransformForFrame(testStartTime, &viewTransformOut, pointOut)) {
     // The reported scroll offset should be the same throughout.
     EXPECT_EQ(ParentLayerPoint(0, 90), pointOut);
 
+    // Trigger computation of the overscroll tranform, to make sure
+    // no assetions fire during the calculation.
+    apzc->GetOverscrollTransform();
+
     if (!apzc->IsOverscrolled()) {
       recoveredFromOverscroll = true;
     }
 
     testStartTime += increment;
   }
   EXPECT_TRUE(recoveredFromOverscroll);
   apzc->AssertStateIsReset();
 }
 
+
+TEST_F(APZCBasicTester, OverScrollPanning) {
+  SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
+
+  TestOverscroll();
+}
+
+// Tests that an overscroll animation doesn't trigger an assertion failure
+// in the case where a sample has a velocity of zero.
+TEST_F(APZCBasicTester, OverScroll_Bug1152051) {
+  SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
+
+  // Doctor the prefs to make the velocity zero at the end of the first sample.
+
+  // This ensures our incoming velocity to the overscroll animation is
+  // a round(ish) number, 4.9 (that being the distance of the pan before
+  // overscroll, which is 500 - 10 = 490 pixels, divided by the duration of
+  // the pan, which is 100 ms).
+  SCOPED_GFX_PREF(APZFlingFriction, float, 0);
+
+  // To ensure the velocity after the first sample is 0, set the spring
+  // stiffness to the incoming velocity (4.9) divided by the overscroll
+  // (400 pixels) times the step duration (1 ms).
+  SCOPED_GFX_PREF(APZOverscrollSpringStiffness, float, 0.01225f);
+
+  TestOverscroll();
+}
+
 TEST_F(APZCBasicTester, OverScrollAbort) {
   SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
 
   // Pan sufficiently to hit overscroll behavior
   int time = 0;
   int touchStart = 500;
   int touchEnd = 10;
   Pan(apzc, time, touchStart, touchEnd);
--- a/gfx/thebes/gfxPrefs.h
+++ b/gfx/thebes/gfxPrefs.h
@@ -263,16 +263,17 @@ private:
   DECL_GFX_PREF(Live, "image.single-color-optimization.enabled", ImageSingleColorOptimizationEnabled, bool, true);
 
   DECL_GFX_PREF(Once, "layers.acceleration.disabled",          LayersAccelerationDisabled, bool, false);
   DECL_GFX_PREF(Live, "layers.acceleration.draw-fps",          LayersDrawFPS, bool, false);
   DECL_GFX_PREF(Live, "layers.acceleration.draw-fps.print-histogram",  FPSPrintHistogram, bool, false);
   DECL_GFX_PREF(Live, "layers.acceleration.draw-fps.write-to-file", WriteFPSToFile, bool, false);
   DECL_GFX_PREF(Once, "layers.acceleration.force-enabled",     LayersAccelerationForceEnabled, bool, false);
   DECL_GFX_PREF(Once, "layers.async-pan-zoom.enabled",         AsyncPanZoomEnabled, bool, false);
+  DECL_GFX_PREF(Once, "layers.async-pan-zoom.separate-event-thread", AsyncPanZoomSeparateEventThread, bool, false);
   DECL_GFX_PREF(Once, "layers.async-video.enabled",            AsyncVideoEnabled, bool, true);
   DECL_GFX_PREF(Once, "layers.async-video-oop.enabled",        AsyncVideoOOPEnabled, bool, true);
   DECL_GFX_PREF(Live, "layers.bench.enabled",                  LayersBenchEnabled, bool, false);
   DECL_GFX_PREF(Once, "layers.bufferrotation.enabled",         BufferRotationEnabled, bool, true);
 #ifdef MOZ_GFX_OPTIMIZE_MOBILE
   // If MOZ_GFX_OPTIMIZE_MOBILE is defined, we force component alpha off
   // and ignore the preference.
   DECL_GFX_PREF(Skip, "layers.componentalpha.enabled",         ComponentAlphaEnabled, bool, false);
--- a/gfx/thebes/gfxWindowsPlatform.cpp
+++ b/gfx/thebes/gfxWindowsPlatform.cpp
@@ -2022,16 +2022,68 @@ gfxWindowsPlatform::InitD3D11Devices()
     mCanInitMediaDevice = true;
   }
 
   // We leak these everywhere and we need them our entire runtime anyway, let's
   // leak it here as well.
   d3d11Module.disown();
 }
 
+TemporaryRef<ID3D11Device>
+gfxWindowsPlatform::CreateD3D11DecoderDevice()
+{
+  nsModuleHandle d3d11Module(LoadLibrarySystem32(L"d3d11.dll"));
+  decltype(D3D11CreateDevice)* d3d11CreateDevice = (decltype(D3D11CreateDevice)*)
+    GetProcAddress(d3d11Module, "D3D11CreateDevice");
+
+   if (!d3d11CreateDevice) {
+    // We should just be on Windows Vista or XP in this case.
+    return nullptr;
+  }
+
+  nsTArray<D3D_FEATURE_LEVEL> featureLevels;
+  if (IsWin8OrLater()) {
+    featureLevels.AppendElement(D3D_FEATURE_LEVEL_11_1);
+  }
+  featureLevels.AppendElement(D3D_FEATURE_LEVEL_11_0);
+  featureLevels.AppendElement(D3D_FEATURE_LEVEL_10_1);
+  featureLevels.AppendElement(D3D_FEATURE_LEVEL_10_0);
+  featureLevels.AppendElement(D3D_FEATURE_LEVEL_9_3);
+
+  RefPtr<IDXGIAdapter1> adapter = GetDXGIAdapter();
+
+  if (!adapter) {
+    return nullptr;
+  }
+
+  HRESULT hr = E_INVALIDARG;
+
+  RefPtr<ID3D11Device> device;
+
+  MOZ_SEH_TRY{
+    hr = d3d11CreateDevice(adapter, D3D_DRIVER_TYPE_UNKNOWN, nullptr,
+                           D3D11_CREATE_DEVICE_VIDEO_SUPPORT,
+                           featureLevels.Elements(), featureLevels.Length(),
+                           D3D11_SDK_VERSION, byRef(device), nullptr, nullptr);
+  } MOZ_SEH_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
+    return nullptr;
+  }
+
+  if (FAILED(hr) || !DoesD3D11DeviceWork(device)) {
+    return nullptr;
+  }
+
+  nsRefPtr<ID3D10Multithread> multi;
+  device->QueryInterface(__uuidof(ID3D10Multithread), getter_AddRefs(multi));
+
+  multi->SetMultithreadProtected(TRUE);
+
+  return device;
+}
+
 static bool
 DwmCompositionEnabled()
 {
   MOZ_ASSERT(WinUtils::dwmIsCompositionEnabledPtr);
   BOOL dwmEnabled = false;
   WinUtils::dwmIsCompositionEnabledPtr(&dwmEnabled);
   return dwmEnabled;
 }
--- a/gfx/thebes/gfxWindowsPlatform.h
+++ b/gfx/thebes/gfxWindowsPlatform.h
@@ -243,16 +243,19 @@ public:
 #ifdef CAIRO_HAS_D2D_SURFACE
     cairo_device_t *GetD2DDevice() { return mD2DDevice; }
     ID3D10Device1 *GetD3D10Device() { return mD2DDevice ? cairo_d2d_device_get_device(mD2DDevice) : nullptr; }
 #endif
     ID3D11Device *GetD3D11Device();
     ID3D11Device *GetD3D11ContentDevice();
     ID3D11Device *GetD3D11MediaDevice();
 
+    // Create a D3D11 device to be used for DXVA decoding.
+    mozilla::TemporaryRef<ID3D11Device> CreateD3D11DecoderDevice();
+
     mozilla::layers::ReadbackManagerD3D11* GetReadbackManager();
 
     static bool IsOptimus();
 
     bool IsWARP() { return mIsWARP; }
 
     bool SupportsApzWheelInput() override {
       return true;
--- a/ipc/glue/MessageChannel.cpp
+++ b/ipc/glue/MessageChannel.cpp
@@ -308,16 +308,17 @@ MessageChannel::MessageChannel(MessageLi
     mAwaitingSyncReply(false),
     mAwaitingSyncReplyPriority(0),
     mDispatchingSyncMessage(false),
     mDispatchingSyncMessagePriority(0),
     mDispatchingAsyncMessage(false),
     mDispatchingAsyncMessagePriority(0),
     mCurrentTransaction(0),
     mTimedOutMessageSeqno(0),
+    mTimedOutMessagePriority(0),
     mRecvdErrors(0),
     mRemoteStackDepthGuess(false),
     mSawInterruptOutMsg(false),
     mIsWaitingForIncoming(false),
     mAbortOnError(false),
     mBlockScripts(false),
     mFlags(REQUIRE_DEFAULT),
     mPeerPidSet(false),
@@ -779,20 +780,21 @@ MessageChannel::Send(Message* aMsg, Mess
     if (!Connected()) {
         ReportConnectionError("MessageChannel::SendAndWait");
         return false;
     }
 
     msg->set_seqno(NextSeqno());
 
     int32_t seqno = msg->seqno();
+    int prio = msg->priority();
     DebugOnly<msgid_t> replyType = msg->type() + 1;
 
     AutoSetValue<bool> replies(mAwaitingSyncReply, true);
-    AutoSetValue<int> prio(mAwaitingSyncReplyPriority, msg->priority());
+    AutoSetValue<int> prioSet(mAwaitingSyncReplyPriority, prio);
     AutoEnterTransaction transact(this, seqno);
 
     int32_t transaction = mCurrentTransaction;
     msg->set_transaction_id(transaction);
 
     ProcessPendingRequests();
 
     mLink->SendMessage(msg.forget());
@@ -831,16 +833,17 @@ MessageChannel::Send(Message* aMsg, Mess
                 mRecvdErrors--;
                 return false;
             }
             if (mRecvd) {
                 break;
             }
 
             mTimedOutMessageSeqno = seqno;
+            mTimedOutMessagePriority = prio;
             return false;
         }
     }
 
     MOZ_ASSERT(mRecvd);
     MOZ_ASSERT(mRecvd->is_reply(), "expected reply");
     MOZ_ASSERT(!mRecvd->is_reply_error());
     MOZ_ASSERT(mRecvd->type() == replyType, "wrong reply type");
@@ -1184,22 +1187,30 @@ MessageChannel::DispatchSyncMessage(cons
                "priority inversion while dispatching sync message");
     IPC_ASSERT(prio >= mAwaitingSyncReplyPriority,
                "dispatching a message of lower priority while waiting for a response");
 
     bool dummy;
     bool& blockingVar = ShouldBlockScripts() ? gParentIsBlocked : dummy;
 
     Result rv;
-    if (mTimedOutMessageSeqno) {
+    if (mTimedOutMessageSeqno && mTimedOutMessagePriority >= prio) {
         // If the other side sends a message in response to one of our messages
         // that we've timed out, then we reply with an error.
         //
-        // We even reject messages that were sent before the other side even got
-        // to our timed out message.
+        // We do this because want to avoid a situation where we process an
+        // incoming message from the child here while it simultaneously starts
+        // processing our timed-out CPOW. It's very bad for both sides to
+        // be processing sync messages concurrently.
+        //
+        // The only exception is if the incoming message has urgent priority and
+        // our timed-out message had only high priority. In that case it's safe
+        // to process the incoming message because we know that the child won't
+        // process anything (the child will defer incoming messages when waiting
+        // for a response to its urgent message).
         rv = MsgNotAllowed;
     } else {
         AutoSetValue<bool> blocked(blockingVar, true);
         AutoSetValue<bool> sync(mDispatchingSyncMessage, true);
         AutoSetValue<int> prioSet(mDispatchingSyncMessagePriority, prio);
         rv = mListener->OnMessageReceived(aMsg, *getter_Transfers(reply));
     }
 
--- a/ipc/glue/MessageChannel.h
+++ b/ipc/glue/MessageChannel.h
@@ -587,16 +587,17 @@ class MessageChannel : HasResultCodes
     // side is responsible for replying to all sync messages sent by the other
     // side when it dispatches the timed out message. The response is always an
     // error.
     //
     // A message is only timed out if it initiated a transaction. This avoids
     // hitting a lot of corner cases with message nesting that we don't really
     // care about.
     int32_t mTimedOutMessageSeqno;
+    int mTimedOutMessagePriority;
 
     // If waiting for the reply to a sync out-message, it will be saved here
     // on the I/O thread and then read and cleared by the worker thread.
     nsAutoPtr<Message> mRecvd;
 
     // If a sync message reply that is an error arrives, we increment this
     // counter rather than storing it in mRecvd.
     size_t mRecvdErrors;
--- a/ipc/ipdl/test/cxx/PTestUrgentHangs.ipdl
+++ b/ipc/ipdl/test/cxx/PTestUrgentHangs.ipdl
@@ -1,24 +1,28 @@
 namespace mozilla {
 namespace _ipdltest {
 
-prio(normal upto high) sync protocol PTestUrgentHangs
+prio(normal upto urgent) sync protocol PTestUrgentHangs
 {
 parent:
     prio(high) sync Test1_2();
 
     prio(high) sync TestInner();
+    prio(urgent) sync TestInnerUrgent();
 
 child:
     prio(high) sync Test1_1();
     prio(high) sync Test1_3();
 
     prio(high) sync Test2();
 
     prio(high) sync Test3();
 
     async Test4();
     prio(high) sync Test4_1();
+
+    async Test5();
+    prio(high) sync Test5_1();
 };
 
 } // namespace _ipdltest
 } // namespace mozilla
--- a/ipc/ipdl/test/cxx/TestUrgentHangs.cpp
+++ b/ipc/ipdl/test/cxx/TestUrgentHangs.cpp
@@ -20,16 +20,18 @@ struct RunnableMethodTraits<mozilla::_ip
 
 namespace mozilla {
 namespace _ipdltest {
 
 //-----------------------------------------------------------------------------
 // parent
 
 TestUrgentHangsParent::TestUrgentHangsParent()
+  : mInnerCount(0),
+    mInnerUrgentCount(0)
 {
     MOZ_COUNT_CTOR(TestUrgentHangsParent);
 }
 
 TestUrgentHangsParent::~TestUrgentHangsParent()
 {
     MOZ_COUNT_DTOR(TestUrgentHangsParent);
 }
@@ -50,34 +52,57 @@ TestUrgentHangsParent::Main()
 
     // Also fails since we haven't gotten a response for Test2 yet.
     if (SendTest3())
         fail("sending Test3");
 
     // Do a second round of testing once the reply to Test2 comes back.
     MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
-        NewRunnableMethod(this, &TestUrgentHangsParent::FinishTesting),
+        NewRunnableMethod(this, &TestUrgentHangsParent::SecondStage),
         3000);
 }
 
 void
-TestUrgentHangsParent::FinishTesting()
+TestUrgentHangsParent::SecondStage()
 {
     // Send an async message that waits 2 seconds and then sends a sync message
     // (which should be processed).
     if (!SendTest4())
         fail("sending Test4");
 
     // Send a sync message that will time out because the child is waiting
     // inside RecvTest4.
     if (SendTest4_1())
         fail("sending Test4_1");
 
-    // Close the channel after the child finishes its work in RecvTest4.
+    MessageLoop::current()->PostDelayedTask(
+        FROM_HERE,
+        NewRunnableMethod(this, &TestUrgentHangsParent::ThirdStage),
+        3000);
+}
+
+void
+TestUrgentHangsParent::ThirdStage()
+{
+    // The third stage does the same thing as the second stage except that the
+    // child sends an urgent message to us. In this case, we actually answer
+    // that message unconditionally.
+
+    // Send an async message that waits 2 seconds and then sends a sync message
+    // (which should be processed).
+    if (!SendTest5())
+        fail("sending Test5");
+
+    // Send a sync message that will time out because the child is waiting
+    // inside RecvTest5.
+    if (SendTest5_1())
+        fail("sending Test5_1");
+
+    // Close the channel after the child finishes its work in RecvTest5.
     MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         NewRunnableMethod(this, &TestUrgentHangsParent::Close),
         3000);
 }
 
 bool
 TestUrgentHangsParent::RecvTest1_2()
@@ -85,17 +110,24 @@ TestUrgentHangsParent::RecvTest1_2()
     if (!SendTest1_3())
         fail("sending Test1_3");
     return true;
 }
 
 bool
 TestUrgentHangsParent::RecvTestInner()
 {
-    fail("TestInner should never be dispatched");
+    mInnerCount++;
+    return true;
+}
+
+bool
+TestUrgentHangsParent::RecvTestInnerUrgent()
+{
+    mInnerUrgentCount++;
     return true;
 }
 
 //-----------------------------------------------------------------------------
 // child
 
 bool
 TestUrgentHangsChild::RecvTest1_1()
@@ -152,16 +184,40 @@ TestUrgentHangsChild::RecvTest4_1()
     // This should fail because Test4_1 timed out and hasn't gotten a response
     // yet.
     if (SendTestInner())
         fail("sending TestInner");
 
     return true;
 }
 
+bool
+TestUrgentHangsChild::RecvTest5()
+{
+    PR_Sleep(PR_SecondsToInterval(2));
+
+    // This message will actually be handled by the parent even though it's in
+    // the timeout state.
+    if (!SendTestInnerUrgent())
+        fail("sending TestInner");
+
+    return true;
+}
+
+bool
+TestUrgentHangsChild::RecvTest5_1()
+{
+    // This message will actually be handled by the parent even though it's in
+    // the timeout state.
+    if (!SendTestInnerUrgent())
+        fail("sending TestInner");
+
+    return true;
+}
+
 TestUrgentHangsChild::TestUrgentHangsChild()
 {
     MOZ_COUNT_CTOR(TestUrgentHangsChild);
 }
 
 TestUrgentHangsChild::~TestUrgentHangsChild()
 {
     MOZ_COUNT_DTOR(TestUrgentHangsChild);
--- a/ipc/ipdl/test/cxx/TestUrgentHangs.h
+++ b/ipc/ipdl/test/cxx/TestUrgentHangs.h
@@ -16,46 +16,59 @@ class TestUrgentHangsParent :
 public:
     TestUrgentHangsParent();
     virtual ~TestUrgentHangsParent();
 
     static bool RunTestInProcesses() { return true; }
     static bool RunTestInThreads() { return false; }
 
     void Main();
-    void FinishTesting();
+    void SecondStage();
+    void ThirdStage();
 
     bool RecvTest1_2();
     bool RecvTestInner();
+    bool RecvTestInnerUrgent();
 
     bool ShouldContinueFromReplyTimeout() override
     {
       return false;
     }
     virtual void ActorDestroy(ActorDestroyReason why) override
     {
+	if (mInnerCount != 1) {
+	    fail("wrong mInnerCount");
+	}
+	if (mInnerUrgentCount != 2) {
+	    fail("wrong mInnerUrgentCount");
+	}
         passed("ok");
         QuitParent();
     }
+
+private:
+    size_t mInnerCount, mInnerUrgentCount;
 };
 
 
 class TestUrgentHangsChild :
     public PTestUrgentHangsChild
 {
 public:
     TestUrgentHangsChild();
     virtual ~TestUrgentHangsChild();
 
     bool RecvTest1_1();
     bool RecvTest1_3();
     bool RecvTest2();
     bool RecvTest3();
     bool RecvTest4();
     bool RecvTest4_1();
+    bool RecvTest5();
+    bool RecvTest5_1();
 
     virtual void ActorDestroy(ActorDestroyReason why) override
     {
         QuitChild();
     }
 };
 
 
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -114,16 +114,24 @@ GetBuildConfiguration(JSContext* cx, uns
 #ifdef MOZ_ASAN
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "asan", value))
         return false;
 
+#ifdef MOZ_TSAN
+    value = BooleanValue(true);
+#else
+    value = BooleanValue(false);
+#endif
+    if (!JS_SetProperty(cx, info, "tsan", value))
+        return false;
+
 #ifdef JS_GC_ZEAL
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "has-gczeal", value))
         return false;
 
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -267,43 +267,16 @@ CheckMarkedThing<jsid>(JSTracer* trc, js
 #endif
 }
 
 #define JS_ROOT_MARKING_ASSERT(trc) \
     MOZ_ASSERT_IF(trc->isMarkingTracer(), \
                   trc->runtime()->gc.state() == NO_INCREMENTAL || \
                   trc->runtime()->gc.state() == MARK_ROOTS);
 
-/*
- * We only set the maybeAlive flag for objects and scripts. It's assumed that,
- * if a compartment is alive, then it will have at least some live object or
- * script it in. Even if we get this wrong, the worst that will happen is that
- * scheduledForDestruction will be set on the compartment, which will cause some
- * extra GC activity to try to free the compartment.
- */
-template<typename T>
-static inline void
-SetMaybeAliveFlag(T* thing)
-{
-}
-
-template<>
-void
-SetMaybeAliveFlag(JSObject* thing)
-{
-    thing->compartment()->maybeAlive = true;
-}
-
-template<>
-void
-SetMaybeAliveFlag(JSScript* thing)
-{
-    thing->compartment()->maybeAlive = true;
-}
-
 #define FOR_EACH_GC_LAYOUT(D) \
     D(Object, JSObject) \
     D(String, JSString) \
     D(Symbol, JS::Symbol) \
     D(Script, JSScript) \
     D(Shape, js::Shape) \
     D(BaseShape, js::BaseShape) \
     D(JitCode, js::jit::JitCode) \
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -570,16 +570,23 @@ js::gc::GCRuntime::bufferGrayRoots()
     if (grayBufferer.failed()) {
       grayBufferState = GrayBufferState::Failed;
       resetBufferedGrayRoots();
     } else {
       grayBufferState = GrayBufferState::Okay;
     }
 }
 
+struct SetMaybeAliveFunctor {
+    template <typename T>
+    void operator()(Cell* cell) {
+        SetMaybeAliveFlag<T>(static_cast<T*>(cell));
+    }
+};
+
 void
 BufferGrayRootsTracer::appendGrayRoot(Cell* thing, JSGCTraceKind kind)
 {
     MOZ_ASSERT(runtime()->isHeapBusy());
 
     if (bufferingGrayRootsFailed)
         return;
 
@@ -591,26 +598,18 @@ BufferGrayRootsTracer::appendGrayRoot(Ce
 #endif
 
     Zone* zone = TenuredCell::fromPointer(thing)->zone();
     if (zone->isCollecting()) {
         // See the comment on SetMaybeAliveFlag to see why we only do this for
         // objects and scripts. We rely on gray root buffering for this to work,
         // but we only need to worry about uncollected dead compartments during
         // incremental GCs (when we do gray root buffering).
-        switch (kind) {
-          case JSTRACE_OBJECT:
-            static_cast<JSObject*>(thing)->compartment()->maybeAlive = true;
-            break;
-          case JSTRACE_SCRIPT:
-            static_cast<JSScript*>(thing)->compartment()->maybeAlive = true;
-            break;
-          default:
-            break;
-        }
+        CallTyped(SetMaybeAliveFunctor(), kind, thing);
+
         if (!zone->gcGrayRoots.append(root))
             bufferingGrayRootsFailed = true;
     }
 }
 
 void
 GCRuntime::markBufferedGrayRoots(JS::Zone* zone)
 {
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -607,16 +607,25 @@ struct JSCompartment
 inline bool
 JSRuntime::isAtomsZone(JS::Zone* zone)
 {
     return zone == atomsCompartment_->zone();
 }
 
 namespace js {
 
+// We only set the maybeAlive flag for objects and scripts. It's assumed that,
+// if a compartment is alive, then it will have at least some live object or
+// script it in. Even if we get this wrong, the worst that will happen is that
+// scheduledForDestruction will be set on the compartment, which will cause
+// some extra GC activity to try to free the compartment.
+template<typename T> inline void SetMaybeAliveFlag(T* thing) {}
+template<> inline void SetMaybeAliveFlag(JSObject* thing) {thing->compartment()->maybeAlive = true;}
+template<> inline void SetMaybeAliveFlag(JSScript* thing) {thing->compartment()->maybeAlive = true;}
+
 inline js::Handle<js::GlobalObject*>
 ExclusiveContext::global() const
 {
     /*
      * It's safe to use |unsafeGet()| here because any compartment that is
      * on-stack will be marked automatically, so there's no need for a read
      * barrier on it. Once the compartment is popped, the handle is no longer
      * safe to use.
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -429,18 +429,36 @@ js::EnqueuePendingParseTasksAfterGC(JSRu
     AutoLockHelperThreadState lock;
 
     for (size_t i = 0; i < newTasks.length(); i++)
         HelperThreadState().parseWorklist().append(newTasks[i]);
 
     HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
 }
 
-static const uint32_t HELPER_STACK_SIZE = 512 * 1024;
-static const uint32_t HELPER_STACK_QUOTA = 450 * 1024;
+static const uint32_t kDefaultHelperStackSize = 512 * 1024;
+static const uint32_t kDefaultHelperStackQuota = 450 * 1024;
+
+// TSan enforces a minimum stack size that's just slightly larger than our
+// default helper stack size.  It does this to store blobs of TSan-specific
+// data on each thread's stack.  Unfortunately, that means that even though
+// we'll actually receive a larger stack than we requested, the effective
+// usable space of that stack is significantly less than what we expect.
+// To offset TSan stealing our stack space from underneath us, double the
+// default.
+//
+// Note that we don't need this for ASan/MOZ_ASAN because ASan doesn't
+// require all the thread-specific state that TSan does.
+#if defined(MOZ_TSAN)
+static const uint32_t HELPER_STACK_SIZE = 2 * kDefaultHelperStackSize;
+static const uint32_t HELPER_STACK_QUOTA = 2 * kDefaultHelperStackQuota;
+#else
+static const uint32_t HELPER_STACK_SIZE = kDefaultHelperStackSize;
+static const uint32_t HELPER_STACK_QUOTA = kDefaultHelperStackQuota;
+#endif
 
 void
 GlobalHelperThreadState::ensureInitialized()
 {
     MOZ_ASSERT(CanUseExtraThreads());
 
     MOZ_ASSERT(this == &HelperThreadState());
     AutoLockHelperThreadState lock;
--- a/js/src/vm/UnboxedObject.cpp
+++ b/js/src/vm/UnboxedObject.cpp
@@ -950,18 +950,26 @@ PropertiesAreSuperset(const UnboxedLayou
     }
     return true;
 }
 
 bool
 js::TryConvertToUnboxedLayout(ExclusiveContext* cx, Shape* templateShape,
                               ObjectGroup* group, PreliminaryObjectArray* objects)
 {
-    if (!templateShape->runtimeFromAnyThread()->options().unboxedObjects())
-        return true;
+    // Unboxed objects are nightly only for now. The getenv() call will be
+    // removed when they are on by default. See bug 1153266.
+#ifdef NIGHTLY_BUILD
+    if (!getenv("JS_OPTION_USE_UNBOXED_OBJECTS")) {
+        if (!templateShape->runtimeFromAnyThread()->options().unboxedObjects())
+            return true;
+    }
+#else
+    return true;
+#endif
 
     if (templateShape->runtimeFromAnyThread()->isSelfHostingGlobal(cx->global()))
         return true;
 
     if (templateShape->slotSpan() == 0)
         return true;
 
     UnboxedLayout::PropertyVector properties;
--- a/layout/base/FrameLayerBuilder.cpp
+++ b/layout/base/FrameLayerBuilder.cpp
@@ -42,71 +42,56 @@
 using namespace mozilla::layers;
 using namespace mozilla::gfx;
 
 namespace mozilla {
 
 class PaintedDisplayItemLayerUserData;
 
 FrameLayerBuilder::DisplayItemData::DisplayItemData(LayerManagerData* aParent, uint32_t aKey,
-                                                    nsIFrame* aFrame)
+                                                    Layer* aLayer, nsIFrame* aFrame)
 
   : mParent(aParent)
-  , mLayer(nullptr)
+  , mLayer(aLayer)
   , mDisplayItemKey(aKey)
   , mItem(nullptr)
   , mUsed(true)
   , mIsInvalid(false)
 {
+  MOZ_RELEASE_ASSERT(mLayer);
   if (aFrame) {
     AddFrame(aFrame);
   }
 }
 
-FrameLayerBuilder::DisplayItemData::DisplayItemData(DisplayItemData &toCopy)
-{
-  // This isn't actually a copy-constructor; notice that it steals toCopy's
-  // mGeometry pointer.  Be careful.
-  mParent = toCopy.mParent;
-  mLayer = toCopy.mLayer;
-  mInactiveManager = toCopy.mInactiveManager;
-  mFrameList = toCopy.mFrameList;
-  mGeometry = toCopy.mGeometry;
-  mDisplayItemKey = toCopy.mDisplayItemKey;
-  mClip = toCopy.mClip;
-  mContainerLayerGeneration = toCopy.mContainerLayerGeneration;
-  mLayerState = toCopy.mLayerState;
-  mItem = toCopy.mItem;
-  mUsed = toCopy.mUsed;
-}
-
 void
 FrameLayerBuilder::DisplayItemData::AddFrame(nsIFrame* aFrame)
 {
+  MOZ_RELEASE_ASSERT(mLayer);
   mFrameList.AppendElement(aFrame);
 
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(FrameLayerBuilder::LayerManagerDataProperty()));
+  nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(FrameLayerBuilder::LayerManagerDataProperty()));
   if (!array) {
     array = new nsTArray<DisplayItemData*>();
     aFrame->Properties().Set(FrameLayerBuilder::LayerManagerDataProperty(), array);
   }
   array->AppendElement(this);
 }
 
 void
 FrameLayerBuilder::DisplayItemData::RemoveFrame(nsIFrame* aFrame)
 {
   MOZ_RELEASE_ASSERT(mLayer);
-  DebugOnly<bool> result = mFrameList.RemoveElement(aFrame);
-  NS_ASSERTION(result, "Can't remove a frame that wasn't added!");
-
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(FrameLayerBuilder::LayerManagerDataProperty()));
-  NS_ASSERTION(array, "Must be already stored on the frame!");
+  bool result = mFrameList.RemoveElement(aFrame);
+  MOZ_RELEASE_ASSERT(result, "Can't remove a frame that wasn't added!");
+
+  nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(FrameLayerBuilder::LayerManagerDataProperty()));
+  MOZ_RELEASE_ASSERT(array, "Must be already stored on the frame!");
   array->RemoveElement(this);
 }
 
 void
 FrameLayerBuilder::DisplayItemData::EndUpdate()
 {
   MOZ_RELEASE_ASSERT(mLayer);
   MOZ_ASSERT(!mItem);
@@ -128,16 +113,17 @@ FrameLayerBuilder::DisplayItemData::EndU
   EndUpdate();
 }
 
 void
 FrameLayerBuilder::DisplayItemData::BeginUpdate(Layer* aLayer, LayerState aState,
                                                 uint32_t aContainerLayerGeneration,
                                                 nsDisplayItem* aItem /* = nullptr */)
 {
+  MOZ_RELEASE_ASSERT(mLayer);
   MOZ_RELEASE_ASSERT(aLayer);
   mLayer = aLayer;
   mOptLayer = nullptr;
   mInactiveManager = nullptr;
   mLayerState = aState;
   mContainerLayerGeneration = aContainerLayerGeneration;
   mUsed = true;
 
@@ -170,16 +156,17 @@ FrameLayerBuilder::DisplayItemData::Begi
     RemoveFrame(copy[i]);
     mFrameListChanges.AppendElement(copy[i]);
   }
 }
 
 static nsIFrame* sDestroyedFrame = nullptr;
 FrameLayerBuilder::DisplayItemData::~DisplayItemData()
 {
+  MOZ_RELEASE_ASSERT(mLayer);
   for (uint32_t i = 0; i < mFrameList.Length(); i++) {
     nsIFrame* frame = mFrameList[i];
     if (frame == sDestroyedFrame) {
       continue;
     }
     nsTArray<DisplayItemData*> *array =
       reinterpret_cast<nsTArray<DisplayItemData*>*>(frame->Properties().Get(LayerManagerDataProperty()));
     array->RemoveElement(this);
@@ -1405,18 +1392,18 @@ FrameLayerBuilder::FlashPaint(gfxContext
   float b = float(rand()) / RAND_MAX;
   aContext->SetColor(gfxRGBA(r, g, b, 0.4));
   aContext->Paint();
 }
 
 FrameLayerBuilder::DisplayItemData*
 FrameLayerBuilder::GetDisplayItemData(nsIFrame* aFrame, uint32_t aKey)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
   if (array) {
     for (uint32_t i = 0; i < array->Length(); i++) {
       DisplayItemData* item = array->ElementAt(i);
       MOZ_RELEASE_ASSERT(item);
       MOZ_RELEASE_ASSERT(item->mLayer);
       if (item->mDisplayItemKey == aKey &&
           item->mLayer->Manager() == mRetainingManager) {
         return item;
@@ -1518,16 +1505,17 @@ GetTranslationForPaintedLayer(PaintedLay
  *
  * Cache the destroyed frame pointer here so we can avoid crashing in this case.
  */
 
 /* static */ void
 FrameLayerBuilder::RemoveFrameFromLayerManager(nsIFrame* aFrame,
                                                void* aPropertyValue)
 {
+  MOZ_RELEASE_ASSERT(!sDestroyedFrame);
   sDestroyedFrame = aFrame;
   nsTArray<DisplayItemData*> *array =
     reinterpret_cast<nsTArray<DisplayItemData*>*>(aPropertyValue);
 
   // Hold a reference to all the items so that they don't get
   // deleted from under us.
   nsTArray<nsRefPtr<DisplayItemData> > arrayCopy;
   for (uint32_t i = 0; i < array->Length(); ++i) {
@@ -1701,50 +1689,50 @@ FrameLayerBuilder::DumpDisplayItemDataFo
 #endif
   return PL_DHASH_NEXT;
 }
 
 /* static */ FrameLayerBuilder::DisplayItemData*
 FrameLayerBuilder::GetDisplayItemDataForManager(nsDisplayItem* aItem,
                                                 LayerManager* aManager)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aItem->Frame()->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aItem->Frame()->Properties().Get(LayerManagerDataProperty()));
   if (array) {
     for (uint32_t i = 0; i < array->Length(); i++) {
       DisplayItemData* item = array->ElementAt(i);
       if (item->mDisplayItemKey == aItem->GetPerFrameKey() &&
           item->mLayer->Manager() == aManager) {
         return item;
       }
     }
   }
   return nullptr;
 }
 
 bool
 FrameLayerBuilder::HasRetainedDataFor(nsIFrame* aFrame, uint32_t aDisplayItemKey)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
   if (array) {
     for (uint32_t i = 0; i < array->Length(); i++) {
       if (array->ElementAt(i)->mDisplayItemKey == aDisplayItemKey) {
         return true;
       }
     }
   }
   return false;
 }
 
 void
 FrameLayerBuilder::IterateRetainedDataFor(nsIFrame* aFrame, DisplayItemDataCallback aCallback)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
   if (!array) {
     return;
   }
 
   for (uint32_t i = 0; i < array->Length(); i++) {
     DisplayItemData* data = array->ElementAt(i);
     if (data->mDisplayItemKey != nsDisplayItem::TYPE_ZERO) {
       aCallback(aFrame, data);
@@ -1800,18 +1788,18 @@ FrameLayerBuilder::ClearCachedGeometry(n
   if (oldData) {
     oldData->mGeometry = nullptr;
   }
 }
 
 /* static */ Layer*
 FrameLayerBuilder::GetDebugOldLayerFor(nsIFrame* aFrame, uint32_t aDisplayItemKey)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
 
   if (!array) {
     return nullptr;
   }
 
   for (uint32_t i = 0; i < array->Length(); i++) {
     DisplayItemData *data = array->ElementAt(i);
 
@@ -1820,18 +1808,18 @@ FrameLayerBuilder::GetDebugOldLayerFor(n
     }
   }
   return nullptr;
 }
 
 /* static */ Layer*
 FrameLayerBuilder::GetDebugSingleOldLayerForFrame(nsIFrame* aFrame)
 {
-  nsTArray<DisplayItemData*>* array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
 
   if (!array) {
     return nullptr;
   }
 
   Layer* layer = nullptr;
   for (DisplayItemData* data : *array) {
     if (layer && layer != data->mLayer) {
@@ -4082,17 +4070,17 @@ FrameLayerBuilder::StoreDataForFrame(nsD
     }
     return oldData;
   }
 
   LayerManagerData* lmd = static_cast<LayerManagerData*>
     (mRetainingManager->GetUserData(&gLayerManagerUserData));
 
   nsRefPtr<DisplayItemData> data =
-    new DisplayItemData(lmd, aItem->GetPerFrameKey());
+    new DisplayItemData(lmd, aItem->GetPerFrameKey(), aLayer);
 
   data->BeginUpdate(aLayer, aState, mContainerLayerGeneration, aItem);
 
   lmd->mDisplayItems.PutEntry(data);
   return data;
 }
 
 void
@@ -4106,17 +4094,17 @@ FrameLayerBuilder::StoreDataForFrame(nsI
     oldData->BeginUpdate(aLayer, aState, mContainerLayerGeneration);
     return;
   }
 
   LayerManagerData* lmd = static_cast<LayerManagerData*>
     (mRetainingManager->GetUserData(&gLayerManagerUserData));
 
   nsRefPtr<DisplayItemData> data =
-    new DisplayItemData(lmd, aDisplayItemKey, aFrame);
+    new DisplayItemData(lmd, aDisplayItemKey, aLayer, aFrame);
 
   data->BeginUpdate(aLayer, aState, mContainerLayerGeneration);
 
   lmd->mDisplayItems.PutEntry(data);
 }
 
 FrameLayerBuilder::ClippedDisplayItem::~ClippedDisplayItem()
 {
@@ -4907,35 +4895,35 @@ FrameLayerBuilder::InvalidateAllLayers(L
   if (data) {
     data->mInvalidateAllLayers = true;
   }
 }
 
 /* static */ void
 FrameLayerBuilder::InvalidateAllLayersForFrame(nsIFrame *aFrame)
 {
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
   if (array) {
     for (uint32_t i = 0; i < array->Length(); i++) {
       array->ElementAt(i)->mParent->mInvalidateAllLayers = true;
     }
   }
 }
 
 /* static */
 Layer*
 FrameLayerBuilder::GetDedicatedLayer(nsIFrame* aFrame, uint32_t aDisplayItemKey)
 {
   //TODO: This isn't completely correct, since a frame could exist as a layer
   // in the normal widget manager, and as a different layer (or no layer)
   // in the secondary manager
 
-  nsTArray<DisplayItemData*> *array =
-    reinterpret_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
+  const nsTArray<DisplayItemData*>* array =
+    static_cast<nsTArray<DisplayItemData*>*>(aFrame->Properties().Get(LayerManagerDataProperty()));
   if (array) {
     for (uint32_t i = 0; i < array->Length(); i++) {
       DisplayItemData *element = array->ElementAt(i);
       if (!element->mParent->mLayerManager->IsWidgetLayerManager()) {
         continue;
       }
       if (element->mDisplayItemKey == aDisplayItemKey) {
         if (element->mOptLayer) {
@@ -4980,18 +4968,18 @@ FrameLayerBuilder::GetPaintedLayerScaleF
 
     if (nsLayoutUtils::IsPopup(f)) {
       // Don't examine ancestors of a popup. It won't make sense to check
       // the transform from some content inside the popup to some content
       // which is an ancestor of the popup.
       break;
     }
 
-    nsTArray<DisplayItemData*> *array =
-      reinterpret_cast<nsTArray<DisplayItemData*>*>(f->Properties().Get(LayerManagerDataProperty()));
+    const nsTArray<DisplayItemData*>* array =
+      static_cast<nsTArray<DisplayItemData*>*>(f->Properties().Get(LayerManagerDataProperty()));
     if (!array) {
       continue;
     }
 
     for (uint32_t i = 0; i < array->Length(); i++) {
       Layer* layer = array->ElementAt(i)->mLayer;
       ContainerLayer* container = layer->AsContainerLayer();
       if (!container ||
@@ -5430,17 +5418,17 @@ FrameLayerBuilder::DumpRetainedLayerTree
 
 nsDisplayItemGeometry*
 FrameLayerBuilder::GetMostRecentGeometry(nsDisplayItem* aItem)
 {
   typedef nsTArray<DisplayItemData*> DataArray;
 
   // Retrieve the array of DisplayItemData associated with our frame.
   FrameProperties properties = aItem->Frame()->Properties();
-  auto dataArray =
+  const DataArray* dataArray =
     static_cast<DataArray*>(properties.Get(LayerManagerDataProperty()));
   if (!dataArray) {
     return nullptr;
   }
 
   // Find our display item data, if it exists, and return its geometry.
   uint32_t itemPerFrameKey = aItem->GetPerFrameKey();
   for (uint32_t i = 0; i < dataArray->Length(); i++) {
--- a/layout/base/FrameLayerBuilder.h
+++ b/layout/base/FrameLayerBuilder.h
@@ -441,18 +441,18 @@ public:
     uint32_t GetDisplayItemKey() { return mDisplayItemKey; }
     Layer* GetLayer() { return mLayer; }
     nsDisplayItemGeometry* GetGeometry() const { return mGeometry.get(); }
     void Invalidate() { mIsInvalid = true; }
 
   private:
     DisplayItemData(LayerManagerData* aParent,
                     uint32_t aKey,
+                    Layer* aLayer,
                     nsIFrame* aFrame = nullptr);
-    DisplayItemData(DisplayItemData &toCopy);
 
     /**
      * Removes any references to this object from frames
      * in mFrameList.
      */
     ~DisplayItemData();
 
     NS_INLINE_DECL_REFCOUNTING(DisplayItemData)
--- a/layout/base/nsDisplayList.cpp
+++ b/layout/base/nsDisplayList.cpp
@@ -1230,17 +1230,17 @@ nsDisplayListBuilder::IsAnimatedGeometry
   }
 
   if (aFrame->StyleDisplay()->mPosition == NS_STYLE_POSITION_STICKY &&
       IsStickyFrameActive(this, aFrame, parent))
   {
     return true;
   }
 
-  if (parentType == nsGkAtoms::scrollFrame) {
+  if (parentType == nsGkAtoms::scrollFrame || parentType == nsGkAtoms::listControlFrame) {
     nsIScrollableFrame* sf = do_QueryFrame(parent);
     if (sf->IsScrollingActive(this) && sf->GetScrolledFrame() == aFrame) {
       return true;
     }
   }
 
   // Fixed-pos frames are parented by the viewport frame, which has no parent.
   if (nsLayoutUtils::IsFixedPosFrameInDisplayPort(aFrame)) {
--- a/layout/base/nsLayoutUtils.cpp
+++ b/layout/base/nsLayoutUtils.cpp
@@ -7984,16 +7984,26 @@ nsLayoutUtils::HasDocumentLevelListeners
       if (HasApzAwareListeners(targets[i]->GetExistingListenerManager())) {
         return true;
       }
     }
   }
   return false;
 }
 
+/* static */ float
+nsLayoutUtils::GetResolution(nsIPresShell* aPresShell)
+{
+  nsIScrollableFrame* sf = aPresShell->GetRootScrollFrameAsScrollable();
+  if (sf) {
+    return sf->GetResolution();
+  }
+  return aPresShell->GetResolution();
+}
+
 /* static */ uint32_t
 nsLayoutUtils::GetTouchActionFromFrame(nsIFrame* aFrame)
 {
   // If aFrame is null then return default value
   if (!aFrame) {
     return NS_STYLE_TOUCH_ACTION_AUTO;
   }
 
--- a/layout/base/nsLayoutUtils.h
+++ b/layout/base/nsLayoutUtils.h
@@ -2572,16 +2572,22 @@ public:
                                       nsHTMLReflowMetrics& aMetrics,
                                       const mozilla::LogicalMargin& aFramePadding,
                                       mozilla::WritingMode aLineWM,
                                       mozilla::WritingMode aFrameWM);
 
   static bool HasApzAwareListeners(mozilla::EventListenerManager* aElm);
   static bool HasDocumentLevelListenersForApzAwareEvents(nsIPresShell* aShell);
 
+  /**
+   * Get the resolution at which rescalable web content is drawn
+   * (see nsIDOMWindowUtils.getResolution).
+   */
+  static float GetResolution(nsIPresShell* aPresShell);
+
 private:
   static uint32_t sFontSizeInflationEmPerLine;
   static uint32_t sFontSizeInflationMinTwips;
   static uint32_t sFontSizeInflationLineThreshold;
   static int32_t  sFontSizeInflationMappingIntercept;
   static uint32_t sFontSizeInflationMaxRatio;
   static bool sFontSizeInflationForceEnabled;
   static bool sFontSizeInflationDisabledInMasterProcess;
--- a/layout/generic/nsFrame.cpp
+++ b/layout/generic/nsFrame.cpp
@@ -2667,18 +2667,17 @@ nsFrame::GetDataForTableSelection(const 
   nsCOMPtr<nsIContent> parentContent = tableOrCellContent->GetParent();
   if (!parentContent) return NS_ERROR_FAILURE;
 
   int32_t offset = parentContent->IndexOf(tableOrCellContent);
   // Not likely?
   if (offset < 0) return NS_ERROR_FAILURE;
 
   // Everything is OK -- set the return values
-  *aParentContent = parentContent;
-  NS_ADDREF(*aParentContent);
+  parentContent.forget(aParentContent);
 
   *aContentOffset = offset;
 
 #if 0
   if (selectRow)
     *aTarget = nsISelectionPrivate::TABLESELECTION_ROW;
   else if (selectColumn)
     *aTarget = nsISelectionPrivate::TABLESELECTION_COLUMN;
--- a/layout/generic/nsTextFrame.cpp
+++ b/layout/generic/nsTextFrame.cpp
@@ -7588,18 +7588,19 @@ nsTextFrame::AddInlinePrefISizeForFlow(n
   uint32_t loopStart = (preformatNewlines || preformatTabs) ? start : flowEndInTextRun;
   for (uint32_t i = loopStart, lineStart = start; i <= flowEndInTextRun; ++i) {
     bool preformattedNewline = false;
     bool preformattedTab = false;
     if (i < flowEndInTextRun) {
       // XXXldb Shouldn't we be including the newline as part of the
       // segment that it ends rather than part of the segment that it
       // starts?
-      NS_ASSERTION(preformatNewlines,
-                   "We can't be here unless newlines are hard breaks");
+      NS_ASSERTION(preformatNewlines || preformatTabs,
+                   "We can't be here unless newlines are "
+                   "hard breaks or there are tabs");
       preformattedNewline = preformatNewlines && textRun->CharIsNewline(i);
       preformattedTab = preformatTabs && textRun->CharIsTab(i);
       if (!preformattedNewline && !preformattedTab) {
         // we needn't break here (and it's not the end of the flow)
         continue;
       }
     }
 
--- a/layout/inspector/inDOMUtils.cpp
+++ b/layout/inspector/inDOMUtils.cpp
@@ -229,18 +229,17 @@ inDOMUtils::GetCSSStyleRules(nsIDOMEleme
     cssRule = do_QueryObject(ruleNode->GetRule());
     if (cssRule) {
       nsCOMPtr<nsIDOMCSSRule> domRule = cssRule->GetDOMRule();
       if (domRule)
         rules->InsertElementAt(domRule, 0);
     }
   }
 
-  *_retval = rules;
-  NS_ADDREF(*_retval);
+  rules.forget(_retval);
 
   return NS_OK;
 }
 
 static already_AddRefed<StyleRule>
 GetRuleFromDOMRule(nsIDOMCSSStyleRule *aRule, ErrorResult& rv)
 {
   nsCOMPtr<nsICSSStyleRuleDOMWrapper> rule = do_QueryInterface(aRule);
@@ -897,17 +896,17 @@ inDOMUtils::GetBindingURLs(nsIDOMElement
 
   nsXBLBinding *binding = content->GetXBLBinding();
 
   while (binding) {
     urls->AppendElement(binding->PrototypeBinding()->BindingURI(), false);
     binding = binding->GetBaseBinding();
   }
 
-  NS_ADDREF(*_retval = urls);
+  urls.forget(_retval);
   return NS_OK;
 }
 
 NS_IMETHODIMP
 inDOMUtils::SetContentState(nsIDOMElement* aElement,
                             EventStates::InternalType aState)
 {
   NS_ENSURE_ARG_POINTER(aElement);
--- a/layout/reftests/bugs/reftest.list
+++ b/layout/reftests/bugs/reftest.list
@@ -289,17 +289,17 @@ skip-if((B2G&&browserIsRemote)||Mulet) =
 == 269908-4.html 269908-4-ref.html
 == 269908-5.html 269908-5-ref.html
 == 271747-1a.html 271747-1-ref.html
 == 271747-1b.html 271747-1-ref.html
 skip-if((B2G&&browserIsRemote)||Mulet) == 272646-1.xul 272646-1-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if((B2G&&browserIsRemote)||Mulet) == 272646-2a.xul 272646-2-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if((B2G&&browserIsRemote)||Mulet) == 272646-2b.xul 272646-2-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if((B2G&&browserIsRemote)||Mulet) == 272646-2c.xul 272646-2-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 273681-1.html 273681-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,3,50) == 273681-1.html 273681-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 278266-1a.html 278266-1-ref.html
 == 278266-1b.html 278266-1-ref.html
 == 280708-1a.html 280708-1-ref.html
 == 280708-1b.html 280708-1-ref.html
 == 281241-1.html 281241-1-ref.html
 == 281241-2.xhtml 281241-1-ref.html
 == 283686-1.html about:blank
 == 283686-2.html 283686-2-ref.html
@@ -420,17 +420,17 @@ skip-if((B2G&&browserIsRemote)||Mulet) =
 == 322436-1.html 322436-1-ref.html
 == 322461-1.xml 322461-1-ref.html
 == 323656-1.html 323656-1-ref.html
 == 323656-2.html 323656-2-ref.html
 == 323656-3.html 323656-3-ref.html
 == 323656-4.html 323656-4-ref.html
 == 323656-5.svg 323656-5-ref.svg
 == 323656-6.html 323656-6-ref.html
-== 325292-1.html 325292-1-ref.html
+fuzzy-if(Android,2,140) == 325292-1.html 325292-1-ref.html
 == 325486-1.html 325486-1-ref.html
 == 328111-1.html 328111-1-ref.html
 random == 328829-1.xhtml 328829-1-ref.xhtml # bug 369046 (intermittent)
 == 328829-2.xhtml 328829-2-ref.xhtml
 == 329359-1.html 329359-1-ref.html
 == 331809-1.html 331809-1-ref.html # bug 580499 is blacked out
 == 332360.html 332360-ref.html
 == 332360-ltr.html 332360-ltr-ref.html
@@ -690,18 +690,18 @@ skip-if(B2G||Mulet) == 378937-1.html 378
 == 379316-1.html 379316-1-ref.html
 skip-if(B2G||Mulet) fails-if(Android) random-if(cocoaWidget) random-if(/^Windows\x20NT\x206\.1/.test(http.oscpu)) fuzzy-if(/^Windows\x20NT\x206\.2/.test(http.oscpu),1,170) fuzzy-if(gtk2Widget,1,191) == 379316-2.html 379316-2-ref.html # bug 379786 # Initial mulet triage: parity with B2G/B2G Desktop
 == 379328-1.html 379328-1-ref.html
 == 379349-1a.xhtml 379349-1-ref.xhtml
 == 379349-1b.xhtml 379349-1-ref.xhtml
 == 379349-1c.xhtml 379349-1-ref.xhtml
 == 379349-2a.xhtml 379349-2-ref.xhtml
 == 379349-2b.xhtml 379349-2-ref.xhtml
-skip-if(B2G||Mulet) == 379349-3a.xhtml 379349-3-ref.xhtml # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 379349-3b.xhtml 379349-3-ref.xhtml # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,140) == 379349-3a.xhtml 379349-3-ref.xhtml # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,140) == 379349-3b.xhtml 379349-3-ref.xhtml # Initial mulet triage: parity with B2G/B2G Desktop
 == 379361-1.html 379361-1-ref.html
 == 379361-2.html 379361-2-ref.html
 == 379361-3.html 379361-3-ref.html
 == 379461-1.xhtml 379461-1.html
 == 379461-2.xhtml 379461-2.html
 skip-if(B2G||Mulet) == 379461-3-container-xhtml.html 379461-3-container-html.html # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if(B2G||Mulet) fails-if(Android) != 379461-3-container-xhtml.html 379461-3-container-blank.html # there is a scrollbar # Initial mulet triage: parity with B2G/B2G Desktop
 == 380004-1.html 380004-1-ref.html
@@ -1399,30 +1399,30 @@ skip-if(B2G||Mulet) == 502447-1.html 502
 == 502942-1.html 502942-1-ref.html
 == 503364-1a.html 503364-1-ref.html
 == 503364-1b.html 503364-1-ref.html
 # Reftest for bug 503531 marked as failing; should be re-enabled when
 # bug 607548 gets resolved.
 needs-focus fails == 503531-1.html 503531-1-ref.html
 == 504032-1.html 504032-1-ref.html
 == 505743-1.html about:blank
-skip-if(B2G||Mulet) == 506481-1.html 506481-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,5,2800) == 506481-1.html 506481-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 507187-1.html 507187-1-ref.html
 == 507487-1.html 507487-1-ref.html
 == 507487-2.xhtml 507487-2-ref.xhtml
 == 507762-1.html 507762-1-ref.html
 == 507762-2.html 507762-2-ref.html
 == 507762-3.html 507762-1-ref.html
 == 507762-4.html 507762-2-ref.html
 skip-if(B2G||Mulet) random-if(cocoaWidget) == 508816-1.xul 508816-1-ref.xul # Bug 631982 # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if(B2G||Mulet) == 508816-2.html 508816-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if((B2G&&browserIsRemote)||Mulet) == 508908-1.xul 508908-1-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
 == 508919-1.xhtml 508919-1-ref.xhtml
 == 509155-1.xhtml 509155-1-ref.xhtml
-skip-if(B2G||Mulet) == 512410.html 512410-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,5,1656) == 512410.html 512410-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 512631-1.html 512631-1-ref.html
 == 513153-1a.html 513153-1-ref.html
 == 513153-1b.html 513153-1-ref.html
 == 513153-2a.html 513153-2-ref.html
 == 513153-2b.html 513153-2-ref.html
 skip-if((B2G&&browserIsRemote)||Mulet) == 513318-1.xul 513318-1-ref.xul # bug 974780 # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if(B2G||Mulet) fails-if(Android) != 513318-2.xul 513318-2-ref.xul # Initial mulet triage: parity with B2G/B2G Desktop
 == 514917-1.html 514917-1-ref.html
@@ -1449,17 +1449,17 @@ random-if(d2d) == 523468-1.html 523468-1
 == 528038-1c.html 528038-1-ref.html
 == 528038-1d.html 528038-1-ref.html
 == 528038-1e.html 528038-1-ref.html
 == 528038-1f.html 528038-1-ref.html
 == 528038-2.html 528038-2-ref.html
 == 528096-1.html 528096-1-ref.html
 == 530686-1.html 530686-1-ref.html
 == 531098-1.html 531098-1-ref.html
-== 531200-1.html 531200-1-ref.html
+fuzzy-if(Android,2,48) == 531200-1.html 531200-1-ref.html
 == 531371-1.html 531371-1-ref.html
 == 534526-1a.html 534526-1-ref.html
 == 534526-1b.html 534526-1-ref.html
 == 534804-1.html 534804-1-ref.html
 == 534808-1.html 534808-1-ref.html
 == 534808-2.html 534808-2-ref.html
 fails-if(OSX==1007) == 534919-1.html 534919-1-ref.html # Bug 705044
 random == 536061.html 536061-ref.html # fixedpoint division in blur code makes this fail
@@ -1514,39 +1514,39 @@ skip-if(B2G||Mulet) fails-if(Android) ==
 == 561981-3.html 561981-3-ref.html
 == 561981-4.html 561981-4-ref.html
 == 561981-5.html 561981-5-ref.html
 == 561981-6.html 561981-6-ref.html
 == 561981-7.html 561981-7-ref.html
 == 561981-8.html 561981-8-ref.html
 == 562835-1.html 562835-ref.html
 == 562835-2.html 562835-ref.html
-skip-if(B2G||Mulet) == 563584-1.html 563584-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-2.html 563584-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-3.html 563584-3-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-4.html 563584-4-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-== 563584-5.html 563584-5-ref.html
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-1.html 563584-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-2.html 563584-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,4,180) == 563584-3.html 563584-3-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,4,180) == 563584-4.html 563584-4-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+fuzzy-if(Android,2,48) == 563584-5.html 563584-5-ref.html
 test-pref(layout.float-fragments-inside-column.enabled,false) == 563584-6-columns.html 563584-6-columns-ref.html
 test-pref(layout.float-fragments-inside-column.enabled,true) == 563584-6-columns.html 563584-6-columns-ref-enabled.html
-skip-if(B2G||Mulet) == 563584-6-printing.html 563584-6-printing-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-7.html 563584-7-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-6-printing.html 563584-6-printing-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-7.html 563584-7-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 # FIXME: It would be nice to have variants of these -8 tests for the
 # table narrowing quirk causing a change to mIsTopOfPage (though I'm not
 # entirely sure our behavior is the right one, either).
-skip-if(B2G||Mulet) == 563584-8a.html 563584-8a-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-8b.html 563584-8b-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-8c.html 563584-8c-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 563584-8d.html 563584-8d-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-8a.html 563584-8a-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,2,48) == 563584-8b.html 563584-8b-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,4,180) == 563584-8c.html 563584-8c-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,4,180) == 563584-8d.html 563584-8d-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 563584-9a.html 563584-9a-ref.html
 == 563584-9b.html 563584-9b-ref.html
 == 563584-9c.html 563584-9cd-ref.html
 == 563584-9d.html 563584-9cd-ref.html
-== 563584-10a.html 563584-10-ref.html
-== 563584-10b.html 563584-10-ref.html
-== 563584-11.html 563584-11-ref.html
+fuzzy-if(Android,2,48) == 563584-10a.html 563584-10-ref.html
+fuzzy-if(Android,2,48) == 563584-10b.html 563584-10-ref.html
+fuzzy-if(Android,2,48) == 563584-11.html 563584-11-ref.html
 == 563884-1.html 563884-1-ref.html
 == 564002-1.html 564002-1-ref.html
 == 564054-1.html 564054-1-ref.html
 skip-if(B2G||Mulet) fails-if(Android) random-if(layersGPUAccelerated) == 564991-1.html 564991-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 565819-1.html 565819-ref.html
 == 565819-2.html 565819-ref.html
 needs-focus == 568441.html 568441-ref.html
 skip-if(B2G||Mulet) == 569006-1.html 569006-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
@@ -1575,23 +1575,23 @@ random-if(!winWidget) fails-if(winWidget
 skip-if(!haveTestPlugin) skip-if(B2G||Mulet) fails-if(Android) == 579808-1.html 579808-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if(B2G||Mulet) fails-if(Android) random-if(layersGPUAccelerated) == 579985-1.html 579985-1-ref.html # bug 623452 for WinXP; this bug was only for a regression in BasicLayers anyway # Initial mulet triage: parity with B2G/B2G Desktop
 skip-if(B2G||Mulet) skip-if(Android) == 580160-1.html 580160-1-ref.html # bug 920927 for Android; issues without the test-plugin # Initial mulet triage: parity with B2G/B2G Desktop
 HTTP(..) == 580863-1.html 580863-1-ref.html
 skip-if(B2G||Mulet) fails-if(Android) random-if(layersGPUAccelerated) == 581317-1.html 581317-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 581579-1.html 581579-1-ref.html
 == 582037-1a.html 582037-1-ref.html
 == 582037-1b.html 582037-1-ref.html
-skip-if(B2G||Mulet) == 582037-2a.html 582037-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
-skip-if(B2G||Mulet) == 582037-2b.html 582037-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,3,256) == 582037-2a.html 582037-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
+skip-if(B2G||Mulet) fuzzy-if(Android,3,256) == 582037-2b.html 582037-2-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 asserts(1-2) == 582146-1.html about:blank
 skip-if(B2G||Mulet) == 582476-1.svg 582476-1-ref.svg # Initial mulet triage: parity with B2G/B2G Desktop
 == 584400-dash-length.svg 584400-dash-length-ref.svg
 == 584699-1.html 584699-1-ref.html
-== 585598-2.xhtml 585598-2-ref.xhtml
+fuzzy-if(Android,2,48) == 585598-2.xhtml 585598-2-ref.xhtml
 == 586400-1.html 586400-1-ref.html
 skip-if(B2G||Mulet) fuzzy-if(d2d,52,1051) fuzzy-if(OSX==1008,129,1068) == 586683-1.html 586683-1-ref.html # Initial mulet triage: parity with B2G/B2G Desktop
 == 589615-1a.xhtml 589615-1-ref.html
 == 589615-1b.html 589615-1-ref.html
 == 589672-1.html 589672-1-ref.html
 != 589682-1.html 589682-1-notref.html
 skip-if(!asyncPanZoom) == 593243-1.html 593243-1-ref.html # bug 593168
 skip-if(!asyncPanZoom) == 593243-2.html 593243-2-ref.html # bug 593168
@@ -1619,17 +1619,17 @@ fuzzy-if(Android&&AndroidVersion>=15,8,2
 == 604737.html 604737-ref.html
 == 605138-1.html 605138-1-ref.html
 == 605157-1.xhtml 605157-1-ref.xhtml
 == 607267-1.html 607267-1-ref.html
 == 608636-1.html 608636-1-ref.html
 == 608756-1a.html 608756-1-ref.html
 == 608756-1b.html 608756-1-ref.html
 == 608756-2.html 608756-2-ref.html
-== 609272-1.html 609272-1-ref.html
+fuzzy-if(Android,4,196) == 609272-1.html 609272-1-ref.html
 needs-focus == 613433-1.html 613433-1-ref.html
 needs-focus == 613433-1.html 613433-2-ref.html
 needs-focus == 613433-1.html 613433-3-ref.html
 needs-focus == 613433-2.html 613433-1-ref.html
 needs-focus == 613433-2.html 613433-2-ref.html
 needs-focus == 613433-2.html 613433-3-ref.html
 needs-focus == 613433-3.html 613433-1-ref.html
 needs-focus == 613433-3.html 613433-2-ref.html
--- a/layout/style/nsTransitionManager.cpp
+++ b/layout/style/nsTransitionManager.cpp
@@ -19,31 +19,42 @@
 #include "nsCSSPropertySet.h"
 #include "mozilla/EventDispatcher.h"
 #include "mozilla/ContentEvents.h"
 #include "mozilla/StyleAnimationValue.h"
 #include "mozilla/dom/Element.h"
 #include "nsIFrame.h"
 #include "Layers.h"
 #include "FrameLayerBuilder.h"
+#include "nsCSSProps.h"
 #include "nsDisplayList.h"
 #include "nsStyleChangeList.h"
 #include "nsStyleSet.h"
 #include "RestyleManager.h"
 #include "nsDOMMutationObserver.h"
 
 using mozilla::TimeStamp;
 using mozilla::TimeDuration;
 using mozilla::dom::AnimationPlayer;
 using mozilla::dom::Animation;
 
 using namespace mozilla;
 using namespace mozilla::layers;
 using namespace mozilla::css;
 
+const nsString&
+ElementPropertyTransition::Name() const
+{
+   if (!mName.Length()) {
+     const_cast<ElementPropertyTransition*>(this)->mName =
+       NS_ConvertUTF8toUTF16(nsCSSProps::GetStringValue(TransitionProperty()));
+   }
+   return dom::Animation::Name();
+}
+
 double
 ElementPropertyTransition::CurrentValuePortion() const
 {
   // It would be easy enough to handle finished transitions by using a time
   // fraction of 1 but currently we should not be called for finished
   // transitions.
   MOZ_ASSERT(!IsFinishedTransition(),
              "Getting the value portion of a finished transition");
@@ -419,23 +430,22 @@ nsTransitionManager::ConsiderStartingTra
                                      0.5, dummyValue);
 
   bool haveCurrentTransition = false;
   size_t currentIndex = nsTArray<ElementPropertyTransition>::NoIndex;
   const ElementPropertyTransition *oldPT = nullptr;
   if (aElementTransitions) {
     AnimationPlayerPtrArray& players = aElementTransitions->mPlayers;
     for (size_t i = 0, i_end = players.Length(); i < i_end; ++i) {
-      MOZ_ASSERT(players[i]->GetSource() &&
-                 players[i]->GetSource()->Properties().Length() == 1,
-                 "Should have one animation property for a transition");
-      if (players[i]->GetSource()->Properties()[0].mProperty == aProperty) {
+      const ElementPropertyTransition *iPt =
+        players[i]->GetSource()->AsTransition();
+      if (iPt->TransitionProperty() == aProperty) {
         haveCurrentTransition = true;
         currentIndex = i;
-        oldPT = players[currentIndex]->GetSource()->AsTransition();
+        oldPT = iPt;
         break;
       }
     }
   }
 
   // If we got a style change that changed the value to the endpoint
   // of the currently running transition, we don't want to interrupt
   // its timing function.
@@ -571,23 +581,21 @@ nsTransitionManager::ConsiderStartingTra
       NS_WARNING("allocating CommonAnimationManager failed");
       return;
     }
   }
 
   AnimationPlayerPtrArray& players = aElementTransitions->mPlayers;
 #ifdef DEBUG
   for (size_t i = 0, i_end = players.Length(); i < i_end; ++i) {
-    MOZ_ASSERT(players[i]->GetSource() &&
-               players[i]->GetSource()->Properties().Length() == 1,
-               "Should have one animation property for a transition");
     MOZ_ASSERT(
       i == currentIndex ||
       (players[i]->GetSource() &&
-       players[i]->GetSource()->Properties()[0].mProperty != aProperty),
+       players[i]->GetSource()->AsTransition()->TransitionProperty()
+         != aProperty),
       "duplicate transitions for property");
   }
 #endif
   if (haveCurrentTransition) {
     players[currentIndex]->Cancel();
     oldPT = nullptr; // Clear pointer so it doesn't dangle
     players[currentIndex] = player;
   } else {
@@ -802,19 +810,18 @@ nsTransitionManager::FlushTransitions(Fl
         --i;
         AnimationPlayer* player = collection->mPlayers[i];
         if (!player->GetSource()->IsFinishedTransition()) {
           MOZ_ASSERT(player->GetSource(),
                      "Transitions should have source content");
           ComputedTiming computedTiming =
             player->GetSource()->GetComputedTiming();
           if (computedTiming.mPhase == ComputedTiming::AnimationPhase_After) {
-            MOZ_ASSERT(player->GetSource()->Properties().Length() == 1,
-                       "Should have one animation property for a transition");
-            nsCSSProperty prop = player->GetSource()->Properties()[0].mProperty;
+            nsCSSProperty prop =
+              player->GetSource()->AsTransition()->TransitionProperty();
             TimeDuration duration =
               player->GetSource()->Timing().mIterationDuration;
             events.AppendElement(
               TransitionEventInfo(collection->mElement, prop,
                                   duration,
                                   collection->PseudoElement()));
 
             // Leave this transition in the list for one more refresh
--- a/layout/style/nsTransitionManager.h
+++ b/layout/style/nsTransitionManager.h
@@ -37,16 +37,25 @@ struct ElementPropertyTransition : publi
                             nsCSSPseudoElements::Type aPseudoType,
                             const AnimationTiming &aTiming)
     : dom::Animation(aDocument, aTarget, aPseudoType, aTiming, EmptyString())
   { }
 
   virtual ElementPropertyTransition* AsTransition() { return this; }
   virtual const ElementPropertyTransition* AsTransition() const { return this; }
 
+  virtual const nsString& Name() const;
+
+  nsCSSProperty TransitionProperty() const {
+    MOZ_ASSERT(Properties().Length() == 1,
+               "Transitions should have exactly one animation property. "
+               "Perhaps we are using an un-initialized transition?");
+    return Properties()[0].mProperty;
+  }
+
   // This is the start value to be used for a check for whether a
   // transition is being reversed.  Normally the same as
   // mProperties[0].mSegments[0].mFromValue, except when this transition
   // started as the reversal of another in-progress transition.
   // Needed so we can handle two reverses in a row.
   mozilla::StyleAnimationValue mStartForReversingTest;
   // Likewise, the portion (in value space) of the "full" reversed
   // transition that we're actually covering.  For example, if a :hover
--- a/layout/xul/nsImageBoxFrame.cpp
+++ b/layout/xul/nsImageBoxFrame.cpp
@@ -142,18 +142,18 @@ nsImageBoxFrame::AttributeChanged(int32_
     UpdateLoadFlags();
 
   return rv;
 }
 
 nsImageBoxFrame::nsImageBoxFrame(nsStyleContext* aContext):
   nsLeafBoxFrame(aContext),
   mIntrinsicSize(0,0),
+  mLoadFlags(nsIRequest::LOAD_NORMAL),
   mRequestRegistered(false),
-  mLoadFlags(nsIRequest::LOAD_NORMAL),
   mUseSrcAttr(false),
   mSuppressStyleCheck(false),
   mFireEventOnDecode(false)
 {
   MarkIntrinsicISizesDirty();
 }
 
 nsImageBoxFrame::~nsImageBoxFrame()
--- a/layout/xul/nsImageBoxFrame.h
+++ b/layout/xul/nsImageBoxFrame.h
@@ -110,25 +110,25 @@ private:
   nsresult OnLoadComplete(imgIRequest* aRequest, nsresult aStatus);
   nsresult OnImageIsAnimated(imgIRequest* aRequest);
   nsresult OnFrameUpdate(imgIRequest* aRequest);
 
   nsRect mSubRect; ///< If set, indicates that only the portion of the image specified by the rect should be used.
   nsSize mIntrinsicSize;
   nsSize mImageSize;
 
-  // Boolean variable to determine if the current image request has been
-  // registered with the refresh driver.
-  bool mRequestRegistered;
-
   nsRefPtr<imgRequestProxy> mImageRequest;
   nsCOMPtr<imgINotificationObserver> mListener;
 
   int32_t mLoadFlags;
 
+  // Boolean variable to determine if the current image request has been
+  // registered with the refresh driver.
+  bool mRequestRegistered;
+
   bool mUseSrcAttr; ///< Whether or not the image src comes from an attribute.
   bool mSuppressStyleCheck;
   bool mFireEventOnDecode;
 }; // class nsImageBoxFrame
 
 class nsDisplayXULImage : public nsDisplayImageContainer {
 public:
   nsDisplayXULImage(nsDisplayListBuilder* aBuilder,
--- a/layout/xul/tree/nsTreeBodyFrame.cpp
+++ b/layout/xul/tree/nsTreeBodyFrame.cpp
@@ -616,17 +616,17 @@ nsTreeBodyFrame::GetSelectionRegion(nsIS
     if (isSelected)
       region->UnionRect(x, y, rect.width, rowHeight);
     y += rowHeight;
   }
 
   // clip to the tree boundary in case one row extends past it
   region->IntersectRect(x, top, rect.width, rect.height);
 
-  NS_ADDREF(*aRegion = region);
+  region.forget(aRegion);
   return NS_OK;
 }
 
 nsresult
 nsTreeBodyFrame::Invalidate()
 {
   if (mUpdateBatchNest)
     return NS_OK;
--- a/media/libstagefright/binding/Adts.cpp
+++ b/media/libstagefright/binding/Adts.cpp
@@ -58,18 +58,18 @@ Adts::ConvertSample(uint16_t aChannelCou
 
   nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
   if (!writer->Prepend(&header[0], ArrayLength(header))) {
     return false;
   }
 
   if (aSample->mCrypto.mValid) {
     if (aSample->mCrypto.mPlainSizes.Length() == 0) {
-      aSample->mCrypto.mPlainSizes.AppendElement(kADTSHeaderSize);
-      aSample->mCrypto.mEncryptedSizes.AppendElement(aSample->mSize - kADTSHeaderSize);
+      writer->mCrypto.mPlainSizes.AppendElement(kADTSHeaderSize);
+      writer->mCrypto.mEncryptedSizes.AppendElement(aSample->mSize - kADTSHeaderSize);
     } else {
-      aSample->mCrypto.mPlainSizes[0] += kADTSHeaderSize;
+      writer->mCrypto.mPlainSizes[0] += kADTSHeaderSize;
     }
   }
 
   return true;
 }
 }
--- a/media/libstagefright/binding/DecoderData.cpp
+++ b/media/libstagefright/binding/DecoderData.cpp
@@ -98,76 +98,80 @@ CryptoFile::DoUpdate(const uint8_t* aDat
     if (!reader.ReadArray(psshInfo.data, length)) {
       return false;
     }
     pssh.AppendElement(psshInfo);
   }
   return true;
 }
 
-void
-TrackConfig::Update(const MetaData* aMetaData, const char* aMimeType)
+static void
+UpdateTrackInfo(mozilla::TrackInfo& aConfig,
+                const MetaData* aMetaData,
+                const char* aMimeType)
 {
-  mime_type = aMimeType;
-  duration = FindInt64(aMetaData, kKeyDuration);
-  media_time = FindInt64(aMetaData, kKeyMediaTime);
-  mTrackId = FindInt32(aMetaData, kKeyTrackID);
-  crypto.mValid = aMetaData->findInt32(kKeyCryptoMode, &crypto.mMode) &&
+  mozilla::CryptoTrack& crypto = aConfig.mCrypto;
+  aConfig.mMimeType = aMimeType;
+  aConfig.mDuration = FindInt64(aMetaData, kKeyDuration);
+  aConfig.mMediaTime = FindInt64(aMetaData, kKeyMediaTime);
+  aConfig.mTrackId = FindInt32(aMetaData, kKeyTrackID);
+  aConfig.mCrypto.mValid = aMetaData->findInt32(kKeyCryptoMode, &crypto.mMode) &&
     aMetaData->findInt32(kKeyCryptoDefaultIVSize, &crypto.mIVSize) &&
     FindData(aMetaData, kKeyCryptoKey, &crypto.mKeyId);
 }
 
 void
-AudioDecoderConfig::Update(const MetaData* aMetaData, const char* aMimeType)
+MP4AudioInfo::Update(const MetaData* aMetaData,
+                     const char* aMimeType)
 {
-  TrackConfig::Update(aMetaData, aMimeType);
-  channel_count = FindInt32(aMetaData, kKeyChannelCount);
-  bits_per_sample = FindInt32(aMetaData, kKeySampleSize);
-  samples_per_second = FindInt32(aMetaData, kKeySampleRate);
-  frequency_index = Adts::GetFrequencyIndex(samples_per_second);
-  aac_profile = FindInt32(aMetaData, kKeyAACProfile);
+  UpdateTrackInfo(*this, aMetaData, aMimeType);
+  mChannels = FindInt32(aMetaData, kKeyChannelCount);
+  mBitDepth = FindInt32(aMetaData, kKeySampleSize);
+  mRate = FindInt32(aMetaData, kKeySampleRate);
+  mProfile = FindInt32(aMetaData, kKeyAACProfile);
 
-  if (FindData(aMetaData, kKeyESDS, extra_data)) {
-    ESDS esds(extra_data->Elements(), extra_data->Length());
+  if (FindData(aMetaData, kKeyESDS, mExtraData)) {
+    ESDS esds(mExtraData->Elements(), mExtraData->Length());
 
     const void* data;
     size_t size;
     if (esds.getCodecSpecificInfo(&data, &size) == OK) {
       const uint8_t* cdata = reinterpret_cast<const uint8_t*>(data);
-      audio_specific_config->AppendElements(cdata, size);
+      mCodecSpecificConfig->AppendElements(cdata, size);
       if (size > 1) {
         ABitReader br(cdata, size);
-        extended_profile = br.getBits(5);
+        mExtendedProfile = br.getBits(5);
 
-        if (extended_profile == 31) {  // AAC-ELD => additional 6 bits
-          extended_profile = 32 + br.getBits(6);
+        if (mExtendedProfile == 31) {  // AAC-ELD => additional 6 bits
+          mExtendedProfile = 32 + br.getBits(6);
         }
       }
     }
   }
 }
 
 bool
-AudioDecoderConfig::IsValid()
+MP4AudioInfo::IsValid() const
 {
-  return channel_count > 0 && samples_per_second > 0 && frequency_index > 0 &&
-         (!mime_type.Equals(MEDIA_MIMETYPE_AUDIO_AAC) ||
-          aac_profile > 0 || extended_profile > 0);
+  return mChannels > 0 && mRate > 0 &&
+         (!mMimeType.Equals(MEDIA_MIMETYPE_AUDIO_AAC) ||
+          mProfile > 0 || mExtendedProfile > 0);
 }
 
 void
-VideoDecoderConfig::Update(const MetaData* aMetaData, const char* aMimeType)
+MP4VideoInfo::Update(const MetaData* aMetaData, const char* aMimeType)
 {
-  TrackConfig::Update(aMetaData, aMimeType);
-  display_width = FindInt32(aMetaData, kKeyDisplayWidth);
-  display_height = FindInt32(aMetaData, kKeyDisplayHeight);
-  image_width = FindInt32(aMetaData, kKeyWidth);
-  image_height = FindInt32(aMetaData, kKeyHeight);
+  UpdateTrackInfo(*this, aMetaData, aMimeType);
+  mDisplay.width = FindInt32(aMetaData, kKeyDisplayWidth);
+  mDisplay.height = FindInt32(aMetaData, kKeyDisplayHeight);
+  mImage.width = FindInt32(aMetaData, kKeyWidth);
+  mImage.height = FindInt32(aMetaData, kKeyHeight);
 
-  FindData(aMetaData, kKeyAVCC, extra_data);
+  FindData(aMetaData, kKeyAVCC, mExtraData);
 }
 
 bool
-VideoDecoderConfig::IsValid()
+MP4VideoInfo::IsValid() const
 {
-  return display_width > 0 && display_height > 0;
+  return mDisplay.width > 0 && mDisplay.height > 0;
 }
+
 }
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -122,38 +122,38 @@ already_AddRefed<MediaRawData> SampleIte
     // The size comes from an 8 bit field
     nsAutoTArray<uint8_t, 256> cenc;
     cenc.SetLength(s->mCencRange.Length());
     if (!mIndex->mSource->ReadAt(s->mCencRange.mStart, cenc.Elements(), cenc.Length(),
                                  &bytesRead) || bytesRead != cenc.Length()) {
       return nullptr;
     }
     ByteReader reader(cenc);
-    sample->mCrypto.mValid = true;
-    sample->mCrypto.mIVSize = ivSize;
+    writer->mCrypto.mValid = true;
+    writer->mCrypto.mIVSize = ivSize;
 
-    if (!reader.ReadArray(sample->mCrypto.mIV, ivSize)) {
+    if (!reader.ReadArray(writer->mCrypto.mIV, ivSize)) {
       return nullptr;
     }
 
     if (reader.CanRead16()) {
       uint16_t count = reader.ReadU16();
 
       if (reader.Remaining() < count * 6) {
         return nullptr;
       }
 
       for (size_t i = 0; i < count; i++) {
-        sample->mCrypto.mPlainSizes.AppendElement(reader.ReadU16());
-        sample->mCrypto.mEncryptedSizes.AppendElement(reader.ReadU32());
+        writer->mCrypto.mPlainSizes.AppendElement(reader.ReadU16());
+        writer->mCrypto.mEncryptedSizes.AppendElement(reader.ReadU32());
       }
     } else {
       // No subsample information means the entire sample is encrypted.
-      sample->mCrypto.mPlainSizes.AppendElement(0);
-      sample->mCrypto.mEncryptedSizes.AppendElement(sample->mSize);
+      writer->mCrypto.mPlainSizes.AppendElement(0);
+      writer->mCrypto.mEncryptedSizes.AppendElement(sample->mSize);
     }
   }
 
   Next();
 
   return sample.forget();
 }
 
--- a/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/DecoderData.h
@@ -1,16 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef DECODER_DATA_H_
 #define DECODER_DATA_H_
 
 #include "MediaData.h"
+#include "MediaInfo.h"
 #include "mozilla/Types.h"
 #include "mozilla/Vector.h"
 #include "nsRefPtr.h"
 #include "nsString.h"
 #include "nsTArray.h"
 #include "nsString.h"
 
 namespace stagefright
@@ -47,106 +48,34 @@ public:
 
   bool valid;
   nsTArray<PsshInfo> pssh;
 
 private:
   bool DoUpdate(const uint8_t* aData, size_t aLength);
 };
 
-class TrackConfig
+class MP4AudioInfo : public mozilla::AudioInfo
 {
 public:
-  enum TrackType {
-    kUndefinedTrack,
-    kAudioTrack,
-    kVideoTrack,
-  };
-  explicit TrackConfig(TrackType aType)
-    : mTrackId(0)
-    , duration(0)
-    , media_time(0)
-    , mType(aType)
-  {
-  }
-
-  nsAutoCString mime_type;
-  uint32_t mTrackId;
-  int64_t duration;
-  int64_t media_time;
-  mozilla::CryptoTrack crypto;
-  TrackType mType;
-
-  bool IsAudioConfig() const
-  {
-    return mType == kAudioTrack;
-  }
-  bool IsVideoConfig() const
-  {
-    return mType == kVideoTrack;
-  }
-  void Update(const stagefright::MetaData* aMetaData,
-              const char* aMimeType);
-};
-
-class AudioDecoderConfig : public TrackConfig
-{
-public:
-  AudioDecoderConfig()
-    : TrackConfig(kAudioTrack)
-    , channel_count(0)
-    , bits_per_sample(0)
-    , samples_per_second(0)
-    , frequency_index(0)
-    , aac_profile(0)
-    , extended_profile(0)
-    , extra_data(new mozilla::DataBuffer)
-    , audio_specific_config(new mozilla::DataBuffer)
-  {
-  }
-
-  uint32_t channel_count;
-  uint32_t bits_per_sample;
-  uint32_t samples_per_second;
-  int8_t frequency_index;
-  int8_t aac_profile;
-  int8_t extended_profile;
-  nsRefPtr<mozilla::DataBuffer> extra_data;
-  nsRefPtr<mozilla::DataBuffer> audio_specific_config;
+  MP4AudioInfo() = default;
 
   void Update(const stagefright::MetaData* aMetaData,
               const char* aMimeType);
-  bool IsValid();
 
-private:
-  friend class MP4Demuxer;
+  virtual bool IsValid() const override;
 };
 
-class VideoDecoderConfig : public TrackConfig
+class MP4VideoInfo : public mozilla::VideoInfo
 {
 public:
-  VideoDecoderConfig()
-    : TrackConfig(kVideoTrack)
-    , display_width(0)
-    , display_height(0)
-    , image_width(0)
-    , image_height(0)
-    , extra_data(new mozilla::DataBuffer)
-  {
-  }
-
-  int32_t display_width;
-  int32_t display_height;
-
-  int32_t image_width;
-  int32_t image_height;
-
-  nsRefPtr<mozilla::DataBuffer> extra_data;   // Unparsed AVCDecoderConfig payload.
+  MP4VideoInfo() = default;
 
   void Update(const stagefright::MetaData* aMetaData,
               const char* aMimeType);
-  bool IsValid();
+
+  virtual bool IsValid() const override;
 };
 
 typedef int64_t Microseconds;
 }
 
 #endif
--- a/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
+++ b/media/libstagefright/binding/include/mp4_demuxer/mp4_demuxer.h
@@ -58,18 +58,18 @@ public:
   void SeekVideo(Microseconds aTime);
 
   // DemuxAudioSample and DemuxVideoSample functions
   // return nullptr on end of stream or error.
   already_AddRefed<mozilla::MediaRawData> DemuxAudioSample();
   already_AddRefed<mozilla::MediaRawData> DemuxVideoSample();
 
   const CryptoFile& Crypto() { return mCrypto; }
-  const AudioDecoderConfig& AudioConfig() { return mAudioConfig; }
-  const VideoDecoderConfig& VideoConfig() { return mVideoConfig; }
+  const mozilla::AudioInfo& AudioConfig() { return mAudioConfig; }
+  const mozilla::VideoInfo& VideoConfig() { return mVideoConfig; }
 
   void UpdateIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges);
 
   void ConvertByteRangesToTime(
     const nsTArray<mozilla::MediaByteRange>& aByteRanges,
     nsTArray<Interval<Microseconds>>* aIntervals);
 
   int64_t GetEvictionOffset(Microseconds aTime);
@@ -78,18 +78,18 @@ public:
   // report this.
   Microseconds GetNextKeyframeTime();
 
 protected:
   ~MP4Demuxer();
 
 private:
   void UpdateCrypto(const stagefright::MetaData* aMetaData);
-  AudioDecoderConfig mAudioConfig;
-  VideoDecoderConfig mVideoConfig;
+  MP4AudioInfo mAudioConfig;
+  MP4VideoInfo mVideoConfig;
   CryptoFile mCrypto;
 
   nsAutoPtr<StageFrightPrivate> mPrivate;
   nsRefPtr<Stream> mSource;
   nsTArray<mozilla::MediaByteRange> mCachedByteRanges;
   nsTArray<Interval<Microseconds>> mCachedTimeRanges;
   Monitor* mMonitor;
   Microseconds mNextKeyframeTime;
--- a/media/libstagefright/binding/mp4_demuxer.cpp
+++ b/media/libstagefright/binding/mp4_demuxer.cpp
@@ -136,20 +136,20 @@ MP4Demuxer::Init()
       mPrivate->mIndexes.AppendElement(index);
       mPrivate->mVideoIterator = new SampleIterator(index);
     }
   }
   sp<MetaData> metaData = e->getMetaData();
   UpdateCrypto(metaData.get());
 
   int64_t movieDuration;
-  if (!mVideoConfig.duration && !mAudioConfig.duration &&
+  if (!mVideoConfig.mDuration && !mAudioConfig.mDuration &&
       metaData->findInt64(kKeyMovieDuration, &movieDuration)) {
     // No duration were found in either tracks, use movie extend header box one.
-    mVideoConfig.duration = mAudioConfig.duration = movieDuration;
+    mVideoConfig.mDuration = mAudioConfig.mDuration = movieDuration;
   }
   mPrivate->mCanSeek = e->flags() & MediaExtractor::CAN_SEEK;
 
   return mPrivate->mAudio.get() || mPrivate->mVideo.get();
 }
 
 void
 MP4Demuxer::UpdateCrypto(const MetaData* aMetaData)
@@ -179,17 +179,17 @@ MP4Demuxer::HasValidVideo()
   mMonitor->AssertCurrentThreadOwns();
   return mPrivate->mVideo.get() && mVideoConfig.IsValid();
 }
 
 Microseconds
 MP4Demuxer::Duration()
 {
   mMonitor->AssertCurrentThreadOwns();
-  return std::max(mVideoConfig.duration, mAudioConfig.duration);
+  return std::max(mVideoConfig.mDuration, mAudioConfig.mDuration);
 }
 
 bool
 MP4Demuxer::CanSeek()
 {
   mMonitor->AssertCurrentThreadOwns();
   return mPrivate->mCanSeek;
 }
@@ -217,37 +217,39 @@ MP4Demuxer::DemuxAudioSample()
 {
   mMonitor->AssertCurrentThreadOwns();
   if (!mPrivate->mAudioIterator) {
     return nullptr;
   }
   nsRefPtr<mozilla::MediaRawData> sample(mPrivate->mAudioIterator->GetNext());
   if (sample) {
     if (sample->mCrypto.mValid) {
-      sample->mCrypto.mMode = mAudioConfig.crypto.mMode;
-      sample->mCrypto.mIVSize = mAudioConfig.crypto.mIVSize;
-      sample->mCrypto.mKeyId.AppendElements(mAudioConfig.crypto.mKeyId);
+      nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
+      writer->mCrypto.mMode = mAudioConfig.mCrypto.mMode;
+      writer->mCrypto.mIVSize = mAudioConfig.mCrypto.mIVSize;
+      writer->mCrypto.mKeyId.AppendElements(mAudioConfig.mCrypto.mKeyId);
     }
   }
   return sample.forget();
 }
 
 already_AddRefed<MediaRawData>
 MP4Demuxer::DemuxVideoSample()
 {
   mMonitor->AssertCurrentThreadOwns();
   if (!mPrivate->mVideoIterator) {
     return nullptr;
   }
   nsRefPtr<mozilla::MediaRawData> sample(mPrivate->mVideoIterator->GetNext());
   if (sample) {
-    sample->mExtraData = mVideoConfig.extra_data;
+    sample->mExtraData = mVideoConfig.mExtraData;
     if (sample->mCrypto.mValid) {
-      sample->mCrypto.mMode = mVideoConfig.crypto.mMode;
-      sample->mCrypto.mKeyId.AppendElements(mVideoConfig.crypto.mKeyId);
+      nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
+      writer->mCrypto.mMode = mVideoConfig.mCrypto.mMode;
+      writer->mCrypto.mKeyId.AppendElements(mVideoConfig.mCrypto.mKeyId);
     }
     if (sample->mTime >= mNextKeyframeTime) {
       mNextKeyframeTime = mPrivate->mVideoIterator->GetNextKeyframeTime();
     }
   }
   return sample.forget();
 }
 
--- a/media/libyuv/source/compare_win.cc
+++ b/media/libyuv/source/compare_win.cc
@@ -13,17 +13,17 @@
 
 #ifdef __cplusplus
 namespace libyuv {
 extern "C" {
 #endif
 
 #if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)
 
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) {
   __asm {
     mov        eax, [esp + 4]    // src_a
     mov        edx, [esp + 8]    // src_b
     mov        ecx, [esp + 12]   // count
     pxor       xmm0, xmm0
     pxor       xmm5, xmm5
 
@@ -55,17 +55,17 @@ uint32 SumSquareError_SSE2(const uint8* 
     ret
   }
 }
 
 // Visual C 2012 required for AVX2.
 #if _MSC_VER >= 1700
 // C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
 #pragma warning(disable: 4752)
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count) {
   __asm {
     mov        eax, [esp + 4]    // src_a
     mov        edx, [esp + 8]    // src_b
     mov        ecx, [esp + 12]   // count
     vpxor      ymm0, ymm0, ymm0  // sum
     vpxor      ymm5, ymm5, ymm5  // constant 0 for unpck
     sub        edx, eax
@@ -130,17 +130,17 @@ static uvec32 kHashMul3 = {
 // 27: 66 0F 38 40 C6     pmulld      xmm0,xmm6
 // 44: 66 0F 38 40 DD     pmulld      xmm3,xmm5
 // 59: 66 0F 38 40 E5     pmulld      xmm4,xmm5
 // 72: 66 0F 38 40 D5     pmulld      xmm2,xmm5
 // 83: 66 0F 38 40 CD     pmulld      xmm1,xmm5
 #define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \
     _asm _emit 0x40 _asm _emit reg
 
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
   __asm {
     mov        eax, [esp + 4]    // src
     mov        ecx, [esp + 8]    // count
     movd       xmm0, [esp + 12]  // seed
 
     pxor       xmm7, xmm7        // constant 0 for unpck
     movdqa     xmm6, kHash16x33
@@ -182,17 +182,17 @@ uint32 HashDjb2_SSE41(const uint8* src, 
 
     movd       eax, xmm0         // return hash
     ret
   }
 }
 
 // Visual C 2012 required for AVX2.
 #if _MSC_VER >= 1700
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed) {
   __asm {
     mov        eax, [esp + 4]    // src
     mov        ecx, [esp + 8]    // count
     movd       xmm0, [esp + 12]  // seed
     movdqa     xmm6, kHash16x33
 
     align      4
--- a/media/libyuv/source/rotate.cc
+++ b/media/libyuv/source/rotate.cc
@@ -71,17 +71,17 @@ void TransposeUVWx8_MIPS_DSPR2(const uin
                                uint8* dst_a, int dst_stride_a,
                                uint8* dst_b, int dst_stride_b,
                                int width);
 #endif  // defined(__mips__)
 
 #if !defined(LIBYUV_DISABLE_X86) && \
     defined(_M_IX86) && defined(_MSC_VER)
 #define HAS_TRANSPOSE_WX8_SSSE3
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 static void TransposeWx8_SSSE3(const uint8* src, int src_stride,
                                uint8* dst, int dst_stride, int width) {
   __asm {
     push      edi
     push      esi
     push      ebp
     mov       eax, [esp + 12 + 4]   // src
     mov       edi, [esp + 12 + 8]   // src_stride
@@ -163,17 +163,17 @@ static void TransposeWx8_SSSE3(const uin
     pop       ebp
     pop       esi
     pop       edi
     ret
   }
 }
 
 #define HAS_TRANSPOSE_UVWX8_SSE2
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 static void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
                                 uint8* dst_a, int dst_stride_a,
                                 uint8* dst_b, int dst_stride_b,
                                 int w) {
   __asm {
     push      ebx
     push      esi
     push      edi
--- a/media/libyuv/source/row_win.cc
+++ b/media/libyuv/source/row_win.cc
@@ -139,17 +139,17 @@ static const uvec8 kShuffleMaskARGBToRGB
 };
 
 // Shuffle table for converting ARGB to RAW.
 static const uvec8 kShuffleMaskARGBToRAW_0 = {
   2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
 };
 
 // Duplicates gray value 3 times and fills in alpha opaque.
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 void I400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
   __asm {
     mov        eax, [esp + 4]        // src_y
     mov        edx, [esp + 8]        // dst_argb
     mov        ecx, [esp + 12]       // pix
     pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
     pslld      xmm5, 24
 
@@ -167,17 +167,17 @@ void I400ToARGBRow_SSE2(const uint8* src
     movdqa     [edx + 16], xmm1
     lea        edx, [edx + 32]
     sub        ecx, 8
     jg         convertloop
     ret
   }
 }
 
-__declspec(naked) __declspec(align(16))
+__declspec(naked)
 void I400ToARGBRow_Unaligned_SSE2(const uint8* src_y, uint8* dst_argb,
                                   int pix) {
   __asm {
     mov        eax, [esp + 4]        // src_y
     mov        edx, [esp + 8]        // dst_argb
     mov        ecx, [esp + 12]       // pix
     pcmpeqb    xmm5, xmm5            // generate mask 0xff000000
     pslld      xmm5, 24
@@ -196,17 +196,17 @@ void I400ToARGBRow_Unaligned_SSE2(const 
     movdqu     [edx + 16], xmm1
     lea        edx, [edx + 32]
     sub        ecx, 8
     jg         convertloop
     ret
   }
 }
 
-__declspec(naked) __declspec(align(16))
+__d