merge mozilla-inbound to mozilla-central a=merge
author Carsten "Tomcat" Book <cbook@mozilla.com>
Fri, 05 Aug 2016 12:01:14 +0200
changeset 351820 d320ef56876f52db9bc0eb79554c7332d4793769
parent 351730 975ba208687a97ecb9fd439c1ee52bfa3350e25b (current diff)
parent 351819 f0725e82f2869baeb01cb86b8aa6bece0ca26cd6 (diff)
child 351827 b97dd7dd3cb9c679cd5ab95ece0aa933988d0840
child 351841 2c234f5a191673f6a4b0490435851e61852a54e8
child 351864 f63b7c68ac825598a2feae6d6e346c9e2976fd48
push id 1324
push user mtabara@mozilla.com
push date Mon, 16 Jan 2017 13:07:44 +0000
treeherder mozilla-release@a01c49833940 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers merge
milestone 51.0a1
first release with
nightly linux32
d320ef56876f / 51.0a1 / 20160805030444 / files
nightly linux64
d320ef56876f / 51.0a1 / 20160805030444 / files
nightly mac
d320ef56876f / 51.0a1 / 20160805030444 / files
nightly win32
d320ef56876f / 51.0a1 / 20160805030444 / files
nightly win64
d320ef56876f / 51.0a1 / 20160805030444 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
merge mozilla-inbound to mozilla-central a=merge
browser/base/content/browser-social.js
browser/base/content/browser.js
gfx/gl/AndroidNativeWindow.cpp
gfx/gl/AndroidNativeWindow.h
gfx/thebes/gfxPlatform.cpp
testing/web-platform/meta/WebCryptoAPI/digest/digest.worker.js.ini
testing/web-platform/meta/WebCryptoAPI/digest/test_digest.html.ini
testing/web-platform/meta/fetch/api/cors/cors-preflight-worker.html.ini
testing/web-platform/meta/shadow-dom/untriaged/user-interaction/focus-navigation/test-001.html.ini
testing/web-platform/meta/shadow-dom/untriaged/user-interaction/focus-navigation/test-002.html.ini
testing/web-platform/meta/shadow-dom/untriaged/user-interaction/focus-navigation/test-003.html.ini
testing/web-platform/meta/shadow-dom/untriaged/user-interaction/focus-navigation/test-004.html.ini
testing/web-platform/meta/workers/constructors/Worker/expected-self-properties.html.ini
testing/web-platform/tests/XMLHttpRequest/XMLHttpRequest-withCredentials.html
testing/web-platform/tests/XMLHttpRequest/XMLHttpRequest-withCredentials.js
testing/web-platform/tests/XMLHttpRequest/XMLHttpRequest-withCredentials.worker.js
testing/web-platform/tests/encrypted-media/Google/test-encrypted-different-av-keys.webm
testing/web-platform/tests/encrypted-media/Google/test-encrypted.webm
testing/web-platform/tests/fetch/api/request/resources/get-host-info.sub.js
testing/web-platform/tests/pointerevents/pointerevent_button_attribute_mouse-manual.html
testing/web-platform/tests/pointerevents/pointerevent_properties_mouse-manual.html
testing/web-platform/tests/shadow-dom/untriaged/user-interaction/focus-navigation/test-001.html
testing/web-platform/tests/shadow-dom/untriaged/user-interaction/focus-navigation/test-002.html
testing/web-platform/tests/shadow-dom/untriaged/user-interaction/focus-navigation/test-003.html
testing/web-platform/tests/shadow-dom/untriaged/user-interaction/focus-navigation/test-004.html
testing/web-platform/tests/web-animations/interfaces/DocumentTimeline/idlharness.html
testing/web-platform/tests/web-animations/timing-model/timelines/default-document-timeline.html
testing/web-platform/tests/workers/constructors/Worker/DedicatedWorkerGlobalScope-members.html
testing/web-platform/tests/workers/constructors/Worker/expected-self-properties.html
testing/web-platform/tests/workers/constructors/Worker/unexpected-self-properties.html
testing/web-platform/tests/workers/workers.js
toolkit/components/extensions/test/mochitest/test_chrome_ext_native_messaging.html
toolkit/components/extensions/test/xpcshell/test_ext_native_messaging.js
widget/android/AndroidBridge.cpp
widget/android/AndroidBridge.h
widget/android/AndroidDirectTexture.cpp
widget/android/AndroidGraphicBuffer.cpp
widget/android/moz.build
--- a/accessible/generic/DocAccessible.cpp
+++ b/accessible/generic/DocAccessible.cpp
@@ -759,16 +759,21 @@ DocAccessible::AttributeChanged(nsIDocum
   Accessible* accessible = GetAccessible(aElement);
   if (!accessible) {
     if (mContent != aElement)
       return;
 
     accessible = this;
   }
 
+  if (!accessible->IsBoundToParent()) {
+    MOZ_ASSERT_UNREACHABLE("DOM attribute change on accessible detached from tree");
+    return;
+  }
+
   // Fire accessible events iff there's an accessible, otherwise we consider
   // the accessible state wasn't changed, i.e. its state is initial state.
   AttributeChangedImpl(accessible, aNameSpaceID, aAttribute);
 
   // Update dependent IDs cache. Take care of accessible elements because no
   // accessible element means either the element is not accessible at all or
   // its accessible will be created later. It doesn't make sense to keep
   // dependent IDs for non accessible elements. For the second case we'll update
--- a/browser/base/content/browser-social.js
+++ b/browser/base/content/browser-social.js
@@ -233,17 +233,17 @@ SocialUI = {
     if (shareButton) {
       if (canShare) {
         shareButton.removeAttribute("disabled")
       } else {
         shareButton.setAttribute("disabled", "true")
       }
     }
     // update the disabled state of the button based on the command
-    for (let node of SocialMarks.nodes) {
+    for (let node of SocialMarks.nodes()) {
       if (canShare) {
         node.removeAttribute("disabled")
       } else {
         node.setAttribute("disabled", "true")
       }
     }
   },
 
@@ -1295,31 +1295,31 @@ var SocialMarksWidgetListener = {
 }
 
 /**
  * SocialMarks
  *
  * Handles updates to toolbox and signals all buttons to update when necessary.
  */
 SocialMarks = {
-  get nodes() {
+  *nodes() {
     for (let p of Social.providers.filter(p => p.markURL)) {
       let widgetId = SocialMarks._toolbarHelper.idFromOrigin(p.origin);
       let widget = CustomizableUI.getWidget(widgetId);
       if (!widget)
         continue;
       let node = widget.forWindow(window).node;
       if (node)
         yield node;
     }
   },
   update: function() {
     // querySelectorAll does not work on the menu panel, so we have to do this
     // the hard way.
-    for (let node of this.nodes) {
+    for (let node of this.nodes()) {
       // xbl binding is not complete on startup when buttons are not in toolbar,
       // verify update is available
       if (node.update) {
         node.update();
       }
     }
   },
 
--- a/browser/base/content/browser.js
+++ b/browser/base/content/browser.js
@@ -7589,18 +7589,22 @@ var TabContextMenu = {
     let disabled = gBrowser.tabs.length == 1;
 
     var menuItems = aPopupMenu.getElementsByAttribute("tbattr", "tabbrowser-multiple");
     for (let menuItem of menuItems)
       menuItem.disabled = disabled;
 
     if (AppConstants.E10S_TESTING_ONLY) {
       menuItems = aPopupMenu.getElementsByAttribute("tbattr", "tabbrowser-remote");
-      for (let menuItem of menuItems)
+      for (let menuItem of menuItems) {
         menuItem.hidden = !gMultiProcessBrowser;
+        if (menuItem.id == "context_openNonRemoteWindow") {
+          menuItem.disabled = !!parseInt(this.contextTab.getAttribute("usercontextid"));
+        }
+      }
     }
 
     disabled = gBrowser.visibleTabs.length == 1;
     menuItems = aPopupMenu.getElementsByAttribute("tbattr", "tabbrowser-multiple-visible");
     for (let menuItem of menuItems)
       menuItem.disabled = disabled;
 
     // Session store
--- a/browser/base/content/test/general/browser_contextmenu.js
+++ b/browser/base/content/test/general/browser_contextmenu.js
@@ -877,17 +877,17 @@ add_task(function* test_plaintext_sendpa
                     "---",                  null,
                     "context-viewbgimage",  false,
                     "context-selectall",    true,
                     "---",                  null,
                     "context-viewsource",   true,
                     "context-viewinfo",     true
                    ];
   yield test_contextmenu("#test-text", plainTextItems, {
-      onContextMenuShown() {
+      *onContextMenuShown() {
         yield openMenuItemSubmenu("context-sendpagetodevice");
       }
     });
 
   restoreRemoteClients(oldGetter);
 });
 
 add_task(function* test_link_sendlinktodevice() {
@@ -913,17 +913,17 @@ add_task(function* test_link_sendlinktod
      "---",                  null,
      "context-sendlinktodevice", true,
       ["*Foo", true,
        "*Bar", true,
        "---", null,
        "*All Devices", true], null,
     ],
     {
-      onContextMenuShown() {
+      *onContextMenuShown() {
         yield openMenuItemSubmenu("context-sendlinktodevice");
       }
     });
 
   restoreRemoteClients(oldGetter);
 });
 
 add_task(function* test_cleanup() {
--- a/browser/components/extensions/.eslintrc
+++ b/browser/components/extensions/.eslintrc
@@ -1,18 +1,18 @@
 {
   "extends": "../../../toolkit/components/extensions/.eslintrc",
 
   "globals": {
     "AllWindowEvents": true,
     "currentWindow": true,
     "EventEmitter": true,
+    "IconDetails": true,
     "makeWidgetId": true,
     "pageActionFor": true,
-    "IconDetails": true,
     "PanelPopup": true,
     "TabContext": true,
     "ViewPopup": true,
     "WindowEventManager": true,
     "WindowListManager": true,
     "WindowManager": true,
   },
 }
--- a/browser/components/extensions/ext-browserAction.js
+++ b/browser/components/extensions/ext-browserAction.js
@@ -1,15 +1,19 @@
 /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
 /* vim: set sts=2 sw=2 et tw=80: */
 "use strict";
 
 XPCOMUtils.defineLazyModuleGetter(this, "CustomizableUI",
                                   "resource:///modules/CustomizableUI.jsm");
 
+XPCOMUtils.defineLazyGetter(this, "colorUtils", () => {
+  return require("devtools/shared/css-color").colorUtils;
+});
+
 Cu.import("resource://devtools/shared/event-emitter.js");
 
 Cu.import("resource://gre/modules/ExtensionUtils.jsm");
 var {
   EventManager,
   IconDetails,
 } = ExtensionUtils;
 
@@ -137,18 +141,18 @@ BrowserAction.prototype = {
     } else {
       node.setAttribute("disabled", "true");
     }
 
     let badgeNode = node.ownerDocument.getAnonymousElementByAttribute(node,
                                         "class", "toolbarbutton-badge");
     if (badgeNode) {
       let color = tabData.badgeBackgroundColor;
-      if (Array.isArray(color)) {
-        color = `rgb(${color[0]}, ${color[1]}, ${color[2]})`;
+      if (color) {
+        color = `rgba(${color[0]}, ${color[1]}, ${color[2]}, ${color[3] / 255})`;
       }
       badgeNode.style.backgroundColor = color || "";
     }
 
     const LEGACY_CLASS = "toolbarbutton-legacy-addon";
     node.classList.remove(LEGACY_CLASS);
 
     let baseSize = 16;
@@ -328,19 +332,24 @@ extensions.registerSchemaAPI("browserAct
       getPopup: function(details) {
         let tab = details.tabId !== null ? TabManager.getTab(details.tabId) : null;
         let popup = BrowserAction.for(extension).getProperty(tab, "popup");
         return Promise.resolve(popup);
       },
 
       setBadgeBackgroundColor: function(details) {
         let tab = details.tabId !== null ? TabManager.getTab(details.tabId) : null;
-        BrowserAction.for(extension).setProperty(tab, "badgeBackgroundColor", details.color);
+        let color = details.color;
+        if (!Array.isArray(color)) {
+          let col = colorUtils.colorToRGBA(color);
+          color = col && [col.r, col.g, col.b, Math.round(col.a * 255)];
+        }
+        BrowserAction.for(extension).setProperty(tab, "badgeBackgroundColor", color);
       },
 
       getBadgeBackgroundColor: function(details, callback) {
         let tab = details.tabId !== null ? TabManager.getTab(details.tabId) : null;
         let color = BrowserAction.for(extension).getProperty(tab, "badgeBackgroundColor");
-        return Promise.resolve(color);
+        return Promise.resolve(color || [0xd9, 0, 0, 255]);
       },
     },
   };
 });
--- a/browser/components/extensions/test/browser/browser.ini
+++ b/browser/components/extensions/test/browser/browser.ini
@@ -39,16 +39,17 @@ support-files =
 [browser_ext_lastError.js]
 [browser_ext_optionsPage_privileges.js]
 [browser_ext_pageAction_context.js]
 [browser_ext_pageAction_popup.js]
 [browser_ext_pageAction_popup_resize.js]
 [browser_ext_pageAction_simple.js]
 [browser_ext_popup_api_injection.js]
 [browser_ext_runtime_openOptionsPage.js]
+[browser_ext_runtime_openOptionsPage_uninstall.js]
 [browser_ext_runtime_setUninstallURL.js]
 [browser_ext_simple.js]
 [browser_ext_tab_runtimeConnect.js]
 [browser_ext_tabs_audio.js]
 [browser_ext_tabs_captureVisibleTab.js]
 [browser_ext_tabs_create.js]
 [browser_ext_tabs_create_invalid_url.js]
 [browser_ext_tabs_detectLanguage.js]
--- a/browser/components/extensions/test/browser/browser_ext_browserAction_context.js
+++ b/browser/components/extensions/test/browser/browser_ext_browserAction_context.js
@@ -166,39 +166,41 @@ add_task(function* testTabSwitchContext(
 
       "default.png": imageBuffer,
       "default-2.png": imageBuffer,
       "1.png": imageBuffer,
       "2.png": imageBuffer,
     },
 
     getTests(tabs, expectDefaults) {
+      const DEFAULT_BADGE_COLOR = [0xd9, 0, 0, 255];
+
       let details = [
         {"icon": browser.runtime.getURL("default.png"),
          "popup": browser.runtime.getURL("default.html"),
          "title": "Default Title",
          "badge": "",
-         "badgeBackgroundColor": null},
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR},
         {"icon": browser.runtime.getURL("1.png"),
          "popup": browser.runtime.getURL("default.html"),
          "title": "Default Title",
          "badge": "",
-         "badgeBackgroundColor": null},
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR},
         {"icon": browser.runtime.getURL("2.png"),
          "popup": browser.runtime.getURL("2.html"),
          "title": "Title 2",
          "badge": "2",
          "badgeBackgroundColor": [0xff, 0, 0, 0xff],
-          "disabled": true},
+         "disabled": true},
         {"icon": browser.runtime.getURL("1.png"),
          "popup": browser.runtime.getURL("default-2.html"),
          "title": "Default Title 2",
          "badge": "d2",
          "badgeBackgroundColor": [0, 0xff, 0, 0xff],
-          "disabled": true},
+         "disabled": true},
         {"icon": browser.runtime.getURL("1.png"),
          "popup": browser.runtime.getURL("default-2.html"),
          "title": "Default Title 2",
          "badge": "d2",
          "badgeBackgroundColor": [0, 0xff, 0, 0xff],
          "disabled": false},
         {"icon": browser.runtime.getURL("default-2.png"),
          "popup": browser.runtime.getURL("default-2.html"),
@@ -232,17 +234,17 @@ add_task(function* testTabSwitchContext(
         },
         expect => {
           browser.test.log("Change properties. Expect new properties.");
           let tabId = tabs[1];
           browser.browserAction.setIcon({tabId, path: "2.png"});
           browser.browserAction.setPopup({tabId, popup: "2.html"});
           browser.browserAction.setTitle({tabId, title: "Title 2"});
           browser.browserAction.setBadgeText({tabId, text: "2"});
-          browser.browserAction.setBadgeBackgroundColor({tabId, color: [0xff, 0, 0, 0xff]});
+          browser.browserAction.setBadgeBackgroundColor({tabId, color: "#ff0000"});
           browser.browserAction.disable(tabId);
 
           expectDefaults(details[0]).then(() => {
             expect(details[2]);
           });
         },
         expect => {
           browser.test.log("Navigate to a new page. Expect no changes.");
@@ -327,36 +329,38 @@ add_task(function* testDefaultTitle() {
       "permissions": ["tabs"],
     },
 
     files: {
       "icon.png": imageBuffer,
     },
 
     getTests(tabs, expectDefaults) {
+      const DEFAULT_BADGE_COLOR = [0xd9, 0, 0, 255];
+
       let details = [
         {"title": "Foo Extension",
          "popup": "",
          "badge": "",
-         "badgeBackgroundColor": null,
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR,
          "icon": browser.runtime.getURL("icon.png")},
         {"title": "Foo Title",
          "popup": "",
          "badge": "",
-         "badgeBackgroundColor": null,
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR,
          "icon": browser.runtime.getURL("icon.png")},
         {"title": "Bar Title",
          "popup": "",
          "badge": "",
-         "badgeBackgroundColor": null,
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR,
          "icon": browser.runtime.getURL("icon.png")},
         {"title": "",
          "popup": "",
          "badge": "",
-         "badgeBackgroundColor": null,
+         "badgeBackgroundColor": DEFAULT_BADGE_COLOR,
          "icon": browser.runtime.getURL("icon.png")},
       ];
 
       return [
         expect => {
           browser.test.log("Initial state. Expect extension title as default title.");
           expectDefaults(details[0]).then(() => {
             expect(details[0]);
--- a/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js
+++ b/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js
@@ -252,71 +252,8 @@ add_task(function* test_options_no_manif
         browser.test.notifyFail("options-no-manifest");
       });
     },
   });
 
   yield extension.awaitFinish("options-no-manifest");
   yield extension.unload();
 });
-
-add_task(function* test_inline_options_uninstall() {
-  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com/");
-
-  let extension = yield loadExtension({
-    manifest: {
-      "options_ui": {
-        "page": "options.html",
-      },
-    },
-
-    background: function() {
-      let _optionsPromise;
-      let awaitOptions = () => {
-        browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already");
-
-        return new Promise(resolve => {
-          _optionsPromise = {resolve};
-        });
-      };
-
-      browser.runtime.onMessage.addListener((msg, sender) => {
-        if (msg == "options.html") {
-          if (_optionsPromise) {
-            _optionsPromise.resolve(sender.tab);
-            _optionsPromise = null;
-          } else {
-            browser.test.fail("Saw unexpected options page load");
-          }
-        }
-      });
-
-      let firstTab;
-      browser.tabs.query({currentWindow: true, active: true}).then(tabs => {
-        firstTab = tabs[0].id;
-
-        browser.test.log("Open options page. Expect fresh load.");
-        return Promise.all([
-          browser.runtime.openOptionsPage(),
-          awaitOptions(),
-        ]);
-      }).then(([, tab]) => {
-        browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager");
-        browser.test.assertTrue(tab.active, "Tab is active");
-        browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab");
-
-        browser.test.sendMessage("options-ui-open");
-      }).catch(error => {
-        browser.test.fail(`Error: ${error} :: ${error.stack}`);
-      });
-    },
-  });
-
-  yield extension.awaitMessage("options-ui-open");
-  yield extension.unload();
-
-  is(gBrowser.selectedBrowser.currentURI.spec, "about:addons",
-     "Add-on manager tab should still be open");
-
-  yield BrowserTestUtils.removeTab(gBrowser.selectedTab);
-
-  yield BrowserTestUtils.removeTab(tab);
-});
copy from browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js
copy to browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage_uninstall.js
--- a/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js
+++ b/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage_uninstall.js
@@ -32,237 +32,16 @@ function* loadExtension(options) {
     background: options.background,
   });
 
   yield extension.startup();
 
   return extension;
 }
 
-add_task(function* test_inline_options() {
-  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com/");
-
-  let extension = yield loadExtension({
-    manifest: {
-      "options_ui": {
-        "page": "options.html",
-      },
-    },
-
-    background: function() {
-      let _optionsPromise;
-      let awaitOptions = () => {
-        browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already");
-
-        return new Promise(resolve => {
-          _optionsPromise = {resolve};
-        });
-      };
-
-      browser.runtime.onMessage.addListener((msg, sender) => {
-        if (msg == "options.html") {
-          if (_optionsPromise) {
-            _optionsPromise.resolve(sender.tab);
-            _optionsPromise = null;
-          } else {
-            browser.test.fail("Saw unexpected options page load");
-          }
-        }
-      });
-
-      let firstTab, optionsTab;
-      browser.tabs.query({currentWindow: true, active: true}).then(tabs => {
-        firstTab = tabs[0].id;
-
-        browser.test.log("Open options page. Expect fresh load.");
-        return Promise.all([
-          browser.runtime.openOptionsPage(),
-          awaitOptions(),
-        ]);
-      }).then(([, tab]) => {
-        browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager");
-        browser.test.assertTrue(tab.active, "Tab is active");
-        browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab");
-
-        optionsTab = tab.id;
-
-        browser.test.log("Switch tabs.");
-        return browser.tabs.update(firstTab, {active: true});
-      }).then(() => {
-        browser.test.log("Open options page again. Expect tab re-selected, no new load.");
-
-        return browser.runtime.openOptionsPage();
-      }).then(() => {
-        return browser.tabs.query({currentWindow: true, active: true});
-      }).then(([tab]) => {
-        browser.test.assertEq(optionsTab, tab.id, "Tab is the same as the previous options tab");
-        browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager");
-
-        browser.test.log("Ping options page.");
-        return browser.runtime.sendMessage("ping");
-      }).then((pong) => {
-        browser.test.assertEq("pong", pong, "Got pong.");
-
-        browser.test.log("Remove options tab.");
-        return browser.tabs.remove(optionsTab);
-      }).then(() => {
-        browser.test.log("Open options page again. Expect fresh load.");
-        return Promise.all([
-          browser.runtime.openOptionsPage(),
-          awaitOptions(),
-        ]);
-      }).then(([, tab]) => {
-        browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager");
-        browser.test.assertTrue(tab.active, "Tab is active");
-        browser.test.assertTrue(tab.id != optionsTab, "Tab is a new tab");
-
-        return browser.tabs.remove(tab.id);
-      }).then(() => {
-        browser.test.notifyPass("options-ui");
-      }).catch(error => {
-        browser.test.log(`Error: ${error} :: ${error.stack}`);
-        browser.test.notifyFail("options-ui");
-      });
-    },
-  });
-
-  yield extension.awaitFinish("options-ui");
-  yield extension.unload();
-
-  yield BrowserTestUtils.removeTab(tab);
-});
-
-add_task(function* test_tab_options() {
-  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com/");
-
-  let extension = yield loadExtension({
-    manifest: {
-      "options_ui": {
-        "page": "options.html",
-        "open_in_tab": true,
-      },
-    },
-
-    background: function() {
-      let _optionsPromise;
-      let awaitOptions = () => {
-        browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already");
-
-        return new Promise(resolve => {
-          _optionsPromise = {resolve};
-        });
-      };
-
-      browser.runtime.onMessage.addListener((msg, sender) => {
-        if (msg == "options.html") {
-          if (_optionsPromise) {
-            _optionsPromise.resolve(sender.tab);
-            _optionsPromise = null;
-          } else {
-            browser.test.fail("Saw unexpected options page load");
-          }
-        }
-      });
-
-      let optionsURL = browser.extension.getURL("options.html");
-
-      let firstTab, optionsTab;
-      browser.tabs.query({currentWindow: true, active: true}).then(tabs => {
-        firstTab = tabs[0].id;
-
-        browser.test.log("Open options page. Expect fresh load.");
-        return Promise.all([
-          browser.runtime.openOptionsPage(),
-          awaitOptions(),
-        ]);
-      }).then(([, tab]) => {
-        browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html");
-        browser.test.assertTrue(tab.active, "Tab is active");
-        browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab");
-
-        optionsTab = tab.id;
-
-        browser.test.log("Switch tabs.");
-        return browser.tabs.update(firstTab, {active: true});
-      }).then(() => {
-        browser.test.log("Open options page again. Expect tab re-selected, no new load.");
-
-        return browser.runtime.openOptionsPage();
-      }).then(() => {
-        return browser.tabs.query({currentWindow: true, active: true});
-      }).then(([tab]) => {
-        browser.test.assertEq(optionsTab, tab.id, "Tab is the same as the previous options tab");
-        browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html");
-
-        // Unfortunately, we can't currently do this, since onMessage doesn't
-        // currently support responses when there are multiple listeners.
-        //
-        // browser.test.log("Ping options page.");
-        // return new Promise(resolve => browser.runtime.sendMessage("ping", resolve));
-
-        browser.test.log("Remove options tab.");
-        return browser.tabs.remove(optionsTab);
-      }).then(() => {
-        browser.test.log("Open options page again. Expect fresh load.");
-        return Promise.all([
-          browser.runtime.openOptionsPage(),
-          awaitOptions(),
-        ]);
-      }).then(([, tab]) => {
-        browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html");
-        browser.test.assertTrue(tab.active, "Tab is active");
-        browser.test.assertTrue(tab.id != optionsTab, "Tab is a new tab");
-
-        return browser.tabs.remove(tab.id);
-      }).then(() => {
-        browser.test.notifyPass("options-ui-tab");
-      }).catch(error => {
-        browser.test.log(`Error: ${error} :: ${error.stack}`);
-        browser.test.notifyFail("options-ui-tab");
-      });
-    },
-  });
-
-  yield extension.awaitFinish("options-ui-tab");
-  yield extension.unload();
-
-  yield BrowserTestUtils.removeTab(tab);
-});
-
-add_task(function* test_options_no_manifest() {
-  let extension = yield loadExtension({
-    manifest: {},
-
-    background: function() {
-      browser.test.log("Try to open options page when not specified in the manifest.");
-
-      browser.runtime.openOptionsPage().then(
-        () => {
-          browser.test.fail("Opening options page without one specified in the manifest generated an error");
-          browser.test.notifyFail("options-no-manifest");
-        },
-        error => {
-          let expected = "No `options_ui` declared";
-          browser.test.assertTrue(
-            error.message.includes(expected),
-            `Got expected error (got: '${error.message}', expected: '${expected}'`);
-        }
-      ).then(() => {
-        browser.test.notifyPass("options-no-manifest");
-      }).catch(error => {
-        browser.test.log(`Error: ${error} :: ${error.stack}`);
-        browser.test.notifyFail("options-no-manifest");
-      });
-    },
-  });
-
-  yield extension.awaitFinish("options-no-manifest");
-  yield extension.unload();
-});
-
 add_task(function* test_inline_options_uninstall() {
   let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com/");
 
   let extension = yield loadExtension({
     manifest: {
       "options_ui": {
         "page": "options.html",
       },
--- a/browser/config/tooltool-manifests/macosx64/releng.manifest
+++ b/browser/config/tooltool-manifests/macosx64/releng.manifest
@@ -3,18 +3,19 @@
 "version": "clang 3.8.0",
 "size": 133060926,
 "digest": "aff5ad3ac2d41db19d1ba0df5f97b189a7d7e1b6af8c56e22c2b0cced84d75fa98394ded6a4ba5713652e6684a0a46f47aeccf87991f9e849bf8d7d82e564f6f",
 "algorithm": "sha512",
 "filename": "clang.tar.bz2",
 "unpack": true
 },
 {
-"size": 93295855,
-"digest": "2b8fd0c1ba337a7035090c420305a7892e663ce6781569b100b36fa21cc26146e67f44a34c7715f0004f48bbe46c232bbbf2928c9d0595243d2584530770b504",
+"version": "rust 1.10 repack",
+"size": 150726627,
+"digest": "a30476113212895a837b2c4c18eb34d767c7192c3e327fba84c0138eaf7e671e84d5294e75370af3fe7e527a61e0938cd6cce20fba0aec94537070eb0094e27e",
 "algorithm": "sha512",
 "filename": "rustc.tar.bz2",
 "unpack": true
 },
 {
 "version": "cargo 0.13.0-nightly (664125b 2016-07-19)",
 "size": 2571167,
 "digest": "b2616459fbf15c75b54628a6bfe8cf89c0841ea08431f5096e72be4fac4c685785dfc7a2f18a03a5f7bd377e78d3c108e5029b12616842cbbd0497ff7363fdaf",
--- a/config/system-headers
+++ b/config/system-headers
@@ -711,16 +711,18 @@ mapiguid.h
 mapi.h
 mapitags.h
 mapiutil.h
 mapix.h
 Math64.h
 math.h
 mbstring.h
 #ifdef ANDROID
+android/native_window.h
+android/native_window_jni.h
 media/AudioEffect.h
 media/AudioSystem.h
 media/ICrypto.h
 media/IOMX.h
 media/MediaProfiles.h
 media/MediaRecorderBase.h
 media/openmax/OMX_Audio.h
 media/stagefright/AACWriter.h
--- a/dom/base/CustomElementsRegistry.cpp
+++ b/dom/base/CustomElementsRegistry.cpp
@@ -21,17 +21,17 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
 /* static */ bool
 CustomElementsRegistry::IsCustomElementsEnabled(JSContext* aCx, JSObject* aObject)
 {
   JS::Rooted<JSObject*> obj(aCx, aObject);
   if (Preferences::GetBool("dom.webcomponents.customelements.enabled") ||
-      nsDocument::IsWebComponentsEnabled(aCx, obj)) {
+      Preferences::GetBool("dom.webcomponents.enabled")) {
     return true;
   }
 
   return false;
 }
 
 /* static */ already_AddRefed<CustomElementsRegistry>
 CustomElementsRegistry::Create(nsPIDOMWindowInner* aWindow)
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -299,27 +299,20 @@ int32_t gTimeoutCnt                     
 
 // The default shortest interval/timeout we permit
 #define DEFAULT_MIN_TIMEOUT_VALUE 4 // 4ms
 #define DEFAULT_MIN_BACKGROUND_TIMEOUT_VALUE 1000 // 1000ms
 static int32_t gMinTimeoutValue;
 static int32_t gMinBackgroundTimeoutValue;
 inline int32_t
 nsGlobalWindow::DOMMinTimeoutValue() const {
-  bool isBackground = !mOuterWindow || mOuterWindow->IsBackground();
-  if (isBackground) {
-    // Don't use the background timeout value when there are audio contexts with
-    // active nodes, so that background audio can keep running smoothly.
-    for (const AudioContext* ctx : mAudioContexts) {
-      if (ctx->ActiveNodeCount() > 0) {
-        isBackground = false;
-        break;
-      }
-    }
-  }
+  // Don't use the background timeout value when there are audio contexts
+  // present, so that background audio can keep running smoothly. (bug 1181073)
+  bool isBackground = mAudioContexts.IsEmpty() &&
+    (!mOuterWindow || mOuterWindow->IsBackground());
   return
     std::max(isBackground ? gMinBackgroundTimeoutValue : gMinTimeoutValue, 0);
 }
 
 // The number of nested timeouts before we start clamping. HTML5 says 1, WebKit
 // uses 5.
 #define DOM_CLAMP_TIMEOUT_NESTING_LEVEL 5
 
--- a/dom/base/nsNodeUtils.cpp
+++ b/dom/base/nsNodeUtils.cpp
@@ -565,17 +565,17 @@ nsNodeUtils::CloneAndAdopt(nsINode *aNod
     if (aReparentScope) {
       AutoJSContext cx;
       JS::Rooted<JSObject*> wrapper(cx);
       if ((wrapper = aNode->GetWrapper())) {
         MOZ_ASSERT(IsDOMObject(wrapper));
         JSAutoCompartment ac(cx, wrapper);
         rv = ReparentWrapper(cx, wrapper);
         if (NS_FAILED(rv)) {
-          aNode->mNodeInfo.swap(nodeInfo);
+          aNode->mNodeInfo.swap(newNodeInfo);
 
           return rv;
         }
       }
     }
   }
 
   if (aDeep && (!aClone || !aNode->IsNodeOfType(nsINode::eATTRIBUTE))) {
--- a/dom/base/nsObjectLoadingContent.cpp
+++ b/dom/base/nsObjectLoadingContent.cpp
@@ -3366,17 +3366,17 @@ nsObjectLoadingContent::GetRunID(uint32_
 
 static bool sPrefsInitialized;
 static uint32_t sSessionTimeoutMinutes;
 static uint32_t sPersistentTimeoutDays;
 
 bool
 nsObjectLoadingContent::ShouldBlockContent()
 {
-  if (mContentBlockingDisabled)
+  if (mContentBlockingDisabled || !mURI)
     return false;
 
   if (!IsFlashMIME(mContentType) || !Preferences::GetBool(kPrefBlockURIs)) {
     mContentBlockingDisabled = true;
     return false;
   }
 
   return true;
--- a/dom/bindings/Exceptions.cpp
+++ b/dom/bindings/Exceptions.cpp
@@ -634,16 +634,17 @@ NS_IMETHODIMP JSStackFrame::GetFormatted
     mFormattedStackInitialized = true;
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP JSStackFrame::GetNativeSavedFrame(JS::MutableHandle<JS::Value> aSavedFrame)
 {
+  JS::ExposeObjectToActiveJS(mStack);
   aSavedFrame.setObjectOrNull(mStack);
   return NS_OK;
 }
 
 NS_IMETHODIMP JSStackFrame::ToString(JSContext* aCx, nsACString& _retval)
 {
   _retval.Truncate();
 
--- a/dom/camera/CameraPreviewMediaStream.cpp
+++ b/dom/camera/CameraPreviewMediaStream.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "CameraPreviewMediaStream.h"
 #include "CameraCommon.h"
 #include "MediaStreamListener.h"
+#include "VideoFrameContainer.h"
 
 /**
  * Maximum number of outstanding invalidates before we start to drop frames;
  * if we hit this threshold, it is an indicator that the main thread is
  * either very busy or the device is busy elsewhere (e.g. encoding or
  * persisting video data).
  */
 #define MAX_INVALIDATE_PENDING 4
@@ -54,28 +55,28 @@ CameraPreviewMediaStream::SetAudioOutput
 }
 
 void
 CameraPreviewMediaStream::RemoveAudioOutput(void* aKey)
 {
 }
 
 void
-CameraPreviewMediaStream::AddVideoOutput(VideoFrameContainer* aContainer)
+CameraPreviewMediaStream::AddVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
 {
   MutexAutoLock lock(mMutex);
-  RefPtr<VideoFrameContainer> container = aContainer;
-  AddVideoOutputImpl(container.forget());
+  RefPtr<MediaStreamVideoSink> sink = aSink;
+  AddVideoOutputImpl(sink.forget(), aID);
 }
 
 void
-CameraPreviewMediaStream::RemoveVideoOutput(VideoFrameContainer* aContainer)
+CameraPreviewMediaStream::RemoveVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
 {
   MutexAutoLock lock(mMutex);
-  RemoveVideoOutputImpl(aContainer);
+  RemoveVideoOutputImpl(aSink, aID);
 }
 
 void
 CameraPreviewMediaStream::AddListener(MediaStreamListener* aListener)
 {
   MutexAutoLock lock(mMutex);
 
   MediaStreamListener* listener = *mListeners.AppendElement() = aListener;
@@ -120,18 +121,21 @@ CameraPreviewMediaStream::Destroy()
   DestroyImpl();
 }
 
 void
 CameraPreviewMediaStream::Invalidate()
 {
   MutexAutoLock lock(mMutex);
   --mInvalidatePending;
-  for (nsTArray<RefPtr<VideoFrameContainer> >::size_type i = 0; i < mVideoOutputs.Length(); ++i) {
-    VideoFrameContainer* output = mVideoOutputs[i];
+  for (const TrackBound<MediaStreamVideoSink>& sink : mVideoOutputs) {
+    VideoFrameContainer* output = sink.mListener->AsVideoFrameContainer();
+    if (!output) {
+      continue;
+    }
     output->Invalidate();
   }
 }
 
 void
 CameraPreviewMediaStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                        uint32_t aFlags)
 {
@@ -159,32 +163,38 @@ CameraPreviewMediaStream::SetCurrentFram
       }
 
       DOM_CAMERA_LOGI("Update preview frame, %d invalidation(s) pending",
         mInvalidatePending);
     }
     mDiscardedFrames = 0;
 
     TimeStamp now = TimeStamp::Now();
-    for (nsTArray<RefPtr<VideoFrameContainer> >::size_type i = 0; i < mVideoOutputs.Length(); ++i) {
-      VideoFrameContainer* output = mVideoOutputs[i];
+    for (const TrackBound<MediaStreamVideoSink>& sink : mVideoOutputs) {
+      VideoFrameContainer* output = sink.mListener->AsVideoFrameContainer();
+      if (!output) {
+        continue;
+      }
       output->SetCurrentFrame(aIntrinsicSize, aImage, now);
     }
 
     ++mInvalidatePending;
   }
 
   NS_DispatchToMainThread(NewRunnableMethod(this, &CameraPreviewMediaStream::Invalidate));
 }
 
 void
 CameraPreviewMediaStream::ClearCurrentFrame()
 {
   MutexAutoLock lock(mMutex);
 
-  for (nsTArray<RefPtr<VideoFrameContainer> >::size_type i = 0; i < mVideoOutputs.Length(); ++i) {
-    VideoFrameContainer* output = mVideoOutputs[i];
+  for (const TrackBound<MediaStreamVideoSink>& sink : mVideoOutputs) {
+    VideoFrameContainer* output = sink.mListener->AsVideoFrameContainer();
+    if (!output) {
+      continue;
+    }
     output->ClearCurrentFrame();
     NS_DispatchToMainThread(NewRunnableMethod(output, &VideoFrameContainer::Invalidate));
   }
 }
 
 } // namespace mozilla
--- a/dom/camera/CameraPreviewMediaStream.h
+++ b/dom/camera/CameraPreviewMediaStream.h
@@ -1,21 +1,22 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef DOM_CAMERA_CAMERAPREVIEWMEDIASTREAM_H
 #define DOM_CAMERA_CAMERAPREVIEWMEDIASTREAM_H
 
-#include "VideoFrameContainer.h"
 #include "MediaStreamGraph.h"
 #include "mozilla/Mutex.h"
 
 namespace mozilla {
 
+class MediaStreamVideoSink;
+
 class FakeMediaStreamGraph : public MediaStreamGraph
 {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FakeMediaStreamGraph)
 public:
   FakeMediaStreamGraph()
     : MediaStreamGraph(16000)
   {
   }
@@ -37,26 +38,26 @@ protected:
  */
 class CameraPreviewMediaStream : public ProcessedMediaStream
 {
   typedef mozilla::layers::Image Image;
 
 public:
   CameraPreviewMediaStream();
 
-  virtual void AddAudioOutput(void* aKey) override;
-  virtual void SetAudioOutputVolume(void* aKey, float aVolume) override;
-  virtual void RemoveAudioOutput(void* aKey) override;
-  virtual void AddVideoOutput(VideoFrameContainer* aContainer) override;
-  virtual void RemoveVideoOutput(VideoFrameContainer* aContainer) override;
-  virtual void Suspend() override {}
-  virtual void Resume() override {}
-  virtual void AddListener(MediaStreamListener* aListener) override;
-  virtual void RemoveListener(MediaStreamListener* aListener) override;
-  virtual void Destroy() override;
+  void AddAudioOutput(void* aKey) override;
+  void SetAudioOutputVolume(void* aKey, float aVolume) override;
+  void RemoveAudioOutput(void* aKey) override;
+  void AddVideoOutput(MediaStreamVideoSink* aSink, TrackID aID) override;
+  void RemoveVideoOutput(MediaStreamVideoSink* aSink, TrackID aID) override;
+  void Suspend() override {}
+  void Resume() override {}
+  void AddListener(MediaStreamListener* aListener) override;
+  void RemoveListener(MediaStreamListener* aListener) override;
+  void Destroy() override;
   void OnPreviewStateChange(bool aActive);
 
   void Invalidate();
 
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
   // Call these on any thread.
   void SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage);
--- a/dom/canvas/WebGLTexture.cpp
+++ b/dom/canvas/WebGLTexture.cpp
@@ -798,32 +798,47 @@ WebGLTexture::GetTexParameter(TexTarget 
 {
     mContext->MakeContextCurrent();
 
     GLint i = 0;
     GLfloat f = 0.0f;
 
     switch (pname) {
     case LOCAL_GL_TEXTURE_MIN_FILTER:
+        return JS::NumberValue(uint32_t(mMinFilter.get()));
+
     case LOCAL_GL_TEXTURE_MAG_FILTER:
+        return JS::NumberValue(uint32_t(mMagFilter.get()));
+
     case LOCAL_GL_TEXTURE_WRAP_S:
+        return JS::NumberValue(uint32_t(mWrapS.get()));
+
     case LOCAL_GL_TEXTURE_WRAP_T:
+        return JS::NumberValue(uint32_t(mWrapT.get()));
+
     case LOCAL_GL_TEXTURE_BASE_LEVEL:
-    case LOCAL_GL_TEXTURE_COMPARE_FUNC:
+        return JS::NumberValue(mBaseMipmapLevel);
+
     case LOCAL_GL_TEXTURE_COMPARE_MODE:
+        return JS::NumberValue(uint32_t(mTexCompareMode));
+
+    case LOCAL_GL_TEXTURE_MAX_LEVEL:
+        return JS::NumberValue(mMaxMipmapLevel);
+
+    case LOCAL_GL_TEXTURE_IMMUTABLE_FORMAT:
+        return JS::BooleanValue(mImmutable);
+
     case LOCAL_GL_TEXTURE_IMMUTABLE_LEVELS:
-    case LOCAL_GL_TEXTURE_MAX_LEVEL:
+        return JS::NumberValue(uint32_t(mImmutableLevelCount));
+
+    case LOCAL_GL_TEXTURE_COMPARE_FUNC:
     case LOCAL_GL_TEXTURE_WRAP_R:
         mContext->gl->fGetTexParameteriv(texTarget.get(), pname, &i);
         return JS::NumberValue(uint32_t(i));
 
-    case LOCAL_GL_TEXTURE_IMMUTABLE_FORMAT:
-        mContext->gl->fGetTexParameteriv(texTarget.get(), pname, &i);
-        return JS::BooleanValue(bool(i));
-
     case LOCAL_GL_TEXTURE_MAX_ANISOTROPY_EXT:
     case LOCAL_GL_TEXTURE_MAX_LOD:
     case LOCAL_GL_TEXTURE_MIN_LOD:
         mContext->gl->fGetTexParameterfv(texTarget.get(), pname, &f);
         return JS::NumberValue(float(f));
 
     default:
         MOZ_CRASH("GFX: Unhandled pname.");
@@ -994,21 +1009,23 @@ WebGLTexture::TexParameter(TexTarget tex
 
     ////////////////
     // Store any needed values
 
     switch (pname) {
     case LOCAL_GL_TEXTURE_BASE_LEVEL:
         mBaseMipmapLevel = intParam;
         ClampLevelBaseAndMax();
+        intParam = mBaseMipmapLevel;
         break;
 
     case LOCAL_GL_TEXTURE_MAX_LEVEL:
         mMaxMipmapLevel = intParam;
         ClampLevelBaseAndMax();
+        intParam = mMaxMipmapLevel;
         break;
 
     case LOCAL_GL_TEXTURE_MIN_FILTER:
         mMinFilter = intParam;
         break;
 
     case LOCAL_GL_TEXTURE_MAG_FILTER:
         mMagFilter = intParam;
--- a/dom/canvas/WebGLTextureUpload.cpp
+++ b/dom/canvas/WebGLTextureUpload.cpp
@@ -1224,16 +1224,17 @@ WebGLTexture::TexStorage(const char* fun
     const bool isDataInitialized = false;
     const WebGLTexture::ImageInfo newInfo(dstUsage, width, height, depth,
                                           isDataInitialized);
     SetImageInfosAtLevel(0, newInfo);
 
     PopulateMipChain(0, levels-1);
 
     mImmutable = true;
+    mImmutableLevelCount = levels;
 }
 
 ////////////////////////////////////////
 // Tex(Sub)Image
 
 void
 WebGLTexture::TexImage(const char* funcName, TexImageTarget target, GLint level,
                        GLenum internalFormat, const webgl::PackingInfo& pi,
new file mode 100644
--- /dev/null
+++ b/dom/canvas/crashtests/1290628-1.html
@@ -0,0 +1,13 @@
+<!DOCTYPE html>
+<html>
+<body id='test_body'>
+<canvas id='i0'></canvas>
+<script>
+var c=document.getElementById('i0').getContext('2d');
+c.shadowOffsetX=73;
+c.scale(5,55);
+c.setLineDash([48,81,88,41,32767,8]);
+c.shadowColor='hsl(1.0,1%,1%)';
+c.miterLimit=Number.MIN_VALUE;
+c.strokeRect(0,58,8589934591,0);
+</script>
\ No newline at end of file
--- a/dom/canvas/crashtests/crashtests.list
+++ b/dom/canvas/crashtests/crashtests.list
@@ -29,9 +29,10 @@ skip-if(azureCairo) load 1229983-1.html
 load 1229932-1.html
 load 1244850-1.html
 load 1246775-1.html
 load 1284356-1.html
 load 1284578-1.html
 skip-if(d2d) load 1287515-1.html
 load 1287652-1.html
 load 1288872-1.html
+load 1290628-1.html
 
--- a/dom/canvas/test/webgl-conf/checkout/js/tests/gl-object-get-calls.js
+++ b/dom/canvas/test/webgl-conf/checkout/js/tests/gl-object-get-calls.js
@@ -102,57 +102,66 @@ for (var run = 0; run < testCases.length
 
   if (testCases[run].contextStencil) {
     gl = wtu.create3DContext(null, {stencil: true}, contextVersion);
   } else {
     gl = wtu.create3DContext(null, {stencil: false}, contextVersion);
   }
 
   var texture = gl.createTexture();
+  var anotherTexture = gl.createTexture();
   gl.bindTexture(gl.TEXTURE_2D, texture);
   gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 2, 2, 0, gl.RGBA, gl.UNSIGNED_BYTE,
                 new Uint8Array([
                     0, 0, 0, 255,
                     255, 255, 255, 255,
                     255, 255, 255, 255,
                     0, 0, 0, 255]));
   gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
   gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
   gl.bindTexture(gl.TEXTURE_2D, null);
   var framebuffer = gl.createFramebuffer();
   gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
   gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
   var colorAttachmentsNum = 1;
   if (contextVersion > 1) {
+    gl.bindTexture(gl.TEXTURE_2D, anotherTexture);
+    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 2, 2, 0, gl.RGBA, gl.UNSIGNED_BYTE,
+                  new Uint8Array([
+                      0, 0, 0, 255,
+                      255, 255, 255, 255,
+                      255, 255, 255, 255,
+                      0, 0, 0, 255]));
+    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
+    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
+    gl.bindTexture(gl.TEXTURE_2D, null);
     colorAttachmentsNum = gl.getParameter(gl.MAX_COLOR_ATTACHMENTS);
-    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + colorAttachmentsNum - 1, gl.TEXTURE_2D, texture, 0);
+    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + colorAttachmentsNum - 1, gl.TEXTURE_2D, anotherTexture, 0);
   }
   var renderbuffer = gl.createRenderbuffer();
   wtu.glErrorShouldBe(gl, gl.NO_ERROR);
   gl.bindRenderbuffer(gl.RENDERBUFFER, renderbuffer);
   wtu.glErrorShouldBe(gl, gl.NO_ERROR);
   if (contextVersion == 1)
     gl.renderbufferStorage(gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, 2, 2);
   else
     gl.renderbufferStorage(gl.RENDERBUFFER, gl.DEPTH24_STENCIL8, 2, 2);
   wtu.glErrorShouldBe(gl, gl.NO_ERROR);
   gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, renderbuffer);
   if (contextVersion > 1)
     gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, renderbuffer);
-  // FIXME: on some machines (in particular the WebKit commit bots) the
-  // framebuffer status is FRAMEBUFFER_UNSUPPORTED; more investigation
-  // is needed why this is the case, because the FBO allocated
-  // internally by the WebKit implementation has almost identical
-  // parameters to this one. See https://bugs.webkit.org/show_bug.cgi?id=31843.
   shouldBe('gl.checkFramebufferStatus(gl.FRAMEBUFFER)', 'gl.FRAMEBUFFER_COMPLETE');
   // The for loop tests two color attachments for WebGL 2: the first one (gl.COLOR_ATTACHMENT0)
   // and the last one (gl.COLOR_ATTACHMENT0 + gl.MAX_COLOR_ATTACHMENTS - 1).
   for (var ii = 0; ii < colorAttachmentsNum; ii += (colorAttachmentsNum > 1 ? colorAttachmentsNum - 1 : 1)) {
     shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE)', 'gl.TEXTURE');
-    shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_OBJECT_NAME)', 'texture');
+    if (ii == 0)
+      shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_OBJECT_NAME)', 'texture');
+    else
+      shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_OBJECT_NAME)', 'anotherTexture');
     shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL)', '0');
     shouldBe('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE)', '0');
     if (contextVersion > 1) {
       shouldBeNonZero('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_RED_SIZE)');
       shouldBeNonZero('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_GREEN_SIZE)');
       shouldBeNonZero('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_BLUE_SIZE)');
       shouldBeNonZero('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE)');
       shouldBeNonZero('gl.getFramebufferAttachmentParameter(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + ' + ii + ', gl.FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE)');
@@ -852,17 +861,29 @@ if (contextVersion > 1) {
         function(pname) {
 	    return gl.getSamplerParameter(sampler, pname);
     });
 
     debug("");
     debug("Test getSyncParameter");
     var sync = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
     shouldBe('gl.getSyncParameter(sync, gl.OBJECT_TYPE)', 'gl.SYNC_FENCE');
-    shouldBe('gl.getSyncParameter(sync, gl.SYNC_STATUS)', 'gl.UNSIGNALED');
+    var sync_status = gl.getSyncParameter(sync, gl.SYNC_STATUS);
+    switch (sync_status) {
+      case gl.UNSIGNALED:
+        testPassed('gl.getSyncParameter(sync, gl.SYNC_CONDITION) is gl.UNSIGNALED');
+        break;
+      case gl.SIGNALED:
+        testPassed('gl.getSyncParameter(sync, gl.SYNC_CONDITION) is gl.SIGNALED');
+        break;
+      default:
+        testFailed('gl.getSyncParameter(sync, gl.SYNC_CONDITION) was ' + sync_status +
+                   ', expected gl.UNSIGNALED or gl.SIGNALED');
+        break;
+    }
     shouldBe('gl.getSyncParameter(sync, gl.SYNC_CONDITION)', 'gl.SYNC_GPU_COMMANDS_COMPLETE');
     shouldBe('gl.getSyncParameter(sync, gl.SYNC_FLAGS)', '0');
     var validArrayForSyncParameter = new Array(
         gl.OBJECT_TYPE,
         gl.SYNC_STATUS,
         gl.SYNC_CONDITION,
         gl.SYNC_FLAGS
     );
--- a/dom/canvas/test/webgl-conf/generated-mochitest.ini
+++ b/dom/canvas/test/webgl-conf/generated-mochitest.ini
@@ -4587,17 +4587,17 @@ skip-if = (os == 'android' || os == 'lin
 [generated/test_2_conformance2__state__gl-enum-tests.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance2__state__gl-get-calls.html]
 fail-if = (os == 'mac')
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance2__state__gl-getstring.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance2__state__gl-object-get-calls.html]
-skip-if = (os == 'mac') || (os == 'win') || (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
+skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance2__transform_feedback__transform_feedback.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance2__vertex_arrays__vertex-array-object.html]
 fail-if = (os == 'mac') || (os == 'win')
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__attribs__gl-bindAttribLocation-aliasing.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__attribs__gl-bindAttribLocation-matrix.html]
@@ -5894,16 +5894,17 @@ skip-if = (os == 'android' || os == 'lin
 [generated/test_2_conformance__typedarrays__array-large-array-tests.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__typedarrays__array-unit-tests.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__typedarrays__data-view-crash.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__typedarrays__data-view-test.html]
 fail-if = 1
+skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__typedarrays__typed-arrays-in-workers.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__uniforms__gl-uniform-arrays.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__uniforms__gl-uniform-bool.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
 [generated/test_2_conformance__uniforms__gl-uniformmatrix4fv.html]
 skip-if = (os == 'android' || os == 'linux' || (os == 'win' && os_version == '5.1') || (os == 'win' && os_version == '6.2'))
--- a/dom/canvas/test/webgl-conf/mochitest-errata.ini
+++ b/dom/canvas/test/webgl-conf/mochitest-errata.ini
@@ -165,19 +165,16 @@ fail-if = (os == 'android') || (os == 'l
 
 [generated/test_conformance__textures__misc__texture-size-limit.html]
 fail-if = (os == 'linux') || (os == 'android')
 skip-if = (os == 'linux' && asan)
 [generated/test_2_conformance2__reading__read-pixels-from-fbo-test.html]
 skip-if = (os == 'mac') || (os == 'win')
 [generated/test_2_conformance2__misc__uninitialized-test-2.html]
 skip-if = (os == 'mac') || (os == 'win')
-[generated/test_2_conformance2__state__gl-object-get-calls.html]
-# Hit MOZ_GL_DEBUG_ABORT_ON_ERROR on debug build
-skip-if = (os == 'mac') || (os == 'win')
 [generated/test_2_conformance__misc__bad-arguments-test.html]
 # skip because some result logged after SimpleTest.finish()
 skip-if = (os == 'mac') || (os == 'win')
 [generated/test_conformance__misc__bad-arguments-test.html]
 # skip because some result logged after SimpleTest.finish()
 skip-if = (os == 'mac') || (os == 'win') || (os == 'linux') || (os == 'android')
 [generated/test_2_conformance__glsl__constructors__glsl-construct-vec-mat-index.html]
 # skip this test because finish() was called multiple times
--- a/dom/html/HTMLInputElement.cpp
+++ b/dom/html/HTMLInputElement.cpp
@@ -203,16 +203,17 @@ static const nsAttrValue::EnumTable kInp
 };
 
 // Default inputmode value is "auto".
 static const nsAttrValue::EnumTable* kInputDefaultInputmode = &kInputInputmodeTable[0];
 
 const Decimal HTMLInputElement::kStepScaleFactorDate = Decimal(86400000);
 const Decimal HTMLInputElement::kStepScaleFactorNumberRange = Decimal(1);
 const Decimal HTMLInputElement::kStepScaleFactorTime = Decimal(1000);
+const Decimal HTMLInputElement::kStepScaleFactorMonth = Decimal(1);
 const Decimal HTMLInputElement::kDefaultStepBase = Decimal(0);
 const Decimal HTMLInputElement::kDefaultStep = Decimal(1);
 const Decimal HTMLInputElement::kDefaultStepTime = Decimal(60);
 const Decimal HTMLInputElement::kStepAny = Decimal(0);
 
 const double HTMLInputElement::kMaximumYear = 275760;
 const double HTMLInputElement::kMinimumYear = 1;
 
@@ -2279,16 +2280,17 @@ HTMLInputElement::GetMaximum() const
 }
 
 Decimal
 HTMLInputElement::GetStepBase() const
 {
   MOZ_ASSERT(mType == NS_FORM_INPUT_NUMBER ||
              mType == NS_FORM_INPUT_DATE ||
              mType == NS_FORM_INPUT_TIME ||
+             mType == NS_FORM_INPUT_MONTH ||
              mType == NS_FORM_INPUT_RANGE,
              "Check that kDefaultStepBase is correct for this new type");
 
   Decimal stepBase;
 
   // Do NOT use GetMinimum here - the spec says to use "the min content
   // attribute", not "the minimum".
   nsAutoString minStr;
@@ -6888,17 +6890,17 @@ HTMLInputElement::GetStep() const
   }
 
   Decimal step = StringToDecimal(stepStr);
   if (!step.isFinite() || step <= Decimal(0)) {
     step = GetDefaultStep();
   }
 
   // For input type=date, we round the step value to have a rounded day.
-  if (mType == NS_FORM_INPUT_DATE) {
+  if (mType == NS_FORM_INPUT_DATE || mType == NS_FORM_INPUT_MONTH) {
     step = std::max(step.round(), Decimal(1));
   }
 
   return step * GetStepScaleFactor();
 }
 
 // nsIConstraintValidation
 
@@ -7965,29 +7967,32 @@ HTMLInputElement::GetStepScaleFactor() c
   switch (mType) {
     case NS_FORM_INPUT_DATE:
       return kStepScaleFactorDate;
     case NS_FORM_INPUT_NUMBER:
     case NS_FORM_INPUT_RANGE:
       return kStepScaleFactorNumberRange;
     case NS_FORM_INPUT_TIME:
       return kStepScaleFactorTime;
+    case NS_FORM_INPUT_MONTH:
+      return kStepScaleFactorMonth;
     default:
       MOZ_ASSERT(false, "Unrecognized input type");
       return Decimal::nan();
   }
 }
 
 Decimal
 HTMLInputElement::GetDefaultStep() const
 {
   MOZ_ASSERT(DoesStepApply());
 
   switch (mType) {
     case NS_FORM_INPUT_DATE:
+    case NS_FORM_INPUT_MONTH:
     case NS_FORM_INPUT_NUMBER:
     case NS_FORM_INPUT_RANGE:
       return kDefaultStep;
     case NS_FORM_INPUT_TIME:
       return kDefaultStepTime;
     default:
       MOZ_ASSERT(false, "Unrecognized input type");
       return Decimal::nan();
--- a/dom/html/HTMLInputElement.h
+++ b/dom/html/HTMLInputElement.h
@@ -1018,21 +1018,17 @@ protected:
   /**
    * Returns if the min and max attributes apply for the current type.
    */
   bool DoesMinMaxApply() const;
 
   /**
    * Returns if the step attribute apply for the current type.
    */
-  bool DoesStepApply() const
-  {
-    // TODO: this is temporary until bug 888324 is fixed.
-    return DoesMinMaxApply() && mType != NS_FORM_INPUT_MONTH;
-  }
+  bool DoesStepApply() const { return DoesMinMaxApply(); }
 
   /**
    * Returns if stepDown and stepUp methods apply for the current type.
    */
   bool DoStepDownStepUpApply() const { return DoesStepApply(); }
 
   /**
    * Returns if valueAsNumber attribute applies for the current type.
@@ -1423,16 +1419,17 @@ protected:
    * nsTextEditorState cannot do its job.
    */
   nsTextEditorState::SelectionProperties mSelectionProperties;
 
   // Step scale factor values, for input types that have one.
   static const Decimal kStepScaleFactorDate;
   static const Decimal kStepScaleFactorNumberRange;
   static const Decimal kStepScaleFactorTime;
+  static const Decimal kStepScaleFactorMonth;
 
   // Default step base value when a type do not have specific one.
   static const Decimal kDefaultStepBase;
 
   // Default step used when there is no specified step.
   static const Decimal kDefaultStep;
   static const Decimal kDefaultStepTime;
 
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -70,16 +70,17 @@
 #include "MediaMetadataManager.h"
 #include "MediaSourceDecoder.h"
 #include "MediaStreamListener.h"
 #include "DOMMediaStream.h"
 #include "AudioStreamTrack.h"
 #include "VideoStreamTrack.h"
 #include "MediaTrackList.h"
 #include "MediaStreamError.h"
+#include "VideoFrameContainer.h"
 
 #include "AudioChannelService.h"
 
 #include "mozilla/dom/power/PowerManagerService.h"
 #include "mozilla/dom/WakeLock.h"
 
 #include "mozilla/dom/AudioTrack.h"
 #include "mozilla/dom/AudioTrackList.h"
@@ -3632,29 +3633,43 @@ void HTMLMediaElement::UpdateSrcMediaStr
 
     mWatchManager.Watch(*mMediaStreamListener,
         &HTMLMediaElement::UpdateReadyStateInternal);
 
     stream->AddAudioOutput(this);
     SetVolumeInternal();
 
     VideoFrameContainer* container = GetVideoFrameContainer();
-    if (container) {
-      stream->AddVideoOutput(container);
+    if (mSelectedVideoStreamTrack && container) {
+      mSelectedVideoStreamTrack->AddVideoOutput(container);
+    }
+    VideoTrack* videoTrack = VideoTracks()->GetSelectedTrack();
+    if (videoTrack) {
+      VideoStreamTrack* videoStreamTrack = videoTrack->GetVideoStreamTrack();
+      if (videoStreamTrack && container) {
+        videoStreamTrack->AddVideoOutput(container);
+      }
     }
   } else {
     if (stream) {
       mSrcStreamPausedCurrentTime = CurrentTime();
 
       stream->RemoveListener(mMediaStreamListener);
 
       stream->RemoveAudioOutput(this);
       VideoFrameContainer* container = GetVideoFrameContainer();
-      if (container) {
-        stream->RemoveVideoOutput(container);
+      if (mSelectedVideoStreamTrack && container) {
+        mSelectedVideoStreamTrack->RemoveVideoOutput(container);
+      }
+      VideoTrack* videoTrack = VideoTracks()->GetSelectedTrack();
+      if (videoTrack) {
+        VideoStreamTrack* videoStreamTrack = videoTrack->GetVideoStreamTrack();
+        if (videoStreamTrack && container) {
+          videoStreamTrack->RemoveVideoOutput(container);
+        }
       }
     }
     // If stream is null, then DOMMediaStream::Destroy must have been
     // called and that will remove all listeners/outputs.
 
     mWatchManager.Unwatch(*mMediaStreamListener,
         &HTMLMediaElement::UpdateReadyStateInternal);
 
@@ -3782,16 +3797,19 @@ void HTMLMediaElement::ConstructMediaTra
     // must be selected.
     int index = firstEnabledVideo >= 0 ? firstEnabledVideo : 0;
     (*VideoTracks())[index]->SetEnabledInternal(true, MediaTrack::FIRE_NO_EVENTS);
     VideoTrack* track = (*VideoTracks())[index];
     VideoStreamTrack* streamTrack = track->GetVideoStreamTrack();
     mMediaStreamSizeListener = new StreamSizeListener(this);
     streamTrack->AddDirectListener(mMediaStreamSizeListener);
     mSelectedVideoStreamTrack = streamTrack;
+    if (GetVideoFrameContainer()) {
+      mSelectedVideoStreamTrack->AddVideoOutput(GetVideoFrameContainer());
+    }
   }
 }
 
 void
 HTMLMediaElement::NotifyMediaStreamTrackAdded(const RefPtr<MediaStreamTrack>& aTrack)
 {
   MOZ_ASSERT(aTrack);
 
@@ -3814,16 +3832,20 @@ HTMLMediaElement::NotifyMediaStreamTrack
     // New MediaStreamTrack added, set the new added video track as selected
     // video track when there is no selected track.
     if (selectedIndex == -1) {
       MOZ_ASSERT(!mSelectedVideoStreamTrack);
       videoTrack->SetEnabledInternal(true, MediaTrack::FIRE_NO_EVENTS);
       mMediaStreamSizeListener = new StreamSizeListener(this);
       t->AddDirectListener(mMediaStreamSizeListener);
       mSelectedVideoStreamTrack = t;
+      VideoFrameContainer* container = GetVideoFrameContainer();
+      if (mSrcStreamIsPlaying && container) {
+        mSelectedVideoStreamTrack->AddVideoOutput(container);
+      }
     }
 
   }
 }
 
 void
 HTMLMediaElement::NotifyMediaStreamTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack)
 {
@@ -3843,16 +3865,20 @@ HTMLMediaElement::NotifyMediaStreamTrack
     // If the removed media stream track is selected video track and there are
     // still video tracks, change the selected video track to the first
     // remaining track.
     if (aTrack == mSelectedVideoStreamTrack) {
       // The mMediaStreamSizeListener might already reset to nullptr.
       if (mMediaStreamSizeListener) {
         mSelectedVideoStreamTrack->RemoveDirectListener(mMediaStreamSizeListener);
       }
+      VideoFrameContainer* container = GetVideoFrameContainer();
+      if (mSrcStreamIsPlaying && container) {
+        mSelectedVideoStreamTrack->RemoveVideoOutput(container);
+      }
       mSelectedVideoStreamTrack = nullptr;
       MOZ_ASSERT(mSrcStream);
       nsTArray<RefPtr<VideoStreamTrack>> tracks;
       mSrcStream->GetVideoTracks(tracks);
 
       for (const RefPtr<VideoStreamTrack>& track : tracks) {
         if (track->Ended()) {
           continue;
@@ -3866,16 +3892,19 @@ HTMLMediaElement::NotifyMediaStreamTrack
         MediaTrack* videoTrack = VideoTracks()->GetTrackById(trackId);
         MOZ_ASSERT(videoTrack);
 
         videoTrack->SetEnabledInternal(true, MediaTrack::FIRE_NO_EVENTS);
         if (mMediaStreamSizeListener) {
           track->AddDirectListener(mMediaStreamSizeListener);
         }
         mSelectedVideoStreamTrack = track;
+        if (container) {
+          mSelectedVideoStreamTrack->AddVideoOutput(container);
+        }
         return;
       }
 
       // There is no enabled video track existing, clean the
       // mMediaStreamSizeListener.
       if (mMediaStreamSizeListener) {
         mMediaStreamSizeListener->Forget();
         mMediaStreamSizeListener = nullptr;
--- a/dom/html/HTMLVideoElement.cpp
+++ b/dom/html/HTMLVideoElement.cpp
@@ -12,16 +12,17 @@
 #include "nsSize.h"
 #include "nsError.h"
 #include "nsNodeInfoManager.h"
 #include "plbase64.h"
 #include "nsXPCOMStrings.h"
 #include "prlock.h"
 #include "nsThreadUtils.h"
 #include "ImageContainer.h"
+#include "VideoFrameContainer.h"
 
 #include "nsIScriptSecurityManager.h"
 #include "nsIXPConnect.h"
 
 #include "nsITimer.h"
 
 #include "MediaError.h"
 #include "MediaDecoder.h"
--- a/dom/html/test/forms/test_step_attribute.html
+++ b/dom/html/test/forms/test_step_attribute.html
@@ -24,18 +24,17 @@ var data = [
   { type: 'text', apply: false },
   { type: 'search', apply: false },
   { type: 'tel', apply: false },
   { type: 'url', apply: false },
   { type: 'email', apply: false },
   { type: 'password', apply: false },
   { type: 'datetime', apply: true, todo: true },
   { type: 'date', apply: true },
-  // TODO: temporary set to false until bug 888324 is fixed.
-  { type: 'month', apply: false },
+  { type: 'month', apply: true },
   { type: 'week', apply: true, todo: true },
   { type: 'time', apply: true },
   { type: 'datetime-local', apply: true, todo: true },
   { type: 'number', apply: true },
   { type: 'range', apply: true },
   { type: 'color', apply: false },
   { type: 'checkbox', apply: false },
   { type: 'radio', apply: false },
@@ -717,17 +716,132 @@ for (var test of data) {
         } else {
           checkValidity(input, false, apply,
                         { low: test.low, high: test.high });
         }
       }
 
       break;
     case 'month':
-      // TODO: this is temporary until bug 888324 is fixed.
+      // When step is invalid, every date is valid
+      input.step = 0;
+      input.value = '2016-07';
+      checkValidity(input, true, apply);
+
+      input.step = 'foo';
+      input.value = '1970-01';
+      checkValidity(input, true, apply);
+
+      input.step = '-1';
+      input.value = '1970-01';
+      checkValidity(input, true, apply);
+
+      input.removeAttribute('step');
+      input.value = '1500-01';
+      checkValidity(input, true, apply);
+
+      input.step = 'any';
+      input.value = '1966-12';
+      checkValidity(input, true, apply);
+
+      input.step = 'ANY';
+      input.value = '2013-02';
+      checkValidity(input, true, apply);
+
+      // When min is set to a valid month, there is a step base.
+      input.min = '2000-01';
+      input.step = '2';
+      input.value = '2000-03';
+      checkValidity(input, true, apply);
+
+      input.value = '2000-02';
+      checkValidity(input, false, apply, { low: "2000-01", high: "2000-03" });
+
+      input.min = '2012-12';
+      input.value = '2013-01';
+      checkValidity(input, false, apply, { low: "2012-12", high: "2013-02" });
+
+      input.min = '2010-10';
+      input.value = '2010-11';
+      checkValidity(input, false, apply, { low: "2010-10", high: "2010-12" });
+
+      input.min = '2010-01';
+      input.step = '1.1';
+      input.value = '2010-02';
+      checkValidity(input, true, apply);
+
+      input.min = '2010-05';
+      input.step = '1.9';
+      input.value = '2010-06';
+      checkValidity(input, false, apply, { low: "2010-05", high: "2010-07" });
+
+      // Without any step attribute the date is valid
+      input.removeAttribute('step');
+      checkValidity(input, true, apply);
+
+      input.min = '1950-01';
+      input.step = '13';
+      input.value = '1951-01';
+      checkValidity(input, false, apply, { low: "1950-01", high: "1951-02" });
+
+      input.min = '1951-01';
+      input.step = '12';
+      input.value = '1952-01';
+      checkValidity(input, true, apply);
+
+      input.step = '0.9';
+      input.value = '1951-02';
+      checkValidity(input, true, apply);
+
+      input.step = '1.5';
+      input.value = '1951-04';
+      checkValidity(input, false, apply, { low: "1951-03", high: "1951-05" });
+
+      input.value = '1951-08';
+      checkValidity(input, false, apply, { low: "1951-07", high: "1951-09" });
+
+      input.step = '300';
+      input.min= '1968-01';
+      input.value = '1968-05';
+      checkValidity(input, false, apply, { low: "1968-01", high: "1993-01" });
+
+      input.value = '1971-01';
+      checkValidity(input, false, apply, { low: "1968-01", high: "1993-01" });
+
+      input.value = '1994-01';
+      checkValidity(input, false, apply, { low: "1993-01", high: "2018-01" });
+
+      input.value = '2018-01';
+      checkValidity(input, true, apply);
+
+      input.value = '2043-01';
+      checkValidity(input, true, apply);
+
+      input.step = '2.1';
+      input.min = '1991-01';
+      input.value = '1991-01';
+      checkValidity(input, true, apply);
+
+      input.value = '1991-02';
+      checkValidity(input, false, apply, { low: "1991-01", high: "1991-03" });
+
+      input.value = '1991-03';
+      checkValidity(input, true, apply);
+
+      input.step = '2.1';
+      input.min = '1969-12';
+      input.value = '1969-12';
+      checkValidity(input, true, apply);
+
+      input.value = '1970-01';
+      checkValidity(input, false, apply, { low: "1969-12", high: "1970-02" });
+
+      input.value = '1970-02';
+      checkValidity(input, true, apply);
+
       break;
     default:
       ok(false, "Implement the tests for <input type='" + test.type + " >");
       break;
   }
 }
 
 </script>
--- a/dom/html/test/forms/test_stepup_stepdown.html
+++ b/dom/html/test/forms/test_stepup_stepdown.html
@@ -45,23 +45,23 @@ function checkAvailability()
     ["submit", false],
     ["image", false],
     ["reset", false],
     ["button", false],
     ["number", true],
     ["range", true],
     ["date", true],
     ["time", true],
+    ["month", true],
     ["color", false],
   ];
 
   var todoList =
   [
     ["datetime", true],
-    ["month", true],
     ["week", true],
     ["datetime-local", true],
   ];
 
   var element = document.createElement("input");
   element.setAttribute('value', '0');
 
   for (data of testData) {
@@ -376,16 +376,80 @@ function checkStepDown()
     // value = "" (NaN).
     [ '',   null,   null,  null,  null, '23:59',    false ],
     // With step = 'any'.
     [ '17:26',  'any',  null,  null,  1,  null,  true ],
     [ '17:26',  'ANY',  null,  null,  1,  null,  true ],
     [ '17:26',  'AnY',  null,  null,  1,  null,  true ],
     [ '17:26',  'aNy',  null,  null,  1,  null,  true ],
   ]},
+  { type: 'month', data: [
+    // Regular case.
+    [ '2016-08',  null,  null,  null,  null, '2016-07',   false ],
+    // Argument testing.
+    [ '2016-08',  null,  null,  null,  1,    '2016-07',   false ],
+    [ '2016-08',  null,  null,  null,  5,    '2016-03',   false ],
+    [ '2016-08',  null,  null,  null,  -1,   '2016-09',   false ],
+    [ '2016-08',  null,  null,  null,  0,    '2016-08',   false ],
+    // Month/Year wrapping.
+    [ '2016-01',  null,  null,  null,  1,    '2015-12',   false ],
+    [ '1969-02',  null,  null,  null,  4,    '1968-10',   false ],
+    [ '1969-01',  null,  null,  null,  -12,  '1970-01',   false ],
+    // Float arguments are truncated to integer (1.1 -> 1, 1.9 -> 1).
+    [ '2016-08',  null,  null,  null,  1.1,  '2016-07',   false ],
+    [ '2016-01',  null,  null,  null,  1.9,  '2015-12',   false ],
+    // With step values.
+    [ '2016-03',  '0.5', null,  null,  null,      '2016-02',   false ],
+    [ '2016-03',  '2',   null,  null,  null,      '2016-01',   false ],
+    [ '2016-03',  '0.25',null,  null,  4,         '2015-11',   false ],
+    [ '2016-12',  '1.1',  '2016-01', null,  1,    '2016-11',   false ],
+    [ '2016-12',  '1.1',  '2016-01', null,  2,    '2016-10',   false ],
+    [ '2016-12',  '1.1',  '2016-01', null,  10,   '2016-02',   false ],
+    [ '2016-12',  '1.1',  '2016-01', null,  12,   '2016-01',   false ],
+    [ '1968-12',  '1.1',  '1968-01', null,  8,    '1968-04',   false ],
+    // step = 0 isn't allowed (-> step = 1).
+    [ '2016-02',  '0',   null,  null,  null, '2016-01',   false ],
+    // step < 0 isn't allowed (-> step = 1).
+    [ '2016-02',  '-1',  null,  null,  null, '2016-01',   false ],
+    // step = NaN isn't allowed (-> step = 1).
+    [ '2016-02',  'foo', null,  null,  null, '2016-01',   false ],
+    // Min values testing.
+    [ '2016-03',  '1',    'foo',     null,      2,     '2016-01',  false ],
+    [ '2016-02',  '1',    '2016-01', null,      null,  '2016-01',  false ],
+    [ '2016-01',  '1',    '2016-01', null,      null,  '2016-01',  false ],
+    [ '2016-01',  '1',    '2016-01', null,      1,     '2016-01',  false ],
+    [ '2016-05',  '3',    '2016-01', null,      null,  '2016-04',  false ],
+    [ '1969-01',  '5',    '1969-01', '1969-02', null,  '1969-01',  false ],
+    // Max values testing.
+    [ '2016-02',  '1',    null,  'foo',      null,  '2016-01',  false ],
+    [ '2016-02',  null,   null,  '2016-05',  null,  '2016-01',  false ],
+    [ '2016-03',  null,   null,  '2016-03',  null,  '2016-02',  false ],
+    [ '2016-07',  null,   null,  '2016-04',  4,     '2016-03',  false ],
+    [ '2016-07',  '2',    null,  '2016-04',  3,     '2016-01',  false ],
+    // Step mismatch.
+    [ '2016-04',  '2',    '2016-01',  null,         null,  '2016-03',  false ],
+    [ '2016-06',  '2',    '2016-01',  null,         2,     '2016-03',  false ],
+    [ '2016-05',  '2',    '2016-04',  '2016-08',    null,  '2016-04',  false ],
+    [ '1970-04',  '2',    null,       null,         null,  '1970-02',  false ],
+    [ '1970-09',  '3',    null,       null,         null,  '1970-06',  false ],
+    // Clamping.
+    [ '2016-05',  null,   null,       '2016-01',    null,  '2016-01',  false ],
+    [ '1970-05',  '2',    '1970-02',  '1970-05',    null,  '1970-04',  false ],
+    [ '1970-01',  '5',    '1970-02',  '1970-09',    10,    '1970-01',  false ],
+    [ '1970-07',  '5',    '1969-12',  '1970-10',    2,     '1969-12',  false ],
+    [ '1970-08',  '3',    '1970-01',  '1970-07',    15,    '1970-01',  false ],
+    [ '1970-10',  '3',    '1970-01',  '1970-06',    2,     '1970-04',  false ],
+    // value = "" (NaN).
+    [ '',   null,   null,  null,  null, '1969-12',    false ],
+    // With step = 'any'.
+    [ '2016-01',  'any',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'ANY',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'AnY',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'aNy',  null,  null,  1,  null,  true ],
+  ]},
   ];
 
   for (var test of testData) {
     for (var data of test.data) {
       var element = document.createElement("input");
       element.type = test.type;
 
       if (data[1] != null) {
@@ -589,17 +653,16 @@ function checkStepUp()
     // With step values.
     [ '2012-01-01',  '0.5',  null,         null,  null, '2012-01-02',   false ],
     [ '2012-01-01',  '2',    null,         null,  null, '2012-01-03',   false ],
     [ '2012-01-01',  '0.25', null,         null,  4,    '2012-01-05',   false ],
     [ '2012-01-01',  '1.1',  '2012-01-01', null,  1,    '2012-01-02',   false ],
     [ '2012-01-01',  '1.1',  '2012-01-01', null,  2,    '2012-01-03',   false ],
     [ '2012-01-01',  '1.1',  '2012-01-01', null,  10,   '2012-01-11',   false ],
     [ '2012-01-01',  '1.1',  '2012-01-01', null,  11,   '2012-01-12',   false ],
-    [ '1968-01-01',  '1.1',  '1968-01-01', null,  8,    '1968-01-09',   false ],
     // step = 0 isn't allowed (-> step = 1).
     [ '2012-01-01',  '0',   null,  null,  null, '2012-01-02',   false ],
     // step < 0 isn't allowed (-> step = 1).
     [ '2012-01-01',  '-1',  null,  null,  null, '2012-01-02',   false ],
     // step = NaN isn't allowed (-> step = 1).
     [ '2012-01-01',  'foo', null,  null,  null, '2012-01-02',   false ],
     // Min values testing.
     [ '2012-01-01',  '1',   'foo',         null,  null,  '2012-01-02',  false ],
@@ -608,17 +671,16 @@ function checkStepUp()
     [ '2012-01-01',  null,  '2012-01-01',  null,  null,  '2012-01-02',  false ],
     [ '2012-01-01',  null,  '2012-01-04',  null,  4,     '2012-01-05',  false ],
     [ '2012-01-01',  '2',   '2012-01-04',  null,  3,     '2012-01-06',  false ],
     // Max values testing.
     [ '2012-01-01',  '1',    null,  'foo',        2,     '2012-01-03',  false ],
     [ '2012-01-01',  '1',    null,  '2012-01-10', 1,     '2012-01-02',  false ],
     [ '2012-01-02',  null,   null,  '2012-01-01', null,  '2012-01-02',  false ],
     [ '2012-01-02',  null,   null,  '2012-01-02', null,  '2012-01-02',  false ],
-    [ '2012-01-02',  null,   null,  '2012-01-02', null,  '2012-01-02',  false ],
     [ '1969-01-02',  '5',    '1969-01-01',  '1969-01-02', null,  '1969-01-02',  false ],
     // Step mismatch.
     [ '2012-01-02',  '2',    '2012-01-01',  null,         null,  '2012-01-03',  false ],
     [ '2012-01-02',  '2',    '2012-01-01',  null,         2,     '2012-01-05',  false ],
     [ '2012-01-05',  '2',    '2012-01-01',  '2012-01-06', null,  '2012-01-05',  false ],
     [ '1970-01-02',  '2',    null,          null,         null,  '1970-01-04',  false ],
     [ '1970-01-05',  '3',    null,          null,         null,  '1970-01-08',  false ],
     [ '1970-01-03',  '3',    null,          null,         null,  '1970-01-06',  false ],
@@ -697,16 +759,81 @@ function checkStepUp()
     // value = "" (NaN).
     [ '',   null,   null,  null,  null, '00:01',    false ],
     // With step = 'any'.
     [ '17:26',  'any',  null,  null,  1,  null,  true ],
     [ '17:26',  'ANY',  null,  null,  1,  null,  true ],
     [ '17:26',  'AnY',  null,  null,  1,  null,  true ],
     [ '17:26',  'aNy',  null,  null,  1,  null,  true ],
   ]},
+  { type: 'month', data: [
+    // Regular case.
+    [ '2016-08',  null,  null,  null,  null, '2016-09',   false ],
+    // Argument testing.
+    [ '2016-08',  null,  null,  null,  1,    '2016-09',   false ],
+    [ '2016-08',  null,  null,  null,  9,    '2017-05',   false ],
+    [ '2016-08',  null,  null,  null,  -1,   '2016-07',   false ],
+    [ '2016-08',  null,  null,  null,  0,    '2016-08',   false ],
+    // Month/Year wrapping.
+    [ '2015-12',  null,  null,  null,  1,    '2016-01',   false ],
+    [ '1968-12',  null,  null,  null,  4,    '1969-04',   false ],
+    [ '1970-01',  null,  null,  null,  -12,  '1969-01',   false ],
+    // Float arguments are truncated to integer (1.1 -> 1, 1.9 -> 1).
+    [ '2016-01',  null,  null,  null,  1.1,  '2016-02',   false ],
+    [ '2016-01',  null,  null,  null,  1.9,  '2016-02',   false ],
+    // With step values.
+    [ '2016-01',  '0.5',  null,         null,  null, '2016-02',   false ],
+    [ '2016-01',  '2',    null,         null,  null, '2016-03',   false ],
+    [ '2016-01',  '0.25', null,         null,  4,    '2016-05',   false ],
+    [ '2016-01',  '1.1',  '2016-01',    null,  1,    '2016-02',   false ],
+    [ '2016-01',  '1.1',  '2016-01',    null,  2,    '2016-03',   false ],
+    [ '2016-01',  '1.1',  '2016-01',    null,  10,   '2016-11',   false ],
+    [ '2016-01',  '1.1',  '2016-01',    null,  11,   '2016-12',   false ],
+    // step = 0 isn't allowed (-> step = 1).
+    [ '2016-01',  '0',   null,  null,  null, '2016-02',   false ],
+    // step < 0 isn't allowed (-> step = 1).
+    [ '2016-01',  '-1',  null,  null,  null, '2016-02',   false ],
+    // step = NaN isn't allowed (-> step = 1).
+    [ '2016-01',  'foo', null,  null,  null, '2016-02',   false ],
+    // Min values testing.
+    [ '2016-01',  '1',   'foo',      null,  null,  '2016-02',  false ],
+    [ '2016-01',  null,  '2015-12',  null,  null,  '2016-02',  false ],
+    [ '2016-01',  null,  '2016-02',  null,  null,  '2016-02',  false ],
+    [ '2016-01',  null,  '2016-01',  null,  null,  '2016-02',  false ],
+    [ '2016-01',  null,  '2016-04',  null,  4,     '2016-05',  false ],
+    [ '2016-01',  '2',   '2016-04',  null,  3,     '2016-06',  false ],
+    // Max values testing.
+    [ '2016-01',  '1',    null,       'foo',        2,     '2016-03',  false ],
+    [ '2016-01',  '1',    null,       '2016-02',    1,     '2016-02',  false ],
+    [ '2016-02',  null,   null,       '2016-01',    null,  '2016-02',  false ],
+    [ '2016-02',  null,   null,       '2016-02',    null,  '2016-02',  false ],
+    [ '1969-02',  '5',    '1969-01',  '1969-02',    null,  '1969-02',  false ],
+    // Step mismatch.
+    [ '2016-02',  '2',    '2016-01',  null,         null,  '2016-03',  false ],
+    [ '2016-02',  '2',    '2016-01',  null,         2,     '2016-05',  false ],
+    [ '2016-05',  '2',    '2016-01',  '2016-06',    null,  '2016-05',  false ],
+    [ '1970-02',  '2',    null,       null,         null,  '1970-04',  false ],
+    [ '1970-05',  '3',    null,       null,         null,  '1970-08',  false ],
+    [ '1970-03',  '3',    null,       null,         null,  '1970-06',  false ],
+    [ '1970-03',  '3',    '1970-02',  null,         null,  '1970-05',  false ],
+    // Clamping.
+    [ '2016-01',  null,   '2016-12',  null,         null,  '2016-12',  false ],
+    [ '1970-02',  '2',    '1970-01',  '1970-04',    null,  '1970-03',  false ],
+    [ '1970-01',  '5',    '1970-02',  '1970-09',    10,    '1970-07',  false ],
+    [ '1969-11',  '5',    '1969-12',  '1970-06',    3,     '1970-05',  false ],
+    [ '1970-01',  '3',    '1970-02',  '1971-07',    15,    '1971-05',  false ],
+    [ '1970-01',  '3',    '1970-01',  '1970-06',    2,     '1970-04',  false ],
+    // value = "" (NaN).
+    [ '',   null,   null,  null,  null, '1970-02',    false ],
+    // With step = 'any'.
+    [ '2016-01',  'any',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'ANY',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'AnY',  null,  null,  1,  null,  true ],
+    [ '2016-01',  'aNy',  null,  null,  1,  null,  true ],
+  ]},
   ];
 
   for (var test of testData) {
     for (var data of test.data) {
       var element = document.createElement("input");
       element.type = test.type;
 
       if (data[1] != null) {
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -30,16 +30,17 @@
 #include "mozilla/dom/ExternalHelperAppChild.h"
 #include "mozilla/dom/FlyWebPublishedServerIPC.h"
 #include "mozilla/dom/GetFilesHelper.h"
 #include "mozilla/dom/PCrashReporterChild.h"
 #include "mozilla/dom/ProcessGlobal.h"
 #include "mozilla/dom/Promise.h"
 #include "mozilla/dom/workers/ServiceWorkerManager.h"
 #include "mozilla/dom/nsIContentChild.h"
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/psm/PSMContentListener.h"
 #include "mozilla/hal_sandbox/PHalChild.h"
 #include "mozilla/ipc/BackgroundChild.h"
 #include "mozilla/ipc/FileDescriptorSetChild.h"
 #include "mozilla/ipc/FileDescriptorUtils.h"
 #include "mozilla/ipc/GeckoChildProcessHost.h"
 #include "mozilla/ipc/ProcessChild.h"
 #include "mozilla/ipc/PSendStreamChild.h"
@@ -2271,16 +2272,23 @@ ContentChild::RecvSystemMemoryAvailable(
 bool
 ContentChild::RecvPreferenceUpdate(const PrefSetting& aPref)
 {
   Preferences::SetPreference(aPref);
   return true;
 }
 
 bool
+ContentChild::RecvVarUpdate(const GfxVarUpdate& aVar)
+{
+  gfx::gfxVars::ApplyUpdate(aVar);
+  return true;
+}
+
+bool
 ContentChild::RecvDataStoragePut(const nsString& aFilename,
                                  const DataStorageItem& aItem)
 {
   RefPtr<DataStorage> storage = DataStorage::GetIfExists(aFilename);
   if (storage) {
     storage->Put(aItem.key(), aItem.value(), aItem.type());
   }
   return true;
--- a/dom/ipc/ContentChild.h
+++ b/dom/ipc/ContentChild.h
@@ -404,16 +404,17 @@ public:
 
   // auto remove when alertfinished is received.
   nsresult AddRemoteAlertObserver(const nsString& aData, nsIObserver* aObserver);
 
   virtual bool RecvSystemMemoryAvailable(const uint64_t& aGetterId,
                                          const uint32_t& aMemoryAvailable) override;
 
   virtual bool RecvPreferenceUpdate(const PrefSetting& aPref) override;
+  virtual bool RecvVarUpdate(const GfxVarUpdate& pref) override;
 
   virtual bool RecvDataStoragePut(const nsString& aFilename,
                                   const DataStorageItem& aItem) override;
 
   virtual bool RecvDataStorageRemove(const nsString& aFilename,
                                      const nsCString& aKey,
                                      const DataStorageType& aType) override;
 
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -69,16 +69,17 @@
 #include "mozilla/dom/PresentationParent.h"
 #include "mozilla/dom/PPresentationParent.h"
 #include "mozilla/dom/FlyWebPublishedServerIPC.h"
 #include "mozilla/dom/quota/QuotaManagerService.h"
 #include "mozilla/dom/telephony/TelephonyParent.h"
 #include "mozilla/dom/time/DateCacheCleaner.h"
 #include "mozilla/dom/voicemail/VoicemailParent.h"
 #include "mozilla/embedding/printingui/PrintingParent.h"
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/GPUProcessManager.h"
 #include "mozilla/hal_sandbox/PHalParent.h"
 #include "mozilla/ipc/BackgroundChild.h"
 #include "mozilla/ipc/BackgroundParent.h"
 #include "mozilla/ipc/FileDescriptorSetParent.h"
 #include "mozilla/ipc/FileDescriptorUtils.h"
 #include "mozilla/ipc/PFileDescriptorSetParent.h"
 #include "mozilla/ipc/PSendStreamParent.h"
@@ -1845,16 +1846,17 @@ ContentParent::ActorDestroy(ActorDestroy
     for (size_t i = 0; i < length; ++i) {
       obs->RemoveObserver(static_cast<nsIObserver*>(this),
                           sObserverTopics[i]);
     }
   }
 
   // remove the global remote preferences observers
   Preferences::RemoveObserver(this, "");
+  gfxVars::RemoveReceiver(this);
 
   RecvRemoveGeolocationListener();
 
   mConsoleService = nullptr;
 
 #ifdef MOZ_ENABLE_PROFILER_SPS
   if (mGatherer && !mProfile.IsEmpty()) {
     mGatherer->OOPExitProfile(mProfile);
@@ -2394,16 +2396,39 @@ ContentParent::Pid() const
 bool
 ContentParent::RecvReadPrefsArray(InfallibleTArray<PrefSetting>* aPrefs)
 {
   Preferences::GetPreferences(aPrefs);
   return true;
 }
 
 bool
+ContentParent::RecvGetGfxVars(InfallibleTArray<GfxVarUpdate>* aVars)
+{
+  // Ensure gfxVars is initialized (for xpcshell tests).
+  gfxVars::Initialize();
+
+  *aVars = gfxVars::FetchNonDefaultVars();
+
+  // Now that content has initialized gfxVars, we can start listening for
+  // updates.
+  gfxVars::AddReceiver(this);
+  return true;
+}
+
+void
+ContentParent::OnVarChanged(const GfxVarUpdate& aVar)
+{
+  if (!mIPCOpen) {
+    return;
+  }
+  Unused << SendVarUpdate(aVar);
+}
+
+bool
 ContentParent::RecvReadFontList(InfallibleTArray<FontListEntry>* retValue)
 {
 #ifdef ANDROID
   gfxAndroidPlatform::GetPlatform()->GetSystemFontList(retValue);
 #endif
   return true;
 }
 
--- a/dom/ipc/ContentParent.h
+++ b/dom/ipc/ContentParent.h
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_dom_ContentParent_h
 #define mozilla_dom_ContentParent_h
 
 #include "mozilla/dom/PContentParent.h"
 #include "mozilla/dom/nsIContentParent.h"
+#include "mozilla/gfx/gfxVarReceiver.h"
 #include "mozilla/ipc/GeckoChildProcessHost.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/FileUtils.h"
 #include "mozilla/HalTypes.h"
 #include "mozilla/LinkedList.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/UniquePtr.h"
 
@@ -84,16 +85,17 @@ class TabContext;
 class ContentBridgeParent;
 class GetFilesHelper;
 
 class ContentParent final : public PContentParent
                           , public nsIContentParent
                           , public nsIObserver
                           , public nsIDOMGeoPositionCallback
                           , public nsIDOMGeoPositionErrorCallback
+                          , public gfx::gfxVarReceiver
                           , public mozilla::LinkedListElement<ContentParent>
 {
   typedef mozilla::ipc::GeckoChildProcessHost GeckoChildProcessHost;
   typedef mozilla::ipc::OptionalURIParams OptionalURIParams;
   typedef mozilla::ipc::PFileDescriptorSetParent PFileDescriptorSetParent;
   typedef mozilla::ipc::TestShellParent TestShellParent;
   typedef mozilla::ipc::URIParams URIParams;
   typedef mozilla::ipc::PrincipalInfo PrincipalInfo;
@@ -563,16 +565,18 @@ public:
 
 protected:
   void OnChannelConnected(int32_t pid) override;
 
   virtual void ActorDestroy(ActorDestroyReason why) override;
 
   bool ShouldContinueFromReplyTimeout() override;
 
+  void OnVarChanged(const GfxVarUpdate& aVar) override;
+
 private:
   static nsDataHashtable<nsStringHashKey, ContentParent*> *sAppContentParents;
   static nsTArray<ContentParent*>* sNonAppContentParents;
   static nsTArray<ContentParent*>* sPrivateContent;
   static StaticAutoPtr<LinkedList<ContentParent> > sContentParents;
 
   static void JoinProcessesIOThread(const nsTArray<ContentParent*>* aProcesses,
                                     Monitor* aMonitor, bool* aDone);
@@ -903,16 +907,17 @@ private:
   virtual PWebBrowserPersistDocumentParent*
   AllocPWebBrowserPersistDocumentParent(PBrowserParent* aBrowser,
                                         const uint64_t& aOuterWindowID) override;
 
   virtual bool
   DeallocPWebBrowserPersistDocumentParent(PWebBrowserPersistDocumentParent* aActor) override;
 
   virtual bool RecvReadPrefsArray(InfallibleTArray<PrefSetting>* aPrefs) override;
+  virtual bool RecvGetGfxVars(InfallibleTArray<GfxVarUpdate>* aVars) override;
 
   virtual bool RecvReadFontList(InfallibleTArray<FontListEntry>* retValue) override;
 
   virtual bool RecvReadDataStorageArray(const nsString& aFilename,
                                         InfallibleTArray<DataStorageItem>* aValues) override;
 
   virtual bool RecvReadPermissions(InfallibleTArray<IPC::Permission>* aPermissions) override;
 
--- a/dom/ipc/PContent.ipdl
+++ b/dom/ipc/PContent.ipdl
@@ -514,16 +514,17 @@ child:
     async SetOffline(bool offline);
     async SetConnectivity(bool connectivity);
 
     async NotifyVisited(URIParams uri);
 
     async SystemMemoryAvailable(uint64_t getterId, uint32_t memoryAvailable);
 
     async PreferenceUpdate(PrefSetting pref);
+    async VarUpdate(GfxVarUpdate var);
 
     async DataStoragePut(nsString aFilename, DataStorageItem aItem);
     async DataStorageRemove(nsString aFilename, nsCString aKey, DataStorageType aType);
     async DataStorageClear(nsString aFilename);
 
     async NotifyAlertsObserver(nsCString topic, nsString data);
 
     async GeolocationUpdate(GeoPosition somewhere);
@@ -875,16 +876,17 @@ parent:
     async VisitURI(URIParams uri, OptionalURIParams referrer, uint32_t flags);
     async SetURITitle(URIParams uri, nsString title);
 
     async LoadURIExternal(URIParams uri, PBrowser windowContext);
     async ExtProtocolChannelConnectParent(uint32_t registrarId);
 
     // PrefService message
     sync ReadPrefsArray() returns (PrefSetting[] prefs) verify;
+    sync GetGfxVars() returns (GfxVarUpdate[] vars);
 
     sync ReadFontList() returns (FontListEntry[] retValue);
 
     sync ReadDataStorageArray(nsString aFilename)
       returns (DataStorageItem[] retValue);
 
     sync SyncMessage(nsString aMessage, ClonedMessageData aData,
                      CpowEntry[] aCpows, Principal aPrincipal)
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -364,17 +364,17 @@ MP3TrackDemuxer::StreamLength() const {
 TimeUnit
 MP3TrackDemuxer::Duration() const {
   if (!mNumParsedFrames) {
     return TimeUnit::FromMicroseconds(-1);
   }
 
   int64_t numFrames = 0;
   const auto numAudioFrames = mParser.VBRInfo().NumAudioFrames();
-  if (mParser.VBRInfo().IsValid()) {
+  if (mParser.VBRInfo().IsValid() && numAudioFrames.valueOr(0) + 1 > 1) {
     // VBR headers don't include the VBR header frame.
     numFrames = numAudioFrames.value() + 1;
   } else {
     const int64_t streamLen = StreamLength();
     if (streamLen < 0) {
       // Unknown length, we can't estimate duration.
       return TimeUnit::FromMicroseconds(-1);
     }
@@ -577,17 +577,16 @@ MP3TrackDemuxer::GetNextFrame(const Medi
   frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
 
   MOZ_ASSERT(frame->mTime >= 0);
   MOZ_ASSERT(frame->mDuration > 0);
 
   if (mNumParsedFrames == 1) {
     // First frame parsed, let's read VBR info if available.
-    // TODO: read info that helps with seeking (bug 1163667).
     ByteReader reader(frame->Data(), frame->Size());
     mParser.ParseVBRHeader(&reader);
     reader.DiscardRemaining();
     mFirstFrameOffset = frame->mOffset;
   }
 
   MP3LOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
           " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
@@ -598,33 +597,33 @@ MP3TrackDemuxer::GetNextFrame(const Medi
   return frame.forget();
 }
 
 int64_t
 MP3TrackDemuxer::OffsetFromFrameIndex(int64_t aFrameIndex) const {
   int64_t offset = 0;
   const auto& vbr = mParser.VBRInfo();
 
-  if (vbr.IsValid()) {
+  if (vbr.IsComplete()) {
     offset = mFirstFrameOffset + aFrameIndex * vbr.NumBytes().value() /
              vbr.NumAudioFrames().value();
   } else if (AverageFrameLength() > 0) {
     offset = mFirstFrameOffset + aFrameIndex * AverageFrameLength();
   }
 
   MP3LOGV("OffsetFromFrameIndex(%" PRId64 ") -> %" PRId64, aFrameIndex, offset);
   return std::max<int64_t>(mFirstFrameOffset, offset);
 }
 
 int64_t
 MP3TrackDemuxer::FrameIndexFromOffset(int64_t aOffset) const {
   int64_t frameIndex = 0;
   const auto& vbr = mParser.VBRInfo();
 
-  if (vbr.IsValid()) {
+  if (vbr.IsComplete()) {
     frameIndex = static_cast<float>(aOffset - mFirstFrameOffset) /
                  vbr.NumBytes().value() * vbr.NumAudioFrames().value();
     frameIndex = std::min<int64_t>(vbr.NumAudioFrames().value(), frameIndex);
   } else if (AverageFrameLength() > 0) {
     frameIndex = (aOffset - mFirstFrameOffset) / AverageFrameLength();
   }
 
   MP3LOGV("FrameIndexFromOffset(%" PRId64 ") -> %" PRId64, aOffset, frameIndex);
@@ -690,17 +689,17 @@ MP3TrackDemuxer::Read(uint8_t* aBuffer, 
 }
 
 double
 MP3TrackDemuxer::AverageFrameLength() const {
   if (mNumParsedFrames) {
     return static_cast<double>(mTotalFrameLen) / mNumParsedFrames;
   }
   const auto& vbr = mParser.VBRInfo();
-  if (vbr.IsValid() && vbr.NumAudioFrames().value() + 1) {
+  if (vbr.IsComplete() && vbr.NumAudioFrames().value() + 1) {
     return static_cast<double>(vbr.NumBytes().value()) /
            (vbr.NumAudioFrames().value() + 1);
   }
   return 0.0;
 }
 
 // FrameParser
 
@@ -1031,17 +1030,22 @@ FrameParser::VBRHeader::Scale() const {
 
 bool
 FrameParser::VBRHeader::IsTOCPresent() const {
   return mTOC.size() == vbr_header::TOC_SIZE;
 }
 
 bool
 FrameParser::VBRHeader::IsValid() const {
-  return mType != NONE &&
+  return mType != NONE;
+}
+
+bool
+FrameParser::VBRHeader::IsComplete() const {
+  return IsValid() &&
          mNumAudioFrames.valueOr(0) > 0 &&
          mNumBytes.valueOr(0) > 0 &&
          // We don't care about the scale for any computations here.
          // mScale < 101 &&
          true;
 }
 
 int64_t
--- a/dom/media/MP3Demuxer.h
+++ b/dom/media/MP3Demuxer.h
@@ -223,19 +223,22 @@ public:
     const Maybe<uint32_t>& NumBytes() const;
 
     // Returns the VBR scale factor (0: best quality, 100: lowest quality).
     const Maybe<uint32_t>& Scale() const;
 
     // Returns true iff Xing/Info TOC (table of contents) is present.
     bool IsTOCPresent() const;
 
-    // Returns whether the header is valid (containing reasonable field values).
+    // Returns whether the header is valid (type XING or VBRI).
     bool IsValid() const;
 
+    // Returns whether the header is valid and all fields hold non-zero values.
+    bool IsComplete() const;
+
     // Returns the byte offset for the given duration percentage as a factor
     // (0: begin, 1.0: end).
     int64_t Offset(float aDurationFac) const;
 
     // Parses contents of given ByteReader for a valid VBR header.
     // The offset of the passed ByteReader needs to point to an MPEG frame begin,
     // as a VBRI-style header is searched at a fixed offset relative to frame begin.
     // Returns whether a valid VBR header was found in the range.
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -250,37 +250,36 @@ bool VideoData::SetVideoDataToImage(Plan
     return aVideoImage->CopyData(data);
   } else {
     return aVideoImage->AdoptData(data);
   }
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(const VideoInfo& aInfo,
-                  ImageContainer* aContainer,
-                  Image* aImage,
-                  int64_t aOffset,
-                  int64_t aTime,
-                  int64_t aDuration,
-                  const YCbCrBuffer& aBuffer,
-                  bool aKeyframe,
-                  int64_t aTimecode,
-                  const IntRect& aPicture)
+VideoData::CreateAndCopyData(const VideoInfo& aInfo,
+                             ImageContainer* aContainer,
+                             int64_t aOffset,
+                             int64_t aTime,
+                             int64_t aDuration,
+                             const YCbCrBuffer& aBuffer,
+                             bool aKeyframe,
+                             int64_t aTimecode,
+                             const IntRect& aPicture)
 {
-  if (!aImage && !aContainer) {
+  if (!aContainer) {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aOffset,
-                                        aTime,
-                                        aDuration,
-                                        aKeyframe,
-                                        aTimecode,
-                                        aInfo.mDisplay,
-                                        0));
+                                      aTime,
+                                      aDuration,
+                                      aKeyframe,
+                                      aTimecode,
+                                      aInfo.mDisplay,
+                                      0));
     return v.forget();
   }
 
   // The following situation should never happen unless there is a bug
   // in the decoder
   if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
       aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
     NS_ERROR("C planes with different sizes");
@@ -308,156 +307,104 @@ VideoData::Create(const VideoInfo& aInfo
   {
     // The specified picture dimensions can't be contained inside the video
     // frame, we'll stomp memory if we try to copy it. Fail.
     NS_WARNING("Overflowing picture rect");
     return nullptr;
   }
 
   RefPtr<VideoData> v(new VideoData(aOffset,
-                                      aTime,
-                                      aDuration,
-                                      aKeyframe,
-                                      aTimecode,
-                                      aInfo.mDisplay,
-                                      0));
+                                    aTime,
+                                    aDuration,
+                                    aKeyframe,
+                                    aTimecode,
+                                    aInfo.mDisplay,
+                                    0));
 #ifdef MOZ_WIDGET_GONK
   const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
   const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
   const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
 #endif
 
-  if (!aImage) {
-    // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
-    // format.
+  // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
+  // format.
 #ifdef MOZ_WIDGET_GONK
-    if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
-      v->mImage = new layers::GrallocImage();
-    }
+  if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
+    v->mImage = new layers::GrallocImage();
+  }
 #endif
-    if (!v->mImage) {
-      v->mImage = aContainer->CreatePlanarYCbCrImage();
-    }
-  } else {
-    v->mImage = aImage;
+  if (!v->mImage) {
+    v->mImage = aContainer->CreatePlanarYCbCrImage();
   }
 
   if (!v->mImage) {
     return nullptr;
   }
   NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR ||
                v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
                "Wrong format?");
   PlanarYCbCrImage* videoImage = v->mImage->AsPlanarYCbCrImage();
   MOZ_ASSERT(videoImage);
 
-  bool shouldCopyData = (aImage == nullptr);
   if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
-                                      shouldCopyData)) {
+                                      true /* aCopyData */)) {
     return nullptr;
   }
 
 #ifdef MOZ_WIDGET_GONK
   if (!videoImage->IsValid() && !aImage && IsYV12Format(Y, Cb, Cr)) {
     // Failed to allocate gralloc. Try fallback.
     v->mImage = aContainer->CreatePlanarYCbCrImage();
     if (!v->mImage) {
       return nullptr;
     }
     videoImage = v->mImage->AsPlanarYCbCrImage();
-    if(!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
-                                       true /* aCopyData */)) {
+    if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
+                                        true /* aCopyData */)) {
       return nullptr;
     }
   }
 #endif
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(const VideoInfo& aInfo,
-                  ImageContainer* aContainer,
-                  int64_t aOffset,
-                  int64_t aTime,
-                  int64_t aDuration,
-                  const YCbCrBuffer& aBuffer,
-                  bool aKeyframe,
-                  int64_t aTimecode,
-                  const IntRect& aPicture)
-{
-  return Create(aInfo, aContainer, nullptr, aOffset, aTime, aDuration, aBuffer,
-                aKeyframe, aTimecode, aPicture);
-}
-
-/* static */
-already_AddRefed<VideoData>
-VideoData::Create(const VideoInfo& aInfo,
-                  Image* aImage,
-                  int64_t aOffset,
-                  int64_t aTime,
-                  int64_t aDuration,
-                  const YCbCrBuffer& aBuffer,
-                  bool aKeyframe,
-                  int64_t aTimecode,
-                  const IntRect& aPicture)
-{
-  return Create(aInfo, nullptr, aImage, aOffset, aTime, aDuration, aBuffer,
-                aKeyframe, aTimecode, aPicture);
-}
-
-/* static */
-already_AddRefed<VideoData>
 VideoData::CreateFromImage(const VideoInfo& aInfo,
-                           ImageContainer* aContainer,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aDuration,
                            const RefPtr<Image>& aImage,
                            bool aKeyframe,
                            int64_t aTimecode,
                            const IntRect& aPicture)
 {
   RefPtr<VideoData> v(new VideoData(aOffset,
-                                      aTime,
-                                      aDuration,
-                                      aKeyframe,
-                                      aTimecode,
-                                      aInfo.mDisplay,
-                                      0));
+                                    aTime,
+                                    aDuration,
+                                    aKeyframe,
+                                    aTimecode,
+                                    aInfo.mDisplay,
+                                    0));
   v->mImage = aImage;
   return v.forget();
 }
 
 #ifdef MOZ_OMX_DECODER
 /* static */
 already_AddRefed<VideoData>
-VideoData::Create(const VideoInfo& aInfo,
-                  ImageContainer* aContainer,
-                  int64_t aOffset,
-                  int64_t aTime,
-                  int64_t aDuration,
-                  mozilla::layers::TextureClient* aBuffer,
-                  bool aKeyframe,
-                  int64_t aTimecode,
-                  const IntRect& aPicture)
+VideoData::CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
+                                          int64_t aOffset,
+                                          int64_t aTime,
+                                          int64_t aDuration,
+                                          mozilla::layers::TextureClient* aBuffer,
+                                          bool aKeyframe,
+                                          int64_t aTimecode,
+                                          const IntRect& aPicture)
 {
-  if (!aContainer) {
-    // Create a dummy VideoData with no image. This gives us something to
-    // send to media streams if necessary.
-    RefPtr<VideoData> v(new VideoData(aOffset,
-                                        aTime,
-                                        aDuration,
-                                        aKeyframe,
-                                        aTimecode,
-                                        aInfo.mDisplay,
-                                        0));
-    return v.forget();
-  }
-
   // The following situations could be triggered by invalid input
   if (aPicture.width <= 0 || aPicture.height <= 0) {
     NS_WARNING("Empty picture rect");
     return nullptr;
   }
 
   // Ensure the picture size specified in the headers can be extracted out of
   // the frame we've been supplied without indexing out of bounds.
@@ -467,22 +414,22 @@ VideoData::Create(const VideoInfo& aInfo
   {
     // The specified picture dimensions can't be contained inside the video
     // frame, we'll stomp memory if we try to copy it. Fail.
     NS_WARNING("Overflowing picture rect");
     return nullptr;
   }
 
   RefPtr<VideoData> v(new VideoData(aOffset,
-                                      aTime,
-                                      aDuration,
-                                      aKeyframe,
-                                      aTimecode,
-                                      aInfo.mDisplay,
-                                      0));
+                                    aTime,
+                                    aDuration,
+                                    aKeyframe,
+                                    aTimecode,
+                                    aInfo.mDisplay,
+                                    0));
 
   RefPtr<layers::GrallocImage> image = new layers::GrallocImage();
   image->AdoptData(aBuffer, aPicture.Size());
   v->mImage = image;
 
   return v.forget();
 }
 #endif  // MOZ_OMX_DECODER
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -454,61 +454,40 @@ public:
   // Constructs a VideoData object. If aImage is nullptr, creates a new Image
   // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
   // nullptr, it's stored as the underlying video image and aBuffer is assumed
   // to point to memory within aImage so no copy is made. aTimecode is a codec
   // specific number representing the timestamp of the frame of video data.
   // Returns nsnull if an error occurs. This may indicate that memory couldn't
   // be allocated to create the VideoData object, or it may indicate some
   // problem with the input data (e.g. negative stride).
-  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
-                                            ImageContainer* aContainer,
-                                            Image* aImage,
-                                            int64_t aOffset,
-                                            int64_t aTime,
-                                            int64_t aDuration,
-                                            const YCbCrBuffer &aBuffer,
-                                            bool aKeyframe,
-                                            int64_t aTimecode,
-                                            const IntRect& aPicture);
+
 
-  // Variant that always makes a copy of aBuffer
-  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
-                                            ImageContainer* aContainer,
-                                            int64_t aOffset,
-                                            int64_t aTime,
-                                            int64_t aDuration,
-                                            const YCbCrBuffer &aBuffer,
-                                            bool aKeyframe,
-                                            int64_t aTimecode,
-                                            const IntRect& aPicture);
+  // Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
+  // to allocate an Image to hold the copied data.
+  static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
+                                                       ImageContainer* aContainer,
+                                                       int64_t aOffset,
+                                                       int64_t aTime,
+                                                       int64_t aDuration,
+                                                       const YCbCrBuffer &aBuffer,
+                                                       bool aKeyframe,
+                                                       int64_t aTimecode,
+                                                       const IntRect& aPicture);
 
-  // Variant to create a VideoData instance given an existing aImage
-  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
-                                            Image* aImage,
-                                            int64_t aOffset,
-                                            int64_t aTime,
-                                            int64_t aDuration,
-                                            const YCbCrBuffer &aBuffer,
-                                            bool aKeyframe,
-                                            int64_t aTimecode,
-                                            const IntRect& aPicture);
-
-  static already_AddRefed<VideoData> Create(const VideoInfo& aInfo,
-                                            ImageContainer* aContainer,
-                                            int64_t aOffset,
-                                            int64_t aTime,
-                                            int64_t aDuration,
-                                            layers::TextureClient* aBuffer,
-                                            bool aKeyframe,
-                                            int64_t aTimecode,
-                                            const IntRect& aPicture);
+  static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
+                                                                    int64_t aOffset,
+                                                                    int64_t aTime,
+                                                                    int64_t aDuration,
+                                                                    layers::TextureClient* aBuffer,
+                                                                    bool aKeyframe,
+                                                                    int64_t aTimecode,
+                                                                    const IntRect& aPicture);
 
   static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
-                                                     ImageContainer* aContainer,
                                                      int64_t aOffset,
                                                      int64_t aTime,
                                                      int64_t aDuration,
                                                      const RefPtr<Image>& aImage,
                                                      bool aKeyframe,
                                                      int64_t aTimecode,
                                                      const IntRect& aPicture);
 
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/MathAlgorithms.h"
 #include <limits>
 #include "nsIObserver.h"
 #include "nsTArray.h"
 #include "VideoUtils.h"
 #include "MediaDecoderStateMachine.h"
 #include "ImageContainer.h"
 #include "MediaResource.h"
+#include "VideoFrameContainer.h"
 #include "nsError.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/StaticPtr.h"
 #include "nsIMemoryReporter.h"
 #include "nsComponentManagerUtils.h"
 #include <algorithm>
 #include "MediaShutdownManager.h"
 #include "AudioChannelService.h"
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -12,16 +12,17 @@
 #include "nsSize.h"
 #include "Layers.h"
 #include "MediaData.h"
 #include "MediaInfo.h"
 #include "MediaFormatReader.h"
 #include "MediaResource.h"
 #include "mozilla/SharedThreadPool.h"
 #include "VideoUtils.h"
+#include "VideoFrameContainer.h"
 
 #include <algorithm>
 
 #ifdef MOZ_EME
 #include "mozilla/CDMProxy.h"
 #endif
 
 using namespace mozilla::media;
@@ -1653,25 +1654,23 @@ MediaFormatReader::SetSeekTarget(const S
   mFallbackSeekTime = mPendingSeekTime = Some(target.GetTime());
 }
 
 TimeUnit
 MediaFormatReader::DemuxStartTime()
 {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(!ForceZeroStartTime());
-  MOZ_ASSERT((!HasAudio() || mAudio.mFirstDemuxedSampleTime.isSome()) &&
-             (!HasVideo() || mVideo.mFirstDemuxedSampleTime.isSome()));
+  MOZ_ASSERT(HasAudio() || HasVideo());
 
-  return std::min(HasAudio()
-                  ? mAudio.mFirstDemuxedSampleTime.ref()
-                  : TimeUnit::FromInfinity(),
-                  HasVideo()
-                  ? mVideo.mFirstDemuxedSampleTime.ref()
-                  : TimeUnit::FromInfinity());
+  const TimeUnit startTime =
+    std::min(mAudio.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()),
+             mVideo.mFirstDemuxedSampleTime.refOr(TimeUnit::FromInfinity()));
+
+  return startTime.IsInfinite() ? TimeUnit::FromMicroseconds(0) : startTime;
 }
 
 void
 MediaFormatReader::ScheduleSeek()
 {
   if (mSeekScheduled) {
     return;
   }
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -290,18 +290,20 @@ class MediaRecorder::Session: public nsI
   private:
     RefPtr<Session> mSession;
   };
 
   // For Ensure recorder has tracks to record.
   class TracksAvailableCallback : public OnTracksAvailableCallback
   {
   public:
-    explicit TracksAvailableCallback(Session *aSession)
-     : mSession(aSession) {}
+    explicit TracksAvailableCallback(Session *aSession, TrackRate aTrackRate)
+     : mSession(aSession)
+     , mTrackRate(aTrackRate) {}
+
     virtual void NotifyTracksAvailable(DOMMediaStream* aStream)
     {
       if (mSession->mStopIssued) {
         return;
       }
 
       MOZ_RELEASE_ASSERT(aStream);
       mSession->MediaStreamReady(*aStream);
@@ -342,20 +344,21 @@ class MediaRecorder::Session: public nsI
       // Check that we may access the tracks' content.
       if (!mSession->MediaStreamTracksPrincipalSubsumes()) {
         LOG(LogLevel::Warning, ("Session.NotifyTracksAvailable MediaStreamTracks principal check failed"));
         mSession->DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR);
         return;
       }
 
       LOG(LogLevel::Debug, ("Session.NotifyTracksAvailable track type = (%d)", trackTypes));
-      mSession->InitEncoder(trackTypes);
+      mSession->InitEncoder(trackTypes, mTrackRate);
     }
   private:
     RefPtr<Session> mSession;
+    TrackRate mTrackRate;
   };
   // Main thread task.
   // To delete RecordingSession object.
   class DestroyRunnable : public Runnable
   {
   public:
     explicit DestroyRunnable(Session* aSession)
       : mSession(aSession) {}
@@ -407,16 +410,17 @@ class MediaRecorder::Session: public nsI
 public:
   Session(MediaRecorder* aRecorder, int32_t aTimeSlice)
     : mRecorder(aRecorder)
     , mTimeSlice(aTimeSlice)
     , mStopIssued(false)
     , mIsStartEventFired(false)
     , mIsRegisterProfiler(false)
     , mNeedSessionEndTask(true)
+    , mSelectedVideoTrackID(TRACK_NONE)
   {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_COUNT_CTOR(MediaRecorder::Session);
 
     uint32_t maxMem = Preferences::GetUint("media.recorder.max_memory",
                                            MAX_ALLOW_MEMORY_BUFFER);
     mEncodedBufferCache = new EncodedBufferCache(maxMem);
     mLastBlobTimeStamp = TimeStamp::Now();
@@ -462,43 +466,44 @@ public:
 
   void Start()
   {
     LOG(LogLevel::Debug, ("Session.Start %p", this));
     MOZ_ASSERT(NS_IsMainThread());
 
     // Create a Track Union Stream
     MediaStreamGraph* gm = mRecorder->GetSourceMediaStream()->Graph();
+    TrackRate trackRate = gm->GraphRate();
     mTrackUnionStream = gm->CreateTrackUnionStream();
     MOZ_ASSERT(mTrackUnionStream, "CreateTrackUnionStream failed");
 
     mTrackUnionStream->SetAutofinish(true);
 
     DOMMediaStream* domStream = mRecorder->Stream();
     if (domStream) {
       // Get the available tracks from the DOMMediaStream.
       // The callback will report back tracks that we have to connect to
       // mTrackUnionStream and listen to principal changes on.
-      TracksAvailableCallback* tracksAvailableCallback = new TracksAvailableCallback(this);
+      TracksAvailableCallback* tracksAvailableCallback = new TracksAvailableCallback(this, trackRate);
       domStream->OnTracksAvailable(tracksAvailableCallback);
     } else {
       // Check that we may access the audio node's content.
       if (!AudioNodePrincipalSubsumes()) {
         LOG(LogLevel::Warning, ("Session.Start AudioNode principal check failed"));
         DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR);
         return;
       }
       // Bind this Track Union Stream with Source Media.
       RefPtr<MediaInputPort> inputPort =
         mTrackUnionStream->AllocateInputPort(mRecorder->GetSourceMediaStream());
       mInputPorts.AppendElement(inputPort.forget());
       MOZ_ASSERT(mInputPorts[mInputPorts.Length()-1]);
 
       // Web Audio node has only audio.
-      InitEncoder(ContainerWriter::CREATE_AUDIO_TRACK);
+      InitEncoder(ContainerWriter::CREATE_AUDIO_TRACK, trackRate);
     }
   }
 
   void Stop()
   {
     LOG(LogLevel::Debug, ("Session.Stop %p", this));
     MOZ_ASSERT(NS_IsMainThread());
     mStopIssued = true;
@@ -729,17 +734,17 @@ private:
       return false;
     }
 
     uint32_t perm = nsIPermissionManager::DENY_ACTION;
     pm->TestExactPermissionFromPrincipal(doc->NodePrincipal(), aType, &perm);
     return perm == nsIPermissionManager::ALLOW_ACTION;
   }
 
-  void InitEncoder(uint8_t aTrackTypes)
+  void InitEncoder(uint8_t aTrackTypes, TrackRate aTrackRate)
   {
     LOG(LogLevel::Debug, ("Session.InitEncoder %p", this));
     MOZ_ASSERT(NS_IsMainThread());
 
     if (!mRecorder) {
       LOG(LogLevel::Debug, ("Session.InitEncoder failure, mRecorder is null %p", this));
       return;
     }
@@ -747,52 +752,64 @@ private:
     // At this stage, the API doesn't allow UA to choose the output mimeType format.
 
     // Make sure the application has permission to assign AUDIO_3GPP
     if (mRecorder->mMimeType.EqualsLiteral(AUDIO_3GPP) && CheckPermission("audio-capture:3gpp")) {
       mEncoder = MediaEncoder::CreateEncoder(NS_LITERAL_STRING(AUDIO_3GPP),
                                              mRecorder->GetAudioBitrate(),
                                              mRecorder->GetVideoBitrate(),
                                              mRecorder->GetBitrate(),
-                                             aTrackTypes);
+                                             aTrackTypes, aTrackRate);
     } else if (mRecorder->mMimeType.EqualsLiteral(AUDIO_3GPP2) && CheckPermission("audio-capture:3gpp2")) {
       mEncoder = MediaEncoder::CreateEncoder(NS_LITERAL_STRING(AUDIO_3GPP2),
                                              mRecorder->GetAudioBitrate(),
                                              mRecorder->GetVideoBitrate(),
                                              mRecorder->GetBitrate(),
-                                             aTrackTypes);
+                                             aTrackTypes, aTrackRate);
     } else {
       mEncoder = MediaEncoder::CreateEncoder(NS_LITERAL_STRING(""),
                                              mRecorder->GetAudioBitrate(),
                                              mRecorder->GetVideoBitrate(),
                                              mRecorder->GetBitrate(),
-                                             aTrackTypes);
+                                             aTrackTypes, aTrackRate);
     }
 
     if (!mEncoder) {
       LOG(LogLevel::Debug, ("Session.InitEncoder !mEncoder %p", this));
       DoSessionEndTask(NS_ERROR_ABORT);
       return;
     }
 
     // Media stream is ready but UA issues a stop method follow by start method.
     // The Session::stop would clean the mTrackUnionStream. If the AfterTracksAdded
     // comes after stop command, this function would crash.
     if (!mTrackUnionStream) {
       LOG(LogLevel::Debug, ("Session.InitEncoder !mTrackUnionStream %p", this));
       DoSessionEndTask(NS_OK);
       return;
     }
-    mTrackUnionStream->AddListener(mEncoder);
+    mTrackUnionStream->AddListener(mEncoder.get());
+
+    nsTArray<RefPtr<mozilla::dom::VideoStreamTrack>> videoTracks;
+    DOMMediaStream* domStream = mRecorder->Stream();
+    if (domStream) {
+      domStream->GetVideoTracks(videoTracks);
+      if (!videoTracks.IsEmpty()) {
+        // Right now MediaRecorder doesn't handle multiple video tracks, so
+        // we just bind to the first video track. Bug 1276928 tracks the
+        // follow-up work for multi-track support.
+        videoTracks[0]->AddDirectListener(mEncoder->GetVideoSink());
+      }
+    }
+
     // Try to use direct listeners if possible
-    DOMMediaStream* domStream = mRecorder->Stream();
     if (domStream && domStream->GetInputStream()) {
       mInputStream = domStream->GetInputStream()->AsSourceStream();
       if (mInputStream) {
-        mInputStream->AddDirectListener(mEncoder);
+        mInputStream->AddDirectListener(mEncoder.get());
         mEncoder->SetDirectConnect(true);
       }
     }
 
     // Create a thread to read encode media data from MediaEncoder.
     if (!mReadThread) {
       nsresult rv = NS_NewNamedThread("Media_Encoder", getter_AddRefs(mReadThread));
       if (NS_FAILED(rv)) {
@@ -842,29 +859,34 @@ private:
       MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed");
     }
     mNeedSessionEndTask = false;
   }
   void CleanupStreams()
   {
     if (mInputStream) {
       if (mEncoder) {
-        mInputStream->RemoveDirectListener(mEncoder);
+        mInputStream->RemoveDirectListener(mEncoder.get());
       }
       mInputStream = nullptr;
     }
     for (RefPtr<MediaInputPort>& inputPort : mInputPorts) {
       MOZ_ASSERT(inputPort);
       inputPort->Destroy();
     }
     mInputPorts.Clear();
 
     if (mTrackUnionStream) {
+      // MediaEncoder initialization can fail and we may still reach
+      // |CleanupStreams|, so mEncoder can be nullptr at this point.
+      if (mEncoder && mSelectedVideoTrackID != TRACK_NONE) {
+        mTrackUnionStream->RemoveVideoOutput(mEncoder->GetVideoSink(), mSelectedVideoTrackID);
+      }
       if (mEncoder) {
-        mTrackUnionStream->RemoveListener(mEncoder);
+        mTrackUnionStream->RemoveListener(mEncoder.get());
       }
       mTrackUnionStream->Destroy();
       mTrackUnionStream = nullptr;
     }
 
     if (mMediaStream) {
       mMediaStream->UnregisterTrackListener(this);
       mMediaStream = nullptr;
@@ -943,16 +965,17 @@ private:
   // Indicate the session had fire start event. Encoding thread only.
   bool mIsStartEventFired;
   // The register flag for "Media_Encoder" thread to profiler
   bool mIsRegisterProfiler;
   // False if the InitEncoder called successfully, ensure the
   // ExtractRunnable/DestroyRunnable will end the session.
   // Main thread only.
   bool mNeedSessionEndTask;
+  TrackID mSelectedVideoTrackID;
 };
 
 NS_IMPL_ISUPPORTS(MediaRecorder::Session, nsIObserver)
 
 MediaRecorder::~MediaRecorder()
 {
   if (mPipeStream != nullptr) {
     mInputPort->Destroy();
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -18,20 +18,22 @@
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
 #include "AudioCaptureStream.h"
 #include "AudioChannelService.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
 #include "MediaStreamListener.h"
+#include "MediaStreamVideoSink.h"
 #include "mozilla/dom/AudioContextBinding.h"
 #include "mozilla/media/MediaUtils.h"
 #include <algorithm>
 #include "GeckoProfiler.h"
+#include "VideoFrameContainer.h"
 #include "mozilla/unused.h"
 #include "mozilla/media/MediaUtils.h"
 #ifdef MOZ_WEBRTC
 #include "AudioOutputObserver.h"
 #endif
 #include "mtransport/runnable_utils.h"
 
 #include "webaudio/blink/HRTFDatabaseLoader.h"
@@ -102,16 +104,25 @@ MediaStreamGraphImpl::FinishStream(Media
 
   SetStreamOrderDirty();
 }
 
 void
 MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream)
 {
   aStream->mTracksStartTime = mProcessedTime;
+
+  if (aStream->AsSourceStream()) {
+    SourceMediaStream* source = aStream->AsSourceStream();
+    TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
+    TimeStamp processedTimeStamp = currentTimeStamp +
+      TimeDuration::FromSeconds(MediaTimeToSeconds(mProcessedTime - IterationEnd()));
+    source->SetStreamTracksStartTimeStamp(processedTimeStamp);
+  }
+
   if (aStream->IsSuspended()) {
     mSuspendedStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
   } else {
     mStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to graph %p, count %lu", aStream, this, mStreams.Length()));
     LIFECYCLE_LOG("Adding media stream %p to graph %p, count %lu", aStream, this, mStreams.Length());
   }
@@ -225,26 +236,16 @@ MediaStreamGraphImpl::ExtractPendingInpu
       // Video case.
       if (data->mData->GetType() == MediaSegment::VIDEO) {
         if (data->mCommands) {
           MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
           for (MediaStreamListener* l : aStream->mListeners) {
             l->NotifyQueuedTrackChanges(this, data->mID,
                                         offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
           }
-        } else {
-          // Fixme: This part will be removed in the bug 1201363. It will be
-          // removed in changeset "Do not copy video segment to StreamTracks in
-          // TrackUnionStream."
-
-          // Dealing with video and not TRACK_CREATE and TRACK_END case.
-          for (MediaStreamListener* l : aStream->mListeners) {
-            l->NotifyQueuedTrackChanges(this, data->mID,
-                                        offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
-          }
         }
       }
 
       for (TrackBound<MediaStreamTrackListener>& b : aStream->mTrackListeners) {
         if (b.mTrackID != data->mID) {
           continue;
         }
         b.mListener->NotifyQueuedChanges(this, offset, *data->mData);
@@ -921,203 +922,16 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     // Need unique id for stream & track - and we want it to match the inserter
     output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
                                      mMixer, AudioChannelCount(),
                                      mSampleRate);
   }
   return ticksWritten;
 }
 
-static void
-SetImageToBlackPixel(PlanarYCbCrImage* aImage)
-{
-  uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };
-
-  PlanarYCbCrData data;
-  data.mYChannel = blackPixel;
-  data.mCbChannel = blackPixel + 1;
-  data.mCrChannel = blackPixel + 2;
-  data.mYStride = data.mCbCrStride = 1;
-  data.mPicSize = data.mYSize = data.mCbCrSize = IntSize(1, 1);
-  aImage->CopyData(data);
-}
-
-class VideoFrameContainerInvalidateRunnable : public Runnable {
-public:
-  explicit VideoFrameContainerInvalidateRunnable(VideoFrameContainer* aVideoFrameContainer)
-    : mVideoFrameContainer(aVideoFrameContainer)
-  {}
-  NS_IMETHOD Run()
-  {
-    MOZ_ASSERT(NS_IsMainThread());
-
-    mVideoFrameContainer->Invalidate();
-
-    return NS_OK;
-  }
-private:
-  RefPtr<VideoFrameContainer> mVideoFrameContainer;
-};
-
-void
-MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
-{
-  MOZ_ASSERT(mRealtime, "Should only attempt to play video in realtime mode");
-
-  if (aStream->mVideoOutputs.IsEmpty())
-    return;
-
-  TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
-
-  // Collect any new frames produced in this iteration.
-  AutoTArray<ImageContainer::NonOwningImage,4> newImages;
-  PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;
-  RefPtr<Image> blackImage;
-
-  MOZ_ASSERT(mProcessedTime >= aStream->mTracksStartTime, "frame position before buffer?");
-  // We only look at the non-blocking interval
-  StreamTime frameBufferTime = aStream->GraphTimeToStreamTime(mProcessedTime);
-  StreamTime bufferEndTime = aStream->GraphTimeToStreamTime(aStream->mStartBlocking);
-  StreamTime start;
-  const VideoChunk* chunk;
-  for ( ;
-       frameBufferTime < bufferEndTime;
-       frameBufferTime = start + chunk->GetDuration()) {
-    // Pick the last track that has a video chunk for the time, and
-    // schedule its frame.
-    chunk = nullptr;
-    for (StreamTracks::TrackIter tracks(aStream->GetStreamTracks(),
-                                        MediaSegment::VIDEO);
-         !tracks.IsEnded();
-         tracks.Next()) {
-      VideoSegment* segment = tracks->Get<VideoSegment>();
-      StreamTime thisStart;
-      const VideoChunk* thisChunk =
-        segment->FindChunkContaining(frameBufferTime, &thisStart);
-      if (thisChunk && thisChunk->mFrame.GetImage()) {
-        start = thisStart;
-        chunk = thisChunk;
-      }
-    }
-    if (!chunk)
-      break;
-
-    const VideoFrame* frame = &chunk->mFrame;
-    if (*frame == aStream->mLastPlayedVideoFrame) {
-      continue;
-    }
-
-    Image* image = frame->GetImage();
-    STREAM_LOG(LogLevel::Verbose,
-               ("MediaStream %p writing video frame %p (%dx%d)",
-                aStream, image, frame->GetIntrinsicSize().width,
-                frame->GetIntrinsicSize().height));
-    // Schedule this frame after the previous frame finishes, instead of at
-    // its start time.  These times only differ in the case of multiple
-    // tracks.
-    // frameBufferTime is in the non-blocking interval.
-    GraphTime frameTime = aStream->StreamTimeToGraphTime(frameBufferTime);
-    TimeStamp targetTime = currentTimeStamp +
-      TimeDuration::FromSeconds(MediaTimeToSeconds(frameTime - IterationEnd()));
-
-    if (frame->GetForceBlack()) {
-      if (!blackImage) {
-        blackImage = aStream->mVideoOutputs[0]->GetImageContainer()->CreatePlanarYCbCrImage();
-        if (blackImage) {
-          // Sets the image to a single black pixel, which will be scaled to
-          // fill the rendered size.
-          SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage());
-        }
-      }
-      if (blackImage) {
-        image = blackImage;
-      }
-    }
-    newImages.AppendElement(ImageContainer::NonOwningImage(image, targetTime));
-
-    lastPrincipalHandle = chunk->GetPrincipalHandle();
-
-    aStream->mLastPlayedVideoFrame = *frame;
-  }
-
-  if (!aStream->mLastPlayedVideoFrame.GetImage())
-    return;
-
-  AutoTArray<ImageContainer::NonOwningImage,4> images;
-  bool haveMultipleImages = false;
-
-  for (uint32_t i = 0; i < aStream->mVideoOutputs.Length(); ++i) {
-    VideoFrameContainer* output = aStream->mVideoOutputs[i];
-
-    bool principalHandleChanged =
-      lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
-      lastPrincipalHandle != output->GetLastPrincipalHandle();
-
-    // Find previous frames that may still be valid.
-    AutoTArray<ImageContainer::OwningImage,4> previousImages;
-    output->GetImageContainer()->GetCurrentImages(&previousImages);
-    uint32_t j = previousImages.Length();
-    if (j) {
-      // Re-use the most recent frame before currentTimeStamp and subsequent,
-      // always keeping at least one frame.
-      do {
-        --j;
-      } while (j > 0 && previousImages[j].mTimeStamp > currentTimeStamp);
-    }
-    if (previousImages.Length() - j + newImages.Length() > 1) {
-      haveMultipleImages = true;
-    }
-
-    // Don't update if there are no changes.
-    if (j == 0 && newImages.IsEmpty())
-      continue;
-
-    for ( ; j < previousImages.Length(); ++j) {
-      const auto& image = previousImages[j];
-      // Cope with potential clock skew with AudioCallbackDriver.
-      if (newImages.Length() && image.mTimeStamp > newImages[0].mTimeStamp) {
-        STREAM_LOG(LogLevel::Warning,
-                   ("Dropping %u video frames due to clock skew",
-                    unsigned(previousImages.Length() - j)));
-        break;
-      }
-
-      images.AppendElement(ImageContainer::
-                           NonOwningImage(image.mImage,
-                                          image.mTimeStamp, image.mFrameID));
-    }
-
-    // Add the frames from this iteration.
-    for (auto& image : newImages) {
-      image.mFrameID = output->NewFrameID();
-      images.AppendElement(image);
-    }
-
-    if (principalHandleChanged) {
-      output->UpdatePrincipalHandleForFrameID(lastPrincipalHandle,
-                                              newImages.LastElement().mFrameID);
-    }
-
-    output->SetCurrentFrames(aStream->mLastPlayedVideoFrame.GetIntrinsicSize(),
-                             images);
-
-    nsCOMPtr<nsIRunnable> event =
-      new VideoFrameContainerInvalidateRunnable(output);
-    DispatchToMainThreadAfterStreamStateUpdate(event.forget());
-
-    images.ClearAndRetainStorage();
-  }
-
-  // If the stream has finished and the timestamps of all frames have expired
-  // then no more updates are required.
-  if (aStream->mFinished && !haveMultipleImages) {
-    aStream->mLastPlayedVideoFrame.SetNull();
-  }
-}
-
 void
 MediaStreamGraphImpl::OpenAudioInputImpl(int aID,
                                          AudioDataListener *aListener)
 {
   // Bug 1238038 Need support for multiple mics at once
   if (mInputDeviceUsers.Count() > 0 &&
       !mInputDeviceUsers.Get(aListener, nullptr)) {
     NS_ASSERTION(false, "Input from multiple mics not yet supported; bug 1238038");
@@ -1521,17 +1335,16 @@ MediaStreamGraphImpl::Process()
         StreamTime ticksPlayedForThisStream = PlayAudio(stream);
         if (!ticksPlayed) {
           ticksPlayed = ticksPlayedForThisStream;
         } else {
           MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
               "Each stream should have the same number of frame.");
         }
       }
-      PlayVideo(stream);
     }
     if (stream->mStartBlocking > mProcessedTime) {
       allBlockedForever = false;
     }
   }
 
   if (CurrentDriver()->AsAudioCallbackDriver() && ticksPlayed) {
     mMixer.FinishMixing();
@@ -2254,65 +2067,90 @@ MediaStream::RemoveAudioOutput(void* aKe
       mStream->RemoveAudioOutputImpl(mKey);
     }
     void* mKey;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
 }
 
 void
-MediaStream::AddVideoOutputImpl(already_AddRefed<VideoFrameContainer> aContainer)
+MediaStream::AddVideoOutputImpl(already_AddRefed<MediaStreamVideoSink> aSink,
+                                TrackID aID)
 {
-  RefPtr<VideoFrameContainer> container = aContainer;
-  STREAM_LOG(LogLevel::Info, ("MediaStream %p Adding VideoFrameContainer %p as output",
-                              this, container.get()));
-  *mVideoOutputs.AppendElement() = container.forget();
+  RefPtr<MediaStreamVideoSink> sink = aSink;
+  STREAM_LOG(LogLevel::Info, ("MediaStream %p Adding MediaStreamVideoSink %p as output",
+                              this, sink.get()));
+  MOZ_ASSERT(aID != TRACK_NONE);
+   for (auto entry : mVideoOutputs) {
+     if (entry.mListener == sink &&
+         (entry.mTrackID == TRACK_ANY || entry.mTrackID == aID)) {
+       return;
+     }
+   }
+   TrackBound<MediaStreamVideoSink>* l = mVideoOutputs.AppendElement();
+   l->mListener = sink;
+   l->mTrackID = aID;
+
+   AddDirectTrackListenerImpl(sink.forget(), aID);
 }
 
 void
-MediaStream::RemoveVideoOutputImpl(VideoFrameContainer* aContainer)
+MediaStream::RemoveVideoOutputImpl(MediaStreamVideoSink* aSink,
+                                   TrackID aID)
 {
-  STREAM_LOG(LogLevel::Info, ("MediaStream %p Removing VideoFrameContainer %p as output",
-                              this, aContainer));
+  STREAM_LOG(LogLevel::Info, ("MediaStream %p Removing MediaStreamVideoSink %p as output",
+                              this, aSink));
+  MOZ_ASSERT(aID != TRACK_NONE);
+
   // Ensure that any frames currently queued for playback by the compositor
   // are removed.
-  aContainer->ClearFutureFrames();
-  mVideoOutputs.RemoveElement(aContainer);
+  aSink->ClearFrames();
+  for (size_t i = 0; i < mVideoOutputs.Length(); ++i) {
+    if (mVideoOutputs[i].mListener == aSink &&
+        (mVideoOutputs[i].mTrackID == TRACK_ANY ||
+         mVideoOutputs[i].mTrackID == aID)) {
+      mVideoOutputs.RemoveElementAt(i);
+    }
+  }
+
+  RemoveDirectTrackListenerImpl(aSink, aID);
 }
 
 void
-MediaStream::AddVideoOutput(VideoFrameContainer* aContainer)
+MediaStream::AddVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
 {
   class Message : public ControlMessage {
   public:
-    Message(MediaStream* aStream, VideoFrameContainer* aContainer) :
-      ControlMessage(aStream), mContainer(aContainer) {}
+    Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID) :
+      ControlMessage(aStream), mSink(aSink), mID(aID) {}
     void Run() override
     {
-      mStream->AddVideoOutputImpl(mContainer.forget());
+      mStream->AddVideoOutputImpl(mSink.forget(), mID);
     }
-    RefPtr<VideoFrameContainer> mContainer;
+    RefPtr<MediaStreamVideoSink> mSink;
+    TrackID mID;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aContainer));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
 }
 
 void
-MediaStream::RemoveVideoOutput(VideoFrameContainer* aContainer)
+MediaStream::RemoveVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
 {
   class Message : public ControlMessage {
   public:
-    Message(MediaStream* aStream, VideoFrameContainer* aContainer) :
-      ControlMessage(aStream), mContainer(aContainer) {}
+    Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID) :
+      ControlMessage(aStream), mSink(aSink), mID(aID) {}
     void Run() override
     {
-      mStream->RemoveVideoOutputImpl(mContainer);
+      mStream->RemoveVideoOutputImpl(mSink, mID);
     }
-    RefPtr<VideoFrameContainer> mContainer;
+    RefPtr<MediaStreamVideoSink> mSink;
+    TrackID mID;
   };
-  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aContainer));
+  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
 }
 
 void
 MediaStream::Suspend()
 {
   class Message : public ControlMessage {
   public:
     explicit Message(MediaStream* aStream) :
@@ -2793,16 +2631,26 @@ SourceMediaStream::ResampleAudioToGraphS
       return;
     }
     aTrackData->mResampler.own(state);
     aTrackData->mResamplerChannelCount = channels;
   }
   segment->ResampleChunks(aTrackData->mResampler, aTrackData->mInputRate, GraphImpl()->GraphRate());
 }
 
+void
+SourceMediaStream::AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
+                                                         GraphTime aBlockedTime)
+{
+  MutexAutoLock lock(mMutex);
+  mTracksStartTime += aBlockedTime;
+  mStreamTracksStartTimeStamp += TimeDuration::FromSeconds(GraphImpl()->MediaTimeToSeconds(aBlockedTime));
+  mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
+}
+
 bool
 SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
 {
   MutexAutoLock lock(mMutex);
   // ::EndAllTrackAndFinished() can end these before the sources notice
   bool appended = false;
   auto graph = GraphImpl();
   if (!mFinished && graph) {
@@ -2915,32 +2763,54 @@ SourceMediaStream::RemoveDirectListener(
 }
 
 void
 SourceMediaStream::AddDirectTrackListenerImpl(already_AddRefed<DirectMediaStreamTrackListener> aListener,
                                               TrackID aTrackID)
 {
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
   TrackData* data;
-  bool found;
-  bool isAudio;
-  bool isVideo;
+  bool found = false;
+  bool isAudio = false;
+  bool isVideo = false;
   RefPtr<DirectMediaStreamTrackListener> listener = aListener;
   STREAM_LOG(LogLevel::Debug, ("Adding direct track listener %p bound to track %d to source stream %p",
              listener.get(), aTrackID, this));
 
   {
     MutexAutoLock lock(mMutex);
     data = FindDataForTrack(aTrackID);
     found = !!data;
     if (found) {
       isAudio = data->mData->GetType() == MediaSegment::AUDIO;
       isVideo = data->mData->GetType() == MediaSegment::VIDEO;
     }
+
+    // The track might be removed from mUpdateTrack but still exist in
+    // mTracks.
+    auto streamTrack = FindTrack(aTrackID);
+    bool foundTrack = !!streamTrack;
+    if (foundTrack) {
+      MediaStreamVideoSink* videoSink = listener->AsMediaStreamVideoSink();
+      // Re-send the missed VideoSegment to the newly added MediaStreamVideoSink.
+      if (streamTrack->GetType() == MediaSegment::VIDEO && videoSink) {
+        videoSink->SetCurrentFrames(*(static_cast<VideoSegment*>(streamTrack->GetSegment())));
+      }
+    }
+
     if (found && (isAudio || isVideo)) {
+      for (auto entry : mDirectTrackListeners) {
+        if (entry.mListener == listener &&
+            (entry.mTrackID == TRACK_ANY || entry.mTrackID == aTrackID)) {
+          listener->NotifyDirectListenerInstalled(
+            DirectMediaStreamTrackListener::InstallationResult::ALREADY_EXISTS);
+          return;
+        }
+      }
+
       TrackBound<DirectMediaStreamTrackListener>* sourceListener =
         mDirectTrackListeners.AppendElement();
       sourceListener->mListener = listener;
       sourceListener->mTrackID = aTrackID;
     }
   }
   if (!found) {
     STREAM_LOG(LogLevel::Warning, ("Couldn't find source track for direct track listener %p",
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -10,18 +10,18 @@
 #include "mozilla/Mutex.h"
 #include "mozilla/TaskQueue.h"
 
 #include "mozilla/dom/AudioChannelBinding.h"
 
 #include "AudioStream.h"
 #include "nsTArray.h"
 #include "nsIRunnable.h"
-#include "VideoFrameContainer.h"
 #include "VideoSegment.h"
+#include "StreamTracks.h"
 #include "MainThreadUtils.h"
 #include "StreamTracks.h"
 #include "nsAutoPtr.h"
 #include "nsAutoRef.h"
 #include <speex/speex_resampler.h>
 
 class nsIRunnable;
 
@@ -161,18 +161,20 @@ class AudioNodeStream;
 class AudioSegment;
 class CameraPreviewMediaStream;
 class DirectMediaStreamListener;
 class DirectMediaStreamTrackListener;
 class MediaInputPort;
 class MediaStreamGraphImpl;
 class MediaStreamListener;
 class MediaStreamTrackListener;
+class MediaStreamVideoSink;
 class ProcessedMediaStream;
 class SourceMediaStream;
+class TrackUnionStream;
 
 enum MediaStreamGraphEvent : uint32_t;
 enum TrackEventCommand : uint32_t;
 
 /**
  * Helper struct for binding a track listener to a specific TrackID.
  */
 template<typename Listener>
@@ -204,17 +206,17 @@ struct TrackBound
  * Transitions into and out of the "blocked" and "finished" states are managed
  * by the MediaStreamGraph on the media graph thread.
  *
  * We buffer media data ahead of the consumers' reading offsets. It is possible
  * to have buffered data but still be blocked.
  *
  * Any stream can have its audio and video playing when requested. The media
  * stream graph plays audio by constructing audio output streams as necessary.
- * Video is played by setting video frames into an VideoFrameContainer at the right
+ * Video is played by setting video frames into a MediaStreamVideoSink at the right
  * time. To ensure video plays in sync with audio, make sure that the same
  * stream is playing both the audio and video.
  *
  * The data in a stream is managed by StreamTracks. It consists of a set of
  * tracks of various types that can start and end over time.
  *
  * Streams are explicitly managed. The client creates them via
  * MediaStreamGraph::CreateInput/ProcessedMediaStream, and releases them by calling
@@ -286,20 +288,22 @@ public:
   // separate. Since the stream is always playing the same contents, only
   // a single audio output stream is used; the volumes are combined.
   // Currently only the first enabled audio track is played.
   // XXX change this so all enabled audio tracks are mixed and played.
   virtual void AddAudioOutput(void* aKey);
   virtual void SetAudioOutputVolume(void* aKey, float aVolume);
   virtual void RemoveAudioOutput(void* aKey);
   // Since a stream can be played multiple ways, we need to be able to
-  // play to multiple VideoFrameContainers.
+  // play to multiple MediaStreamVideoSinks.
   // Only the first enabled video track is played.
-  virtual void AddVideoOutput(VideoFrameContainer* aContainer);
-  virtual void RemoveVideoOutput(VideoFrameContainer* aContainer);
+  virtual void AddVideoOutput(MediaStreamVideoSink* aSink,
+                              TrackID aID = TRACK_ANY);
+  virtual void RemoveVideoOutput(MediaStreamVideoSink* aSink,
+                                 TrackID aID = TRACK_ANY);
   // Explicitly suspend. Useful for example if a media element is pausing
   // and we need to stop its stream emitting its buffered data. As soon as the
   // Suspend message reaches the graph, the stream stops processing. It
   // ignores its inputs and produces silence/no video until Resumed. Its
   // current time does not advance.
   virtual void Suspend();
   virtual void Resume();
   // Events will be dispatched by calling methods of aListener.
@@ -398,16 +402,17 @@ public:
 
   friend class MediaStreamGraphImpl;
   friend class MediaInputPort;
   friend class AudioNodeExternalInputStream;
 
   virtual SourceMediaStream* AsSourceStream() { return nullptr; }
   virtual ProcessedMediaStream* AsProcessedStream() { return nullptr; }
   virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
+  virtual TrackUnionStream* AsTrackUnionStream() { return nullptr; }
 
   // These Impl methods perform the core functionality of the control methods
   // above, on the media graph thread.
   /**
    * Stop all stream activity and disconnect it from all inputs and outputs.
    * This must be idempotent.
    */
   virtual void DestroyImpl();
@@ -418,18 +423,19 @@ public:
   void SetAudioOutputVolumeImpl(void* aKey, float aVolume);
   void AddAudioOutputImpl(void* aKey);
   // Returns true if this stream has an audio output.
   bool HasAudioOutput()
   {
     return !mAudioOutputs.IsEmpty();
   }
   void RemoveAudioOutputImpl(void* aKey);
-  void AddVideoOutputImpl(already_AddRefed<VideoFrameContainer> aContainer);
-  void RemoveVideoOutputImpl(VideoFrameContainer* aContainer);
+  void AddVideoOutputImpl(already_AddRefed<MediaStreamVideoSink> aSink,
+                          TrackID aID);
+  void RemoveVideoOutputImpl(MediaStreamVideoSink* aSink, TrackID aID);
   void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
   void RemoveListenerImpl(MediaStreamListener* aListener);
   void RemoveAllListenersImpl();
   virtual void AddTrackListenerImpl(already_AddRefed<MediaStreamTrackListener> aListener,
                                     TrackID aTrackID);
   virtual void RemoveTrackListenerImpl(MediaStreamTrackListener* aListener,
                                        TrackID aTrackID);
   virtual void AddDirectTrackListenerImpl(already_AddRefed<DirectMediaStreamTrackListener> aListener,
@@ -532,17 +538,19 @@ public:
   void IncrementSuspendCount() { ++mSuspendedCount; }
   void DecrementSuspendCount()
   {
     NS_ASSERTION(mSuspendedCount > 0, "Suspend count underrun");
     --mSuspendedCount;
   }
 
 protected:
-  void AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime, GraphTime aBlockedTime)
+  // |AdvanceTimeVaryingValuesToCurrentTime| is overridden in SourceMediaStream.
+  virtual void AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
+                                                     GraphTime aBlockedTime)
   {
     mTracksStartTime += aBlockedTime;
     mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
   }
 
   void NotifyMainThreadListeners()
   {
     NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
@@ -578,17 +586,17 @@ protected:
 
   // Client-set volume of this stream
   struct AudioOutput {
     explicit AudioOutput(void* aKey) : mKey(aKey), mVolume(1.0f) {}
     void* mKey;
     float mVolume;
   };
   nsTArray<AudioOutput> mAudioOutputs;
-  nsTArray<RefPtr<VideoFrameContainer>> mVideoOutputs;
+  nsTArray<TrackBound<MediaStreamVideoSink>> mVideoOutputs;
   // We record the last played video frame to avoid playing the frame again
   // with a different frame id.
   VideoFrame mLastPlayedVideoFrame;
   nsTArray<RefPtr<MediaStreamListener> > mListeners;
   nsTArray<TrackBound<MediaStreamTrackListener>> mTrackListeners;
   nsTArray<MainThreadMediaStreamListener*> mMainThreadListeners;
   nsTArray<TrackID> mDisabledTrackIDs;
 
@@ -794,16 +802,21 @@ public:
 
   /**
    * Returns true if this SourceMediaStream contains at least one audio track
    * that is in pending state.
    * This is thread safe, and takes the SourceMediaStream mutex.
    */
   bool HasPendingAudioTrack();
 
+  TimeStamp GetStreamTracksStrartTimeStamp() {
+    MutexAutoLock lock(mMutex);
+    return mStreamTracksStartTimeStamp;
+  }
+
   // XXX need a Reset API
 
   friend class MediaStreamGraphImpl;
 
 protected:
   enum TrackCommands : uint32_t;
 
   virtual ~SourceMediaStream();
@@ -858,27 +871,40 @@ protected:
    * Notify direct consumers of new data to one of the stream tracks.
    * The data doesn't have to be resampled (though it may be).  This is called
    * from AppendToTrack on the thread providing the data, and will call
    * the Listeners on this thread.
    */
   void NotifyDirectConsumers(TrackData *aTrack,
                              MediaSegment *aSegment);
 
+  virtual void
+  AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
+                                        GraphTime aBlockedTime) override;
+  void SetStreamTracksStartTimeStamp(const TimeStamp& aTimeStamp)
+  {
+    MutexAutoLock lock(mMutex);
+    mStreamTracksStartTimeStamp = aTimeStamp;
+  }
+
   // Only accessed on the MSG thread.  Used so to ask the MSGImpl to usecount
   // users of a specific input.
   // XXX Should really be a CubebUtils::AudioDeviceID, but they aren't
   // copyable (opaque pointers)
   RefPtr<AudioDataListener> mInputListener;
 
   // This must be acquired *before* MediaStreamGraphImpl's lock, if they are
   // held together.
   Mutex mMutex;
   // protected by mMutex
   StreamTime mUpdateKnownTracksTime;
+  // This time stamp is updated when the SourceMediaStream is added to the
+  // graph and when it is blocked; specifically, in |AddStreamGraphThread| and
+  // |AdvanceTimeVaryingValuesToCurrentTime|.
+  TimeStamp mStreamTracksStartTimeStamp;
   nsTArray<TrackData> mUpdateTracks;
   nsTArray<TrackData> mPendingTracks;
   nsTArray<RefPtr<DirectMediaStreamListener>> mDirectListeners;
   nsTArray<TrackBound<DirectMediaStreamTrackListener>> mDirectTrackListeners;
   bool mPullEnabled;
   bool mUpdateFinished;
   bool mNeedsMixing;
 };
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -407,20 +407,16 @@ public:
    */
   void CreateOrDestroyAudioStreams(MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
    * to the audio output stream. Returns the number of frames played.
    */
   StreamTime PlayAudio(MediaStream* aStream);
   /**
-   * Set the correct current video frame for stream aStream.
-   */
-  void PlayVideo(MediaStream* aStream);
-  /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamTracks's tracks must be
    * explicitly set to finished by the caller.
    */
   void OpenAudioInputImpl(int aID,
                           AudioDataListener *aListener);
   virtual nsresult OpenAudioInput(int aID,
                                   AudioDataListener *aListener) override;
--- a/dom/media/MediaStreamListener.h
+++ b/dom/media/MediaStreamListener.h
@@ -2,20 +2,25 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_MEDIASTREAMLISTENER_h_
 #define MOZILLA_MEDIASTREAMLISTENER_h_
 
+#include "StreamTracks.h"
+
 namespace mozilla {
 
+class AudioSegment;
 class MediaStream;
 class MediaStreamGraph;
+class MediaStreamVideoSink;
+class VideoSegment;
 
 enum MediaStreamGraphEvent : uint32_t {
   EVENT_FINISHED,
   EVENT_REMOVED,
   EVENT_HAS_DIRECT_LISTENERS, // transition from no direct listeners
   EVENT_HAS_NO_DIRECT_LISTENERS,  // transition to no direct listeners
 };
 
@@ -236,29 +241,35 @@ public:
    * successful or not. The results of this installation are the following:
    * TRACK_NOT_FOUND_AT_SOURCE
    *    We found the source stream of media data for this track, but the track
    *    didn't exist. This should only happen if you try to install the listener
    *    directly to a SourceMediaStream that doesn't contain the given TrackID.
    * STREAM_NOT_SUPPORTED
    *    While looking for the data source of this track, we found a MediaStream
    *    that is not a SourceMediaStream or a TrackUnionStream.
+   * ALREADY_EXISTS
+   *    This DirectMediaStreamTrackListener already exists in the
+   *    SourceMediaStream.
    * SUCCESS
    *    Installation was successful and this listener will start receiving
    *    NotifyRealtimeData on the next AppendToTrack().
    */
   enum class InstallationResult {
     TRACK_NOT_FOUND_AT_SOURCE,
     TRACK_TYPE_NOT_SUPPORTED,
     STREAM_NOT_SUPPORTED,
+    ALREADY_EXISTS,
     SUCCESS
   };
   virtual void NotifyDirectListenerInstalled(InstallationResult aResult) {}
   virtual void NotifyDirectListenerUninstalled() {}
 
+  virtual MediaStreamVideoSink* AsMediaStreamVideoSink() { return nullptr; }
+
 protected:
   virtual ~DirectMediaStreamTrackListener() {}
 
   void MirrorAndDisableSegment(AudioSegment& aFrom, AudioSegment& aTo);
   void MirrorAndDisableSegment(VideoSegment& aFrom, VideoSegment& aTo);
   void NotifyRealtimeTrackDataAndApplyTrackDisabling(MediaStreamGraph* aGraph,
                                                      StreamTime aTrackOffset,
                                                      MediaSegment& aMedia);
new file mode 100644
--- /dev/null
+++ b/dom/media/MediaStreamVideoSink.cpp
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaStreamVideoSink.h"
+
+#include "VideoSegment.h"
+
+namespace mozilla {
+void
+MediaStreamVideoSink::NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
+                                              StreamTime aTrackOffset,
+                                              const MediaSegment& aMedia)
+{
+  if (aMedia.GetType() == MediaSegment::VIDEO) {
+    SetCurrentFrames(static_cast<const VideoSegment&>(aMedia));
+  }
+}
+
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/dom/media/MediaStreamVideoSink.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIASTREAMVIDEOSINK_H_
+#define MEDIASTREAMVIDEOSINK_H_
+
+#include "mozilla/Pair.h"
+
+#include "gfxPoint.h"
+#include "MediaStreamListener.h"
+
+namespace mozilla {
+
+class VideoFrameContainer;
+
+/**
+ * Base class of MediaStreamVideoSink family. This is the output of MediaStream.
+ */
+class MediaStreamVideoSink : public DirectMediaStreamTrackListener {
+public:
+  // Method of DirectMediaStreamTrackListener.
+  void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
+                               StreamTime aTrackOffset,
+                               const MediaSegment& aMedia) override;
+
+  // Call on any thread
+  virtual void SetCurrentFrames(const VideoSegment& aSegment) = 0;
+  virtual void ClearFrames() = 0;
+
+  virtual VideoFrameContainer* AsVideoFrameContainer() { return nullptr; }
+  virtual void Invalidate() {}
+
+  virtual MediaStreamVideoSink* AsMediaStreamVideoSink() override { return this; }
+
+protected:
+  virtual ~MediaStreamVideoSink() {};
+};
+
+} // namespace mozilla
+
+#endif /* MEDIASTREAMVIDEOSINK_H_ */
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -327,22 +327,16 @@ TrackUnionStream::TrackUnionStream() :
         MediaStreamListener* l = mListeners[j];
         // Separate Audio and Video.
         if (segment->GetType() == MediaSegment::AUDIO) {
           l->NotifyQueuedAudioData(Graph(), outputTrack->GetID(),
                                    outputStart,
                                    *static_cast<AudioSegment*>(segment),
                                    map->mInputPort->GetSource(),
                                    map->mInputTrackID);
-        } else {
-          // This part will be removed in bug 1201363.
-          l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
-                                      outputStart, TrackEventCommand::TRACK_EVENT_NONE, *segment,
-                                      map->mInputPort->GetSource(),
-                                      map->mInputTrackID);
         }
       }
       for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
         if (b.mTrackID != outputTrack->GetID()) {
           continue;
         }
         b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
       }
--- a/dom/media/TrackUnionStream.h
+++ b/dom/media/TrackUnionStream.h
@@ -14,24 +14,29 @@ namespace mozilla {
 
 /**
  * See MediaStreamGraph::CreateTrackUnionStream.
  */
 class TrackUnionStream : public ProcessedMediaStream {
 public:
   explicit TrackUnionStream();
 
+  virtual TrackUnionStream* AsTrackUnionStream() override { return this; }
+  friend class DOMMediaStream;
+
   void RemoveInput(MediaInputPort* aPort) override;
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
   void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled) override;
 
   MediaStream* GetInputStreamFor(TrackID aTrackID) override;
   TrackID GetInputTrackIDFor(TrackID aTrackID) override;
 
+  friend class MediaStreamGraphImpl;
+
 protected:
   // Only non-ended tracks are allowed to persist in this map.
   struct TrackMapEntry {
     // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
     // 0 if we haven't consumed any yet.
     StreamTime mEndOfConsumedInputTicks;
     // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
     // previous interval which was unblocked for both the input and output
--- a/dom/media/VideoFrameContainer.cpp
+++ b/dom/media/VideoFrameContainer.cpp
@@ -9,49 +9,186 @@
 #include "mozilla/dom/HTMLMediaElement.h"
 #include "nsIFrame.h"
 #include "nsDisplayList.h"
 #include "nsSVGEffects.h"
 
 using namespace mozilla::layers;
 
 namespace mozilla {
+PRLogModuleInfo* gVideoFrameContainerLog;
+#define CONTAINER_LOG(type, msg) MOZ_LOG(gVideoFrameContainerLog, type, msg)
 
 VideoFrameContainer::VideoFrameContainer(dom::HTMLMediaElement* aElement,
                                          already_AddRefed<ImageContainer> aContainer)
   : mElement(aElement),
     mImageContainer(aContainer), mMutex("nsVideoFrameContainer"),
+    mBlackImage(nullptr),
     mFrameID(0),
     mIntrinsicSizeChanged(false), mImageSizeChanged(false),
     mPendingPrincipalHandle(PRINCIPAL_HANDLE_NONE), mFrameIDForPendingPrincipalHandle(0)
 {
   NS_ASSERTION(aElement, "aElement must not be null");
   NS_ASSERTION(mImageContainer, "aContainer must not be null");
+  if (!gVideoFrameContainerLog) {
+    gVideoFrameContainerLog = PR_NewLogModule("VideoFrameContainer");
+  }
 }
 
 VideoFrameContainer::~VideoFrameContainer()
 {}
 
 PrincipalHandle VideoFrameContainer::GetLastPrincipalHandle()
 {
   MutexAutoLock lock(mMutex);
+  return GetLastPrincipalHandleLocked();
+}
+
+PrincipalHandle VideoFrameContainer::GetLastPrincipalHandleLocked()
+{
   return mLastPrincipalHandle;
 }
 
 void VideoFrameContainer::UpdatePrincipalHandleForFrameID(const PrincipalHandle& aPrincipalHandle,
                                                           const ImageContainer::FrameID& aFrameID)
 {
   MutexAutoLock lock(mMutex);
+  UpdatePrincipalHandleForFrameIDLocked(aPrincipalHandle, aFrameID);
+}
+
+void VideoFrameContainer::UpdatePrincipalHandleForFrameIDLocked(const PrincipalHandle& aPrincipalHandle,
+                                                                const ImageContainer::FrameID& aFrameID)
+{
   if (mPendingPrincipalHandle == aPrincipalHandle) {
     return;
   }
   mPendingPrincipalHandle = aPrincipalHandle;
   mFrameIDForPendingPrincipalHandle = aFrameID;
 }
 
+static void
+SetImageToBlackPixel(PlanarYCbCrImage* aImage)
+{
+  uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };
+
+  PlanarYCbCrData data;
+  data.mYChannel = blackPixel;
+  data.mCbChannel = blackPixel + 1;
+  data.mCrChannel = blackPixel + 2;
+  data.mYStride = data.mCbCrStride = 1;
+  data.mPicSize = data.mYSize = data.mCbCrSize = gfx::IntSize(1, 1);
+  aImage->CopyData(data);
+}
+
+class VideoFrameContainerInvalidateRunnable : public Runnable {
+public:
+  explicit VideoFrameContainerInvalidateRunnable(VideoFrameContainer* aVideoFrameContainer)
+    : mVideoFrameContainer(aVideoFrameContainer)
+  {}
+  NS_IMETHOD Run()
+  {
+    MOZ_ASSERT(NS_IsMainThread());
+
+    mVideoFrameContainer->Invalidate();
+
+    return NS_OK;
+  }
+private:
+  RefPtr<VideoFrameContainer> mVideoFrameContainer;
+};
+
+void VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment)
+{
+  if (aSegment.IsEmpty()) {
+    return;
+  }
+
+  MutexAutoLock lock(mMutex);
+
+  // Collect any new frames produced in this iteration.
+  AutoTArray<ImageContainer::NonOwningImage,4> newImages;
+  PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;
+
+  VideoSegment::ConstChunkIterator iter(aSegment);
+  while (!iter.IsEnded()) {
+    VideoChunk chunk = *iter;
+
+    const VideoFrame* frame = &chunk.mFrame;
+    if (*frame == mLastPlayedVideoFrame) {
+      iter.Next();
+      continue;
+    }
+
+    Image* image = frame->GetImage();
+    CONTAINER_LOG(LogLevel::Verbose,
+                  ("VideoFrameContainer %p writing video frame %p (%d x %d)",
+                  this, image, frame->GetIntrinsicSize().width,
+                  frame->GetIntrinsicSize().height));
+
+    if (frame->GetForceBlack()) {
+      if (!mBlackImage) {
+        mBlackImage = GetImageContainer()->CreatePlanarYCbCrImage();
+        if (mBlackImage) {
+          // Sets the image to a single black pixel, which will be scaled to
+          // fill the rendered size.
+          SetImageToBlackPixel(mBlackImage->AsPlanarYCbCrImage());
+        }
+      }
+      if (mBlackImage) {
+        image = mBlackImage;
+      }
+    }
+    // Don't append null image to the newImages.
+    if (!image) {
+      iter.Next();
+      continue;
+    }
+    newImages.AppendElement(ImageContainer::NonOwningImage(image, chunk.mTimeStamp));
+
+    lastPrincipalHandle = chunk.GetPrincipalHandle();
+
+    mLastPlayedVideoFrame = *frame;
+    iter.Next();
+  }
+
+  // Don't update if there are no changes.
+  if (newImages.IsEmpty()) {
+    return;
+  }
+
+  AutoTArray<ImageContainer::NonOwningImage,4> images;
+
+  bool principalHandleChanged =
+     lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
+     lastPrincipalHandle != GetLastPrincipalHandleLocked();
+
+  // Add the frames from this iteration.
+  for (auto& image : newImages) {
+    image.mFrameID = NewFrameID();
+    images.AppendElement(image);
+  }
+
+  if (principalHandleChanged) {
+    UpdatePrincipalHandleForFrameIDLocked(lastPrincipalHandle,
+                                          newImages.LastElement().mFrameID);
+  }
+
+  SetCurrentFramesLocked(mLastPlayedVideoFrame.GetIntrinsicSize(), images);
+  nsCOMPtr<nsIRunnable> event =
+    new VideoFrameContainerInvalidateRunnable(this);
+  NS_DispatchToMainThread(event.forget());
+
+  images.ClearAndRetainStorage();
+}
+
+void VideoFrameContainer::ClearFrames()
+{
+  ClearFutureFrames();
+}
+
 void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize,
                                           Image* aImage,
                                           const TimeStamp& aTargetTime)
 {
   if (aImage) {
     MutexAutoLock lock(mMutex);
     AutoTArray<ImageContainer::NonOwningImage,1> imageList;
     imageList.AppendElement(
--- a/dom/media/VideoFrameContainer.h
+++ b/dom/media/VideoFrameContainer.h
@@ -8,60 +8,66 @@
 #define VIDEOFRAMECONTAINER_H_
 
 #include "mozilla/Mutex.h"
 #include "mozilla/TimeStamp.h"
 #include "gfxPoint.h"
 #include "nsCOMPtr.h"
 #include "ImageContainer.h"
 #include "MediaSegment.h"
+#include "MediaStreamVideoSink.h"
+#include "VideoSegment.h"
 
 namespace mozilla {
 
 namespace dom {
 class HTMLMediaElement;
 } // namespace dom
 
 /**
  * This object is used in the decoder backend threads and the main thread
  * to manage the "current video frame" state. This state includes timing data
  * and an intrinsic size (see below).
  * This has to be a thread-safe object since it's accessed by resource decoders
  * and other off-main-thread components. So we can't put this state in the media
  * element itself ... well, maybe we could, but it could be risky and/or
  * confusing.
  */
-class VideoFrameContainer {
-  ~VideoFrameContainer();
+class VideoFrameContainer : public MediaStreamVideoSink {
+  virtual ~VideoFrameContainer();
 
 public:
   typedef layers::ImageContainer ImageContainer;
   typedef layers::Image Image;
 
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameContainer)
-
   VideoFrameContainer(dom::HTMLMediaElement* aElement,
                       already_AddRefed<ImageContainer> aContainer);
 
   // Call on any thread
+  virtual void SetCurrentFrames(const VideoSegment& aSegment) override;
+  virtual void ClearFrames() override;
+  void SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage,
+                       const TimeStamp& aTargetTime);
   // Returns the last principalHandle we notified mElement about.
   PrincipalHandle GetLastPrincipalHandle();
+  PrincipalHandle GetLastPrincipalHandleLocked();
   // We will notify mElement that aPrincipalHandle has been applied when all
   // FrameIDs prior to aFrameID have been flushed out.
   // aFrameID is ignored if aPrincipalHandle already is our pending principalHandle.
   void UpdatePrincipalHandleForFrameID(const PrincipalHandle& aPrincipalHandle,
                                        const ImageContainer::FrameID& aFrameID);
-  void SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage,
-                       const TimeStamp& aTargetTime);
+  void UpdatePrincipalHandleForFrameIDLocked(const PrincipalHandle& aPrincipalHandle,
+                                             const ImageContainer::FrameID& aFrameID);
   void SetCurrentFrames(const gfx::IntSize& aIntrinsicSize,
                         const nsTArray<ImageContainer::NonOwningImage>& aImages);
   void ClearCurrentFrame(const gfx::IntSize& aIntrinsicSize)
   {
     SetCurrentFrames(aIntrinsicSize, nsTArray<ImageContainer::NonOwningImage>());
   }
+  VideoFrameContainer* AsVideoFrameContainer() override { return this; }
 
   void ClearCurrentFrame();
   // Make the current frame the only frame in the container, i.e. discard
   // all future frames.
   void ClearFutureFrames();
   // Time in seconds by which the last painted video frame was late by.
   // E.g. if the last painted frame should have been painted at time t,
   // but was actually painted at t+n, this returns n in seconds. Threadsafe.
@@ -75,17 +81,17 @@ public:
     return ++mFrameID;
   }
 
   // Call on main thread
   enum {
     INVALIDATE_DEFAULT,
     INVALIDATE_FORCE
   };
-  void Invalidate() { InvalidateWithFlags(INVALIDATE_DEFAULT); }
+  void Invalidate() override { InvalidateWithFlags(INVALIDATE_DEFAULT); }
   void InvalidateWithFlags(uint32_t aFlags);
   ImageContainer* GetImageContainer();
   void ForgetElement() { mElement = nullptr; }
 
   uint32_t GetDroppedImageCount() { return mImageContainer->GetDroppedImageCount(); }
 
 protected:
   void SetCurrentFramesLocked(const gfx::IntSize& aIntrinsicSize,
@@ -93,25 +99,31 @@ protected:
 
   // Non-addreffed pointer to the element. The element calls ForgetElement
   // to clear this reference when the element is destroyed.
   dom::HTMLMediaElement* mElement;
   RefPtr<ImageContainer> mImageContainer;
 
   // mMutex protects all the fields below.
   Mutex mMutex;
+  // Once the frame is forced to black, we initialize mBlackImage for following
+  // frames.
+  RefPtr<Image> mBlackImage;
   // The intrinsic size is the ideal size which we should render the
   // ImageContainer's current Image at.
   // This can differ from the Image's actual size when the media resource
   // specifies that the Image should be stretched to have the correct aspect
   // ratio.
   gfx::IntSize mIntrinsicSize;
   // We maintain our own mFrameID which is auto-incremented at every
   // SetCurrentFrame() or NewFrameID() call.
   ImageContainer::FrameID mFrameID;
+  // We record the last played video frame to avoid playing the frame again
+  // with a different frame id.
+  VideoFrame mLastPlayedVideoFrame;
   // True when the intrinsic size has been changed by SetCurrentFrame() since
   // the last call to Invalidate().
   // The next call to Invalidate() will recalculate
   // and update the intrinsic size on the element, request a frame reflow and
   // then reset this flag.
   bool mIntrinsicSizeChanged;
   // True when the Image size has changed since the last time Invalidate() was
   // called. When set, the next call to Invalidate() will ensure that the
--- a/dom/media/VideoSegment.cpp
+++ b/dom/media/VideoSegment.cpp
@@ -95,19 +95,21 @@ VideoChunk::VideoChunk()
 VideoChunk::~VideoChunk()
 {}
 
 void
 VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
                           StreamTime aDuration,
                           const IntSize& aIntrinsicSize,
                           const PrincipalHandle& aPrincipalHandle,
-                          bool aForceBlack)
+                          bool aForceBlack,
+                          TimeStamp aTimeStamp)
 {
   VideoChunk* chunk = AppendChunk(aDuration);
+  chunk->mTimeStamp = aTimeStamp;
   VideoFrame frame(aImage, aIntrinsicSize);
   frame.SetForceBlack(aForceBlack);
   frame.SetPrincipalHandle(aPrincipalHandle);
   chunk->mFrame.TakeFrom(&frame);
 }
 
 VideoSegment::VideoSegment()
   : MediaSegmentBase<VideoSegment, VideoChunk>(VIDEO)
--- a/dom/media/VideoSegment.h
+++ b/dom/media/VideoSegment.h
@@ -104,17 +104,18 @@ public:
 
   VideoSegment();
   ~VideoSegment();
 
   void AppendFrame(already_AddRefed<Image>&& aImage,
                    StreamTime aDuration,
                    const IntSize& aIntrinsicSize,
                    const PrincipalHandle& aPrincipalHandle,
-                   bool aForceBlack = false);
+                   bool aForceBlack = false,
+                   TimeStamp aTimeStamp = TimeStamp::Now());
   const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
   {
     VideoChunk* c = GetLastChunk();
     if (!c) {
       return nullptr;
     }
     if (aStart) {
       *aStart = mDuration - c->mDuration;
@@ -132,13 +133,19 @@ public:
 
   // Segment-generic methods not in MediaSegmentBase
   static Type StaticType() { return VIDEO; }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
+
+  bool IsEmpty() const
+  {
+    return mChunks.IsEmpty();
+  }
+
 };
 
 } // namespace mozilla
 
 #endif /* MOZILLA_VIDEOSEGMENT_H_ */
--- a/dom/media/VideoStreamTrack.cpp
+++ b/dom/media/VideoStreamTrack.cpp
@@ -1,20 +1,35 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "VideoStreamTrack.h"
 
+#include "MediaStreamVideoSink.h"
+#include "MediaStreamGraph.h"
+
 #include "mozilla/dom/VideoStreamTrackBinding.h"
 
 namespace mozilla {
 namespace dom {
 
 JSObject*
 VideoStreamTrack::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return VideoStreamTrackBinding::Wrap(aCx, this, aGivenProto);
 }
 
+void
+VideoStreamTrack::AddVideoOutput(MediaStreamVideoSink* aSink)
+{
+  GetOwnedStream()->AddVideoOutput(aSink, mTrackID);
+}
+
+void
+VideoStreamTrack::RemoveVideoOutput(MediaStreamVideoSink* aSink)
+{
+  GetOwnedStream()->RemoveVideoOutput(aSink, mTrackID);
+}
+
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/VideoStreamTrack.h
+++ b/dom/media/VideoStreamTrack.h
@@ -5,32 +5,38 @@
 
 #ifndef VIDEOSTREAMTRACK_H_
 #define VIDEOSTREAMTRACK_H_
 
 #include "MediaStreamTrack.h"
 #include "DOMMediaStream.h"
 
 namespace mozilla {
+
+class MediaStreamVideoSink;
+
 namespace dom {
 
 class VideoStreamTrack : public MediaStreamTrack {
 public:
   VideoStreamTrack(DOMMediaStream* aStream, TrackID aTrackID,
                    TrackID aInputTrackID,
                    MediaStreamTrackSource* aSource,
                    const MediaTrackConstraints& aConstraints = MediaTrackConstraints())
     : MediaStreamTrack(aStream, aTrackID, aInputTrackID, aSource, aConstraints) {}
 
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   VideoStreamTrack* AsVideoStreamTrack() override { return this; }
 
   const VideoStreamTrack* AsVideoStreamTrack() const override { return this; }
 
+  void AddVideoOutput(MediaStreamVideoSink* aSink);
+  void RemoveVideoOutput(MediaStreamVideoSink* aSink);
+
   // WebIDL
   void GetKind(nsAString& aKind) override { aKind.AssignLiteral("video"); }
 
 protected:
   already_AddRefed<MediaStreamTrack> CloneInternal(DOMMediaStream* aOwningStream,
                                                    TrackID aTrackID) override
   {
     return do_AddRef(new VideoStreamTrack(aOwningStream,
--- a/dom/media/VideoTrackList.cpp
+++ b/dom/media/VideoTrackList.cpp
@@ -60,16 +60,25 @@ VideoTrackList::RemoveTrack(const RefPtr
 
 void
 VideoTrackList::EmptyTracks()
 {
   mSelectedIndex = -1;
   MediaTrackList::EmptyTracks();
 }
 
+VideoTrack* VideoTrackList::GetSelectedTrack()
+{
+  if (mSelectedIndex < 0 || static_cast<size_t>(mSelectedIndex) >= mTracks.Length()) {
+    return nullptr;
+  }
+
+  return operator[](mSelectedIndex);
+}
+
 VideoTrack*
 VideoTrackList::IndexedGetter(uint32_t aIndex, bool& aFound)
 {
   MediaTrack* track = MediaTrackList::IndexedGetter(aIndex, aFound);
   return track ? track->AsVideoTrack() : nullptr;
 }
 
 VideoTrack*
--- a/dom/media/VideoTrackList.h
+++ b/dom/media/VideoTrackList.h
@@ -27,16 +27,18 @@ public:
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   VideoTrack* operator[](uint32_t aIndex);
 
   void RemoveTrack(const RefPtr<MediaTrack>& aTrack) override;
 
   void EmptyTracks() override;
 
+  VideoTrack* GetSelectedTrack();
+
   // WebIDL
   int32_t SelectedIndex() const
   {
     return mSelectedIndex;
   }
 
   VideoTrack* IndexedGetter(uint32_t aIndex, bool& aFound);
 
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -9,16 +9,17 @@
 #include "MediaResource.h"
 #include "VideoUtils.h"
 #include "AndroidMediaDecoder.h"
 #include "AndroidMediaPluginHost.h"
 #include "MediaDecoderStateMachine.h"
 #include "ImageContainer.h"
 #include "AbstractMediaDecoder.h"
 #include "gfx2DGlue.h"
+#include "VideoFrameContainer.h"
 
 namespace mozilla {
 
 using namespace mozilla::gfx;
 using namespace mozilla::media;
 
 typedef mozilla::layers::Image Image;
 typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
@@ -180,17 +181,16 @@ bool AndroidMediaReader::DecodeVideoFram
         // was reported relative to the picture size reported by the container.
         picture.x = (mPicture.x * frameSize.width) / mInitialFrame.width;
         picture.y = (mPicture.y * frameSize.height) / mInitialFrame.height;
         picture.width = (frameSize.width * mPicture.width) / mInitialFrame.width;
         picture.height = (frameSize.height * mPicture.height) / mInitialFrame.height;
       }
 
       v = VideoData::CreateFromImage(mInfo.mVideo,
-                                     mDecoder->GetImageContainer(),
                                      pos,
                                      frame.mTimeUs,
                                      1, // We don't know the duration yet.
                                      currentImage,
                                      frame.mKeyFrame,
                                      -1,
                                      picture);
     } else {
@@ -225,25 +225,25 @@ bool AndroidMediaReader::DecodeVideoFram
         // was reported relative to the picture size reported by the container.
         picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width;
         picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height;
         picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
         picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
       }
 
       // This is the approximate byte position in the stream.
-      v = VideoData::Create(mInfo.mVideo,
-                            mDecoder->GetImageContainer(),
-                            pos,
-                            frame.mTimeUs,
-                            1, // We don't know the duration yet.
-                            b,
-                            frame.mKeyFrame,
-                            -1,
-                            picture);
+      v = VideoData::CreateAndCopyData(mInfo.mVideo,
+                                       mDecoder->GetImageContainer(),
+                                       pos,
+                                       frame.mTimeUs,
+                                       1, // We don't know the duration yet.
+                                       b,
+                                       frame.mKeyFrame,
+                                       -1,
+                                       picture);
     }
 
     if (!v) {
       return false;
     }
     a.mStats.mParsedFrames++;
     a.mStats.mDecodedFrames++;
     NS_ASSERTION(a.mStats.mDecodedFrames <= a.mStats.mParsedFrames, "Expect to decode fewer frames than parsed in AndroidMedia...");
--- a/dom/media/encoder/MediaEncoder.cpp
+++ b/dom/media/encoder/MediaEncoder.cpp
@@ -29,16 +29,23 @@
 #endif
 
 mozilla::LazyLogModule gMediaEncoderLog("MediaEncoder");
 #define LOG(type, msg) MOZ_LOG(gMediaEncoderLog, type, msg)
 
 namespace mozilla {
 
 void
+MediaStreamVideoRecorderSink::SetCurrentFrames(const VideoSegment& aSegment)
+{
+  MOZ_ASSERT(mVideoEncoder);
+  mVideoEncoder->SetCurrentFrames(aSegment);
+}
+
+void
 MediaEncoder::SetDirectConnect(bool aConnected)
 {
   mDirectConnected = aConnected;
 }
 
 void
 MediaEncoder::NotifyRealtimeData(MediaStreamGraph* aGraph,
                                  TrackID aID,
@@ -48,18 +55,19 @@ MediaEncoder::NotifyRealtimeData(MediaSt
 {
   if (mSuspended == RECORD_NOT_SUSPENDED) {
     // Process the incoming raw track data from MediaStreamGraph, called on the
     // thread of MediaStreamGraph.
     if (mAudioEncoder && aRealtimeMedia.GetType() == MediaSegment::AUDIO) {
       mAudioEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                               aTrackOffset, aTrackEvents,
                                               aRealtimeMedia);
-
-    } else if (mVideoEncoder && aRealtimeMedia.GetType() == MediaSegment::VIDEO) {
+    } else if (mVideoEncoder &&
+               aRealtimeMedia.GetType() == MediaSegment::VIDEO &&
+               aTrackEvents != TrackEventCommand::TRACK_EVENT_NONE) {
       mVideoEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                               aTrackOffset, aTrackEvents,
                                               aRealtimeMedia);
     }
   }
 }
 
 void
@@ -136,17 +144,18 @@ MediaEncoder::NotifyEvent(MediaStreamGra
     mVideoEncoder->NotifyEvent(aGraph, event);
   }
 }
 
 /* static */
 already_AddRefed<MediaEncoder>
 MediaEncoder::CreateEncoder(const nsAString& aMIMEType, uint32_t aAudioBitrate,
                             uint32_t aVideoBitrate, uint32_t aBitrate,
-                            uint8_t aTrackTypes)
+                            uint8_t aTrackTypes,
+                            TrackRate aTrackRate)
 {
   PROFILER_LABEL("MediaEncoder", "CreateEncoder",
     js::ProfileEntry::Category::OTHER);
 
   nsAutoPtr<ContainerWriter> writer;
   nsAutoPtr<AudioTrackEncoder> audioEncoder;
   nsAutoPtr<VideoTrackEncoder> videoEncoder;
   RefPtr<MediaEncoder> encoder;
@@ -159,32 +168,32 @@ MediaEncoder::CreateEncoder(const nsAStr
   else if (MediaEncoder::IsWebMEncoderEnabled() &&
           (aMIMEType.EqualsLiteral(VIDEO_WEBM) ||
           (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK))) {
     if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK
         && MediaDecoder::IsOpusEnabled()) {
       audioEncoder = new OpusTrackEncoder();
       NS_ENSURE_TRUE(audioEncoder, nullptr);
     }
-    videoEncoder = new VP8TrackEncoder();
+    videoEncoder = new VP8TrackEncoder(aTrackRate);
     writer = new WebMWriter(aTrackTypes);
     NS_ENSURE_TRUE(writer, nullptr);
     NS_ENSURE_TRUE(videoEncoder, nullptr);
     mimeType = NS_LITERAL_STRING(VIDEO_WEBM);
   }
 #endif //MOZ_WEBM_ENCODER
 #ifdef MOZ_OMX_ENCODER
   else if (MediaEncoder::IsOMXEncoderEnabled() &&
           (aMIMEType.EqualsLiteral(VIDEO_MP4) ||
           (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK))) {
     if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) {
       audioEncoder = new OmxAACAudioTrackEncoder();
       NS_ENSURE_TRUE(audioEncoder, nullptr);
     }
-    videoEncoder = new OmxVideoTrackEncoder();
+    videoEncoder = new OmxVideoTrackEncoder(aTrackRate);
     writer = new ISOMediaWriter(aTrackTypes);
     NS_ENSURE_TRUE(writer, nullptr);
     NS_ENSURE_TRUE(videoEncoder, nullptr);
     mimeType = NS_LITERAL_STRING(VIDEO_MP4);
   } else if (MediaEncoder::IsOMXEncoderEnabled() &&
             (aMIMEType.EqualsLiteral(AUDIO_3GPP))) {
     audioEncoder = new OmxAMRAudioTrackEncoder();
     NS_ENSURE_TRUE(audioEncoder, nullptr);
--- a/dom/media/encoder/MediaEncoder.h
+++ b/dom/media/encoder/MediaEncoder.h
@@ -4,25 +4,42 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaEncoder_h_
 #define MediaEncoder_h_
 
 #include "mozilla/DebugOnly.h"
 #include "TrackEncoder.h"
 #include "ContainerWriter.h"
+#include "CubebUtils.h"
 #include "MediaStreamGraph.h"
 #include "MediaStreamListener.h"
 #include "nsAutoPtr.h"
+#include "MediaStreamVideoSink.h"
 #include "nsIMemoryReporter.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Atomics.h"
 
 namespace mozilla {
 
+class MediaStreamVideoRecorderSink : public MediaStreamVideoSink
+{
+public:
+  explicit MediaStreamVideoRecorderSink(VideoTrackEncoder* aEncoder)
+    : mVideoEncoder(aEncoder) {}
+
+  // MediaStreamVideoSink methods
+  virtual void SetCurrentFrames(const VideoSegment& aSegment) override;
+  virtual void ClearFrames() override {}
+
+private:
+  virtual ~MediaStreamVideoRecorderSink() {}
+  VideoTrackEncoder* mVideoEncoder;
+};
+
 /**
  * MediaEncoder is the framework of encoding module, it controls and manages
  * procedures between ContainerWriter and TrackEncoder. ContainerWriter packs
  * the encoded track data with a specific container (e.g. ogg, mp4).
  * AudioTrackEncoder and VideoTrackEncoder are subclasses of TrackEncoder, and
  * are responsible for encoding raw data coming from MediaStreamGraph.
  *
  * Also, MediaEncoder is a type of MediaStreamListener, it starts to receive raw
@@ -49,16 +66,17 @@ namespace mozilla {
  * 3) To start encoding, add this component to its source stream.
  *    => sourceStream->AddListener(encoder);
  *
  * 4) To stop encoding, remove this component from its source stream.
  *    => sourceStream->RemoveListener(encoder);
  */
 class MediaEncoder : public DirectMediaStreamListener
 {
+  friend class MediaStreamVideoRecorderSink;
 public :
   enum {
     ENCODE_METADDATA,
     ENCODE_TRACK,
     ENCODE_DONE,
     ENCODE_ERROR,
   };
 
@@ -67,16 +85,17 @@ public :
                VideoTrackEncoder* aVideoEncoder,
                const nsAString& aMIMEType,
                uint32_t aAudioBitrate,
                uint32_t aVideoBitrate,
                uint32_t aBitrate)
     : mWriter(aWriter)
     , mAudioEncoder(aAudioEncoder)
     , mVideoEncoder(aVideoEncoder)
+    , mVideoSink(new MediaStreamVideoRecorderSink(mVideoEncoder))
     , mStartTime(TimeStamp::Now())
     , mMIMEType(aMIMEType)
     , mSizeOfBuffer(0)
     , mState(MediaEncoder::ENCODE_METADDATA)
     , mShutdown(false)
     , mDirectConnected(false)
     , mSuspended(false)
 {}
@@ -150,17 +169,18 @@ public :
   /**
    * Creates an encoder with a given MIME type. Returns null if we are unable
    * to create the encoder. For now, default aMIMEType to "audio/ogg" and use
    * Ogg+Opus if it is empty.
    */
   static already_AddRefed<MediaEncoder> CreateEncoder(const nsAString& aMIMEType,
                                                       uint32_t aAudioBitrate, uint32_t aVideoBitrate,
                                                       uint32_t aBitrate,
-                                                      uint8_t aTrackTypes = ContainerWriter::CREATE_AUDIO_TRACK);
+                                                      uint8_t aTrackTypes = ContainerWriter::CREATE_AUDIO_TRACK,
+                                                      TrackRate aTrackRate = CubebUtils::PreferredSampleRate());
   /**
    * Encodes the raw track data and returns the final container data. Assuming
    * it is called on a single worker thread. The buffer of container data is
    * allocated in ContainerWriter::GetContainerData(), and is appended to
    * aOutputBufs. aMIMEType is the valid mime-type of this returned container
    * data.
    */
   void GetEncodedData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
@@ -203,24 +223,29 @@ public :
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
   /*
    * Measure the size of the buffer, and memory occupied by mAudioEncoder
    * and mVideoEncoder
    */
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
+  MediaStreamVideoRecorderSink* GetVideoSink() {
+    return mVideoSink.get();
+  }
+
 private:
   // Get encoded data from trackEncoder and write to muxer
   nsresult WriteEncodedDataToMuxer(TrackEncoder *aTrackEncoder);
   // Get metadata from trackEncoder and copy to muxer
   nsresult CopyMetadataToMuxer(TrackEncoder* aTrackEncoder);
   nsAutoPtr<ContainerWriter> mWriter;
   nsAutoPtr<AudioTrackEncoder> mAudioEncoder;
   nsAutoPtr<VideoTrackEncoder> mVideoEncoder;
+  RefPtr<MediaStreamVideoRecorderSink> mVideoSink;
   TimeStamp mStartTime;
   nsString mMIMEType;
   int64_t mSizeOfBuffer;
   int mState;
   bool mShutdown;
   bool mDirectConnected;
   Atomic<int> mSuspended;
   // Get duration from create encoder, for logging purpose
--- a/dom/media/encoder/OmxTrackEncoder.cpp
+++ b/dom/media/encoder/OmxTrackEncoder.cpp
@@ -21,30 +21,29 @@
 
 using namespace android;
 
 namespace mozilla {
 
 #define ENCODER_CONFIG_FRAME_RATE 30 // fps
 #define GET_ENCODED_VIDEO_FRAME_TIMEOUT 100000 // microseconds
 
-OmxVideoTrackEncoder::OmxVideoTrackEncoder()
-  : VideoTrackEncoder()
+OmxVideoTrackEncoder::OmxVideoTrackEncoder(TrackRate aTrackRate)
+  : VideoTrackEncoder(aTrackRate)
 {}
 
 OmxVideoTrackEncoder::~OmxVideoTrackEncoder()
 {}
 
 nsresult
 OmxVideoTrackEncoder::Init(int aWidth, int aHeight, int aDisplayWidth,
-                           int aDisplayHeight, TrackRate aTrackRate)
+                           int aDisplayHeight)
 {
   mFrameWidth = aWidth;
   mFrameHeight = aHeight;
-  mTrackRate = aTrackRate;
   mDisplayWidth = aDisplayWidth;
   mDisplayHeight = aDisplayHeight;
 
   mEncoder = OMXCodecWrapper::CreateAVCEncoder();
   NS_ENSURE_TRUE(mEncoder, NS_ERROR_FAILURE);
 
   nsresult rv = mEncoder->Configure(mFrameWidth, mFrameHeight,
                                     ENCODER_CONFIG_FRAME_RATE);
--- a/dom/media/encoder/OmxTrackEncoder.h
+++ b/dom/media/encoder/OmxTrackEncoder.h
@@ -22,27 +22,26 @@ class OMXAudioEncoder;
  * Bean platform.
  */
 
 namespace mozilla {
 
 class OmxVideoTrackEncoder: public VideoTrackEncoder
 {
 public:
-  OmxVideoTrackEncoder();
+  explicit OmxVideoTrackEncoder(TrackRate aTrackRate);
   ~OmxVideoTrackEncoder();
 
   already_AddRefed<TrackMetadataBase> GetMetadata() override;
 
   nsresult GetEncodedTrack(EncodedFrameContainer& aData) override;
 
 protected:
   nsresult Init(int aWidth, int aHeight,
-                int aDisplayWidth, int aDisplayHeight,
-                TrackRate aTrackRate) override;
+                int aDisplayWidth, int aDisplayHeight) override;
 
 private:
   nsAutoPtr<android::OMXVideoEncoder> mEncoder;
 };
 
 class OmxAudioTrackEncoder : public AudioTrackEncoder
 {
 public:
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -188,67 +188,83 @@ AudioTrackEncoder::DeInterleaveTrackData
 
 size_t
 AudioTrackEncoder::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
 {
   return mRawSegment.SizeOfExcludingThis(aMallocSizeOf);
 }
 
 void
+VideoTrackEncoder::Init(const VideoSegment& aSegment)
+{
+  if (mInitialized) {
+    return;
+  }
+
+  mInitCounter++;
+  TRACK_LOG(LogLevel::Debug, ("Init the video encoder %d times", mInitCounter));
+  VideoSegment::ConstChunkIterator iter(aSegment);
+  while (!iter.IsEnded()) {
+   VideoChunk chunk = *iter;
+   if (!chunk.IsNull()) {
+     gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
+     gfx::IntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
+     nsresult rv = Init(imgsize.width, imgsize.height,
+                        intrinsicSize.width, intrinsicSize.height);
+
+     if (NS_FAILED(rv)) {
+       LOG("[VideoTrackEncoder]: Fail to initialize the encoder!");
+       NotifyCancel();
+     }
+     break;
+   }
+
+   iter.Next();
+  }
+}
+
+void
+VideoTrackEncoder::SetCurrentFrames(const VideoSegment& aSegment)
+{
+  if (mCanceled) {
+    return;
+  }
+
+  Init(aSegment);
+  AppendVideoSegment(aSegment);
+}
+
+void
 VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
                                             StreamTime aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }
 
+  if (!(aTrackEvents == TRACK_EVENT_CREATED ||
+       aTrackEvents == TRACK_EVENT_ENDED)) {
+    return;
+  }
+
   const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia);
 
    // Check and initialize parameters for codec encoder.
-  if (!mInitialized) {
-    mInitCounter++;
-    TRACK_LOG(LogLevel::Debug, ("Init the video encoder %d times", mInitCounter));
-    VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
-    while (!iter.IsEnded()) {
-      VideoChunk chunk = *iter;
-      if (!chunk.IsNull()) {
-        gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
-        gfx::IntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
-        nsresult rv = Init(imgsize.width, imgsize.height,
-                           intrinsicSize.width, intrinsicSize.height,
-                           aGraph->GraphRate());
-        if (NS_FAILED(rv)) {
-          LOG("[VideoTrackEncoder]: Fail to initialize the encoder!");
-          NotifyCancel();
-        }
-        break;
-      }
-
-      iter.Next();
-    }
-
-    mNotInitDuration += aQueuedMedia.GetDuration();
-    if (!mInitialized &&
-        (mNotInitDuration / aGraph->GraphRate() > INIT_FAILED_DURATION) &&
-        mInitCounter > 1) {
-      LOG("[VideoTrackEncoder]: Initialize failed for 30s.");
-      NotifyEndOfStream();
-      return;
-    }
-  }
+  Init(video);
 
   AppendVideoSegment(video);
 
   // The stream has stopped and reached the end of track.
   if (aTrackEvents == TrackEventCommand::TRACK_EVENT_ENDED) {
     LOG("[VideoTrackEncoder]: Receive TRACK_EVENT_ENDED .");
     NotifyEndOfStream();
+    mFirstFrame = true;
   }
 
 }
 
 nsresult
 VideoTrackEncoder::AppendVideoSegment(const VideoSegment& aSegment)
 {
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
@@ -261,30 +277,45 @@ VideoTrackEncoder::AppendVideoSegment(co
     mTotalFrameDuration += chunk.GetDuration();
     mLastFrameDuration += chunk.GetDuration();
     // Send only the unique video frames for encoding.
     // Or if we got the same video chunks more than 1 seconds,
     // force to send into encoder.
     if ((mLastFrame != chunk.mFrame) ||
         (mLastFrameDuration >= mTrackRate)) {
       RefPtr<layers::Image> image = chunk.mFrame.GetImage();
+
+      // FIXME: see bug 1290777. We should remove the usage of duration here and
+      // in |GetEncodedTrack|.
+      StreamTime duration;
+      if (mFirstFrame)
+      {
+        duration = chunk.GetDuration();
+        mFirstFrame = false;
+      } else {
+        MOZ_ASSERT(chunk.mTimeStamp >= mLastFrameTimeStamp);
+        TimeDuration timeDuration = chunk.mTimeStamp - mLastFrameTimeStamp;
+        duration = SecondsToMediaTime(timeDuration.ToSeconds());
+      }
+
       // Because we may get chunks with a null image (due to input blocking),
       // accumulate duration and give it to the next frame that arrives.
       // Canonically incorrect - the duration should go to the previous frame
       // - but that would require delaying until the next frame arrives.
       // Best would be to do like OMXEncoder and pass an effective timestamp
       // in with each frame.
       if (image) {
         mRawSegment.AppendFrame(image.forget(),
-                                mLastFrameDuration,
+                                duration,
                                 chunk.mFrame.GetIntrinsicSize(),
                                 PRINCIPAL_HANDLE_NONE,
                                 chunk.mFrame.GetForceBlack());
         mLastFrameDuration = 0;
       }
+      mLastFrameTimeStamp = chunk.mTimeStamp;
     }
     mLastFrame.TakeFrom(&chunk.mFrame);
     iter.Next();
   }
 
   if (mRawSegment.GetDuration() > 0) {
     mReentrantMonitor.NotifyAll();
   }
@@ -294,17 +325,17 @@ VideoTrackEncoder::AppendVideoSegment(co
 
 void
 VideoTrackEncoder::NotifyEndOfStream()
 {
   // If source video track is muted till the end of encoding, initialize the
   // encoder with default frame width, frame height, and track rate.
   if (!mCanceled && !mInitialized) {
     Init(DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT,
-         DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT, DEFAULT_TRACK_RATE);
+         DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT);
   }
 
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
   mEndOfStream = true;
   mReentrantMonitor.NotifyAll();
 }
 
 size_t
--- a/dom/media/encoder/TrackEncoder.h
+++ b/dom/media/encoder/TrackEncoder.h
@@ -243,26 +243,28 @@ protected:
   AudioSegment mRawSegment;
 
   uint32_t mAudioBitrate;
 };
 
 class VideoTrackEncoder : public TrackEncoder
 {
 public:
-  VideoTrackEncoder()
+  explicit VideoTrackEncoder(TrackRate aTrackRate)
     : TrackEncoder()
     , mFrameWidth(0)
     , mFrameHeight(0)
     , mDisplayWidth(0)
     , mDisplayHeight(0)
-    , mTrackRate(0)
+    , mTrackRate(aTrackRate)
     , mTotalFrameDuration(0)
     , mLastFrameDuration(0)
     , mVideoBitrate(0)
+    , mLastFrameTimeStamp(TimeStamp::Now())
+    , mFirstFrame(true)
   {}
 
   /**
    * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
                                 StreamTime aTrackOffset,
@@ -272,26 +274,38 @@ public:
   * Measure size of mRawSegment
   */
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
   void SetBitrate(const uint32_t aBitrate) override
   {
     mVideoBitrate = aBitrate;
   }
+
+  void Init(const VideoSegment& aSegment);
+
+  void SetCurrentFrames(const VideoSegment& aSegment);
+
+  StreamTime SecondsToMediaTime(double aS) const
+  {
+    NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
+                 "Bad seconds");
+    return mTrackRate * aS;
+  }
+
 protected:
   /**
    * Initialized the video encoder. In order to collect the value of width and
    * height of source frames, this initialization is delayed until we have
    * received the first valid video frame from MediaStreamGraph;
    * mReentrantMonitor will be notified after it has successfully initialized,
    * and this method is called on the MediaStramGraph thread.
    */
   virtual nsresult Init(int aWidth, int aHeight, int aDisplayWidth,
-                        int aDisplayHeight, TrackRate aTrackRate) = 0;
+                        int aDisplayHeight) = 0;
 
   /**
    * Appends source video frames to mRawSegment. We only append the source chunk
    * if it is unique to mLastChunk. Called on the MediaStreamGraph thread.
    */
   nsresult AppendVideoSegment(const VideoSegment& aSegment);
 
   /**
@@ -340,13 +354,17 @@ protected:
   StreamTime mLastFrameDuration;
 
   /**
    * A segment queue of audio track data, protected by mReentrantMonitor.
    */
   VideoSegment mRawSegment;
 
   uint32_t mVideoBitrate;
+
+private:
+  TimeStamp mLastFrameTimeStamp;
+  bool mFirstFrame;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/encoder/VP8TrackEncoder.cpp
+++ b/dom/media/encoder/VP8TrackEncoder.cpp
@@ -23,18 +23,18 @@ LazyLogModule gVP8TrackEncoderLog("VP8Tr
 // Debug logging macro with object pointer and class name.
 
 #define DEFAULT_BITRATE_BPS 2500000
 #define DEFAULT_ENCODE_FRAMERATE 30
 
 using namespace mozilla::gfx;
 using namespace mozilla::layers;
 
-VP8TrackEncoder::VP8TrackEncoder()
-  : VideoTrackEncoder()
+VP8TrackEncoder::VP8TrackEncoder(TrackRate aTrackRate)
+  : VideoTrackEncoder(aTrackRate)
   , mEncodedFrameDuration(0)
   , mEncodedTimestamp(0)
   , mRemainingTicks(0)
   , mVPXContext(new vpx_codec_ctx_t())
   , mVPXImageWrapper(new vpx_image_t())
 {
   MOZ_COUNT_CTOR(VP8TrackEncoder);
 }
@@ -48,26 +48,24 @@ VP8TrackEncoder::~VP8TrackEncoder()
   if (mVPXImageWrapper) {
     vpx_img_free(mVPXImageWrapper);
   }
   MOZ_COUNT_DTOR(VP8TrackEncoder);
 }
 
 nsresult
 VP8TrackEncoder::Init(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
-                      int32_t aDisplayHeight,TrackRate aTrackRate)
+                      int32_t aDisplayHeight)
 {
-  if (aWidth < 1 || aHeight < 1 || aDisplayWidth < 1 || aDisplayHeight < 1
-      || aTrackRate <= 0) {
+  if (aWidth < 1 || aHeight < 1 || aDisplayWidth < 1 || aDisplayHeight < 1) {
     return NS_ERROR_FAILURE;
   }
 
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
 
-  mTrackRate = aTrackRate;
   mEncodedFrameRate = DEFAULT_ENCODE_FRAMERATE;
   mEncodedFrameDuration = mTrackRate / mEncodedFrameRate;
   mFrameWidth = aWidth;
   mFrameHeight = aHeight;
   mDisplayWidth = aDisplayWidth;
   mDisplayHeight = aDisplayHeight;
 
   // Encoder configuration structure.
--- a/dom/media/encoder/VP8TrackEncoder.h
+++ b/dom/media/encoder/VP8TrackEncoder.h
@@ -24,27 +24,26 @@ typedef struct vpx_image vpx_image_t;
 class VP8TrackEncoder : public VideoTrackEncoder
 {
   enum EncodeOperation {
     ENCODE_NORMAL_FRAME, // VP8 track encoder works normally.
     ENCODE_I_FRAME, // The next frame will be encoded as I-Frame.
     SKIP_FRAME, // Skip the next frame.
   };
 public:
-  VP8TrackEncoder();
+  explicit VP8TrackEncoder(TrackRate aTrackRate);
   virtual ~VP8TrackEncoder();
 
   already_AddRefed<TrackMetadataBase> GetMetadata() final override;
 
   nsresult GetEncodedTrack(EncodedFrameContainer& aData) final override;
 
 protected:
   nsresult Init(int32_t aWidth, int32_t aHeight,
-                int32_t aDisplayWidth, int32_t aDisplayHeight,
-                TrackRate aTrackRate) final override;
+                int32_t aDisplayWidth, int32_t aDisplayHeight) final override;
 
 private:
   // Calculate the target frame's encoded duration.
   StreamTime CalculateEncodedDuration(StreamTime aDurationCopied);
 
   // Calculate the mRemainingTicks for next target frame.
   StreamTime CalculateRemainingTicks(StreamTime aDurationCopied,
                                      StreamTime aEncodedDuration);
--- a/dom/media/gtest/TestMediaFormatReader.cpp
+++ b/dom/media/gtest/TestMediaFormatReader.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/TaskQueue.h"
 #include "ImageContainer.h"
 #include "Layers.h"
 #include "MediaData.h"
 #include "MediaFormatReader.h"
 #include "MP4Decoder.h"
 #include "MockMediaDecoderOwner.h"
 #include "MockMediaResource.h"
+#include "VideoFrameContainer.h"
 
 using namespace mozilla;
 using namespace mozilla::dom;
 
 class MockMP4Decoder : public MP4Decoder
 {
 public:
   MockMP4Decoder()
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -170,74 +170,73 @@ private:
   mozilla::gfx::IntSize mImageSize;
   nsTArray<uint8_t> mSourceBuffer;
 };
 
 struct InitParam {
   bool mShouldSucceed;  // This parameter should cause success or fail result
   int  mWidth;          // frame width
   int  mHeight;         // frame height
-  mozilla::TrackRate mTrackRate; // track rate. 90K is the most commond track rate.
 };
 
 class TestVP8TrackEncoder: public VP8TrackEncoder
 {
 public:
+  explicit TestVP8TrackEncoder(TrackRate aTrackRate = 90000)
+    : VP8TrackEncoder(aTrackRate) {}
+
   ::testing::AssertionResult TestInit(const InitParam &aParam)
   {
-    nsresult result = Init(aParam.mWidth, aParam.mHeight, aParam.mWidth, aParam.mHeight, aParam.mTrackRate);
+    nsresult result = Init(aParam.mWidth, aParam.mHeight, aParam.mWidth, aParam.mHeight);
 
     if (((NS_FAILED(result) && aParam.mShouldSucceed)) || (NS_SUCCEEDED(result) && !aParam.mShouldSucceed))
     {
       return ::testing::AssertionFailure()
                 << " width = " << aParam.mWidth
-                << " height = " << aParam.mHeight
-                << " TrackRate = " << aParam.mTrackRate << ".";
+                << " height = " << aParam.mHeight;
     }
     else
     {
       return ::testing::AssertionSuccess();
     }
   }
 };
 
 // Init test
 TEST(VP8VideoTrackEncoder, Initialization)
 {
   InitParam params[] = {
     // Failure cases.
-    { false, 640, 480, 0 },      // Trackrate should be larger than 1.
-    { false, 640, 480, -1 },     // Trackrate should be larger than 1.
-    { false, 0, 0, 90000 },      // Height/ width should be larger than 1.
-    { false, 0, 1, 90000 },      // Height/ width should be larger than 1.
-    { false, 1, 0, 90000},       // Height/ width should be larger than 1.
+    { false, 0, 0},      // Height/ width should be larger than 1.
+    { false, 0, 1},      // Height/ width should be larger than 1.
+    { false, 1, 0},       // Height/ width should be larger than 1.
 
     // Success cases
-    { true, 640, 480, 90000},    // Standard VGA
-    { true, 800, 480, 90000},    // Standard WVGA
-    { true, 960, 540, 90000},    // Standard qHD
-    { true, 1280, 720, 90000}    // Standard HD
+    { true, 640, 480},    // Standard VGA
+    { true, 800, 480},    // Standard WVGA
+    { true, 960, 540},    // Standard qHD
+    { true, 1280, 720}    // Standard HD
   };
 
   for (size_t i = 0; i < ArrayLength(params); i++)
   {
     TestVP8TrackEncoder encoder;
     EXPECT_TRUE(encoder.TestInit(params[i]));
   }
 }
 
 // Get MetaData test
 TEST(VP8VideoTrackEncoder, FetchMetaData)
 {
   InitParam params[] = {
     // Success cases
-    { true, 640, 480, 90000},    // Standard VGA
-    { true, 800, 480, 90000},    // Standard WVGA
-    { true, 960, 540, 90000},    // Standard qHD
-    { true, 1280, 720, 90000}    // Standard HD
+    { true, 640, 480},    // Standard VGA
+    { true, 800, 480},    // Standard WVGA
+    { true, 960, 540},    // Standard qHD
+    { true, 1280, 720}    // Standard HD
   };
 
   for (size_t i = 0; i < ArrayLength(params); i++)
   {
     TestVP8TrackEncoder encoder;
     EXPECT_TRUE(encoder.TestInit(params[i]));
 
     RefPtr<TrackMetadataBase> meta = encoder.GetMetadata();
@@ -249,17 +248,17 @@ TEST(VP8VideoTrackEncoder, FetchMetaData
   }
 }
 
 // Encode test
 TEST(VP8VideoTrackEncoder, FrameEncode)
 {
   // Initiate VP8 encoder
   TestVP8TrackEncoder encoder;
-  InitParam param = {true, 640, 480, 90000};
+  InitParam param = {true, 640, 480};
   encoder.TestInit(param);
 
   // Create YUV images as source.
   nsTArray<RefPtr<Image>> images;
   YUVBufferGenerator generator;
   generator.Init(mozilla::gfx::IntSize(640, 480));
   generator.Generate(images);
 
@@ -271,29 +270,29 @@ TEST(VP8VideoTrackEncoder, FrameEncode)
     RefPtr<Image> image = images[i];
     segment.AppendFrame(image.forget(),
                         mozilla::StreamTime(90000),
                         generator.GetSize(),
                         PRINCIPAL_HANDLE_NONE);
   }
 
   // track change notification.
-  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);
+  encoder.SetCurrentFrames(segment);
 
   // Pull Encoded Data back from encoder.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
 }
 
 // EOS test
 TEST(VP8VideoTrackEncoder, EncodeComplete)
 {
   // Initiate VP8 encoder
   TestVP8TrackEncoder encoder;
-  InitParam param = {true, 640, 480, 90000};
+  InitParam param = {true, 640, 480};
   encoder.TestInit(param);
 
   // track end notification.
   VideoSegment segment;
   encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, TrackEventCommand::TRACK_EVENT_ENDED, segment);
 
   // Pull Encoded Data back from encoder. Since we have sent
   // EOS to encoder, encoder.GetEncodedTrack should return
--- a/dom/media/gtest/TestWebMWriter.cpp
+++ b/dom/media/gtest/TestWebMWriter.cpp
@@ -23,21 +23,23 @@ public:
     }
     return false;
   }
 };
 
 class WebMVP8TrackEncoder: public VP8TrackEncoder
 {
 public:
+  explicit WebMVP8TrackEncoder(TrackRate aTrackRate = 90000)
+    : VP8TrackEncoder(aTrackRate) {}
+
   bool TestVP8Creation(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
-                       int32_t aDisplayHeight, TrackRate aTrackRate)
+                       int32_t aDisplayHeight)
   {
-    if (NS_SUCCEEDED(Init(aWidth, aHeight, aDisplayWidth, aDisplayHeight,
-                          aTrackRate))) {
+    if (NS_SUCCEEDED(Init(aWidth, aHeight, aDisplayWidth, aDisplayHeight))) {
       return true;
     }
     return false;
   }
 };
 
 const uint64_t FIXED_DURATION = 1000000;
 const uint32_t FIXED_FRAMESIZE = 500;
@@ -55,17 +57,17 @@ public:
     EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels, aSampleRate));
     RefPtr<TrackMetadataBase> opusMeta = opusEncoder.GetMetadata();
     SetMetadata(opusMeta);
   }
   void SetVP8Metadata(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
                       int32_t aDisplayHeight,TrackRate aTrackRate) {
     WebMVP8TrackEncoder vp8Encoder;
     EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth,
-                                           aDisplayHeight, aTrackRate));
+                                           aDisplayHeight));
     RefPtr<TrackMetadataBase> vp8Meta = vp8Encoder.GetMetadata();
     SetMetadata(vp8Meta);
   }
 
   // When we append an I-Frame into WebM muxer, the muxer will treat previous
   // data as "a cluster".
   // In these test cases, we will call the function many times to enclose the
   // previous cluster so that we can retrieve data by |GetContainerData|.
--- a/dom/media/imagecapture/CaptureTask.cpp
+++ b/dom/media/imagecapture/CaptureTask.cpp
@@ -10,16 +10,42 @@
 #include "mozilla/dom/ImageEncoder.h"
 #include "mozilla/dom/MediaStreamTrack.h"
 #include "mozilla/dom/VideoStreamTrack.h"
 #include "gfxUtils.h"
 #include "nsThreadUtils.h"
 
 namespace mozilla {
 
+class CaptureTask::MediaStreamEventListener : public MediaStreamTrackListener
+{
+public:
+  explicit MediaStreamEventListener(CaptureTask* aCaptureTask)
+    : mCaptureTask(aCaptureTask) {};
+
+  // MediaStreamListener methods.
+  void NotifyEnded() override
+  {
+    if(!mCaptureTask->mImageGrabbedOrTrackEnd) {
+      mCaptureTask->PostTrackEndEvent();
+    }
+  }
+
+private:
+  CaptureTask* mCaptureTask;
+};
+
+CaptureTask::CaptureTask(dom::ImageCapture* aImageCapture)
+  : mImageCapture(aImageCapture)
+  , mEventListener(new MediaStreamEventListener(this))
+  , mImageGrabbedOrTrackEnd(false)
+  , mPrincipalChanged(false)
+{
+}
+
 nsresult
 CaptureTask::TaskComplete(already_AddRefed<dom::Blob> aBlob, nsresult aRv)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   DetachTrack();
 
   nsresult rv;
@@ -50,47 +76,45 @@ CaptureTask::TaskComplete(already_AddRef
 
 void
 CaptureTask::AttachTrack()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   dom::VideoStreamTrack* track = mImageCapture->GetVideoStreamTrack();
   track->AddPrincipalChangeObserver(this);
-  track->AddListener(this);
+  track->AddListener(mEventListener.get());
+  track->AddDirectListener(this);
 }
 
 void
 CaptureTask::DetachTrack()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   dom::VideoStreamTrack* track = mImageCapture->GetVideoStreamTrack();
   track->RemovePrincipalChangeObserver(this);
-  track->RemoveListener(this);
+  track->RemoveListener(mEventListener.get());
+  track->RemoveDirectListener(this);
 }
 
 void
 CaptureTask::PrincipalChanged(dom::MediaStreamTrack* aMediaStreamTrack)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mPrincipalChanged = true;
 }
 
 void
-CaptureTask::NotifyQueuedChanges(MediaStreamGraph* aGraph,
-                                 StreamTime aTrackOffset,
-                                 const MediaSegment& aQueuedMedia)
+CaptureTask::SetCurrentFrames(const VideoSegment& aSegment)
 {
   if (mImageGrabbedOrTrackEnd) {
     return;
   }
 
-  MOZ_ASSERT(aQueuedMedia.GetType() == MediaSegment::VIDEO);
-
   // Callback for encoding complete, it calls on main thread.
   class EncodeComplete : public dom::EncodeCompleteCallback
   {
   public:
     explicit EncodeComplete(CaptureTask* aTask) : mTask(aTask) {}
 
     nsresult ReceiveBlob(already_AddRefed<dom::Blob> aBlob) override
     {
@@ -99,21 +123,23 @@ CaptureTask::NotifyQueuedChanges(MediaSt
       mTask = nullptr;
       return NS_OK;
     }
 
   protected:
     RefPtr<CaptureTask> mTask;
   };
 
-  VideoSegment* video =
-    const_cast<VideoSegment*> (static_cast<const VideoSegment*>(&aQueuedMedia));
-  VideoSegment::ChunkIterator iter(*video);
+  VideoSegment::ConstChunkIterator iter(aSegment);
+
+
+
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
+
     // Extract the first valid video frame.
     VideoFrame frame;
     if (!chunk.IsNull()) {
       RefPtr<layers::Image> image;
       if (chunk.mFrame.GetForceBlack()) {
         // Create a black image.
         image = VideoFrame::CreateBlackImage(chunk.mFrame.GetIntrinsicSize());
       } else {
@@ -137,24 +163,16 @@ CaptureTask::NotifyQueuedChanges(MediaSt
       }
       return;
     }
     iter.Next();
   }
 }
 
 void
-CaptureTask::NotifyEnded()
-{
-  if(!mImageGrabbedOrTrackEnd) {
-    PostTrackEndEvent();
-  }
-}
-
-void
 CaptureTask::PostTrackEndEvent()
 {
   mImageGrabbedOrTrackEnd = true;
 
   // Got track end or finish event, stop the task.
   class TrackEndRunnable : public Runnable
   {
   public:
--- a/dom/media/imagecapture/CaptureTask.h
+++ b/dom/media/imagecapture/CaptureTask.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef CAPTURETASK_H
 #define CAPTURETASK_H
 
 #include "MediaStreamGraph.h"
 #include "MediaStreamListener.h"
 #include "PrincipalChangeObserver.h"
+#include "MediaStreamVideoSink.h"
 
 namespace mozilla {
 
 namespace dom {
 class Blob;
 class ImageCapture;
 class MediaStreamTrack;
 } // namespace dom
@@ -24,26 +25,25 @@ class MediaStreamTrack;
  * ImageEncoder. The whole procedures start at AttachTrack(), it will add this
  * class into MediaStream and retrieves an image in MediaStreamGraph thread.
  * Once the image is retrieved, it will be sent to ImageEncoder and the encoded
  * blob will be sent out via encoder callback in main thread.
  *
  * CaptureTask holds a reference of ImageCapture to ensure ImageCapture won't be
  * released during the period of the capturing process described above.
  */
-class CaptureTask : public MediaStreamTrackListener,
+class CaptureTask : public MediaStreamVideoSink,
                     public dom::PrincipalChangeObserver<dom::MediaStreamTrack>
 {
 public:
-  // MediaStreamTrackListener methods.
-  void NotifyQueuedChanges(MediaStreamGraph* aGraph,
-                           StreamTime aTrackOffset,
-                           const MediaSegment& aQueuedMedia) override;
+  class MediaStreamEventListener;
 
-  void NotifyEnded() override;
+  // MediaStreamVideoSink methods.
+  void SetCurrentFrames(const VideoSegment& aSegment) override;
+  void ClearFrames() override {}
 
   // PrincipalChangeObserver<MediaStreamTrack> method.
   void PrincipalChanged(dom::MediaStreamTrack* aMediaStreamTrack) override;
 
   // CaptureTask methods.
 
   // It is called when aBlob is ready to post back to script in company with
   // aRv == NS_OK. If aRv is not NS_OK, it will post an error event to script.
@@ -56,33 +56,32 @@ public:
   // It should be on main thread only.
   void AttachTrack();
 
   // Remove listeners from MediaStreamTrack and PrincipalChangeObserver.
   // It should be on main thread only.
   void DetachTrack();
 
   // CaptureTask should be created on main thread.
-  explicit CaptureTask(dom::ImageCapture* aImageCapture)
-    : mImageCapture(aImageCapture)
-    , mImageGrabbedOrTrackEnd(false)
-    , mPrincipalChanged(false) {}
+  explicit CaptureTask(dom::ImageCapture* aImageCapture);
 
 protected:
   virtual ~CaptureTask() {}
 
   // Post a runnable on main thread to end this task and call TaskComplete to post
   // error event to script. It is called off-main-thread.
   void PostTrackEndEvent();
 
   // The ImageCapture associates with this task. This reference count should not
   // change in this class unless it clears this reference after a blob or error
   // event to script.
   RefPtr<dom::ImageCapture> mImageCapture;
 
+  RefPtr<MediaStreamEventListener> mEventListener;
+
   // True when an image is retrieved from MediaStreamGraph or MediaStreamGraph
   // sends a track finish, end, or removed event.
   bool mImageGrabbedOrTrackEnd;
 
   // True after MediaStreamTrack principal changes while waiting for a photo
   // to finish and we should raise a security error.
   bool mPrincipalChanged;
 };
--- a/dom/media/imagecapture/ImageCapture.cpp
+++ b/dom/media/imagecapture/ImageCapture.cpp
@@ -142,18 +142,17 @@ ImageCapture::TakePhoto(ErrorResult& aRe
 
   // Try if MediaEngine supports taking photo.
   nsresult rv = TakePhotoByMediaEngine();
 
   // It falls back to MediaStreamGraph image capture if MediaEngine doesn't
   // support TakePhoto().
   if (rv == NS_ERROR_NOT_IMPLEMENTED) {
     IC_LOG("MediaEngine doesn't support TakePhoto(), it falls back to MediaStreamGraph.");
-    RefPtr<CaptureTask> task =
-      new CaptureTask(this);
+    RefPtr<CaptureTask> task = new CaptureTask(this);
 
     // It adds itself into MediaStreamGraph, so ImageCapture doesn't need to hold
     // the reference.
     task->AttachTrack();
   }
 }
 
 nsresult
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -493,24 +493,26 @@ DecodedStream::SendAudio(double aVolume,
 }
 
 static void
 WriteVideoToMediaStream(MediaStream* aStream,
                         layers::Image* aImage,
                         int64_t aEndMicroseconds,
                         int64_t aStartMicroseconds,
                         const mozilla::gfx::IntSize& aIntrinsicSize,
+                        const TimeStamp& aTimeStamp,
                         VideoSegment* aOutput,
                         const PrincipalHandle& aPrincipalHandle)
 {
   RefPtr<layers::Image> image = aImage;
   StreamTime duration =
       aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
       aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
-  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize, aPrincipalHandle);
+  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
+                       aPrincipalHandle, false, aTimeStamp);
 }
 
 static bool
 ZeroDurationAtLastChunk(VideoSegment& aInput)
 {
   // Get the last video frame's start time in VideoSegment aInput.
   // If the start time is equal to the duration of aInput, means the last video
   // frame's duration is zero.
@@ -532,38 +534,48 @@ DecodedStream::SendVideo(bool aIsSameOri
   TrackID videoTrackId = mInfo.mVideo.mTrackId;
   AutoTArray<RefPtr<MediaData>, 10> video;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the VideoData because VideoData
   // is ref-counted.
   mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
 
+  // tracksStartTimeStamp might be null when the SourceMediaStream has not
+  // yet been added to the MediaStreamGraph.
+  TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
+  if (tracksStartTimeStamp.IsNull()) {
+    tracksStartTimeStamp = TimeStamp::Now();
+  }
+
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i]->As<VideoData>();
 
     if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
 
       // TODO: |mLastVideoImage| should come from the last image rendered
       // by the state machine. This will avoid the black frame when capture
       // happens in the middle of playback (especially in th middle of a
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
       WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
-          mData->mNextVideoTime, mData->mLastVideoImageDisplaySize, &output,
-          aPrincipalHandle);
+          mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
+          tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
+          &output, aPrincipalHandle);
       mData->mNextVideoTime = v->mTime;
     }
 
     if (mData->mNextVideoTime < v->GetEndTime()) {
       WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
-          mData->mNextVideoTime, v->mDisplay, &output, aPrincipalHandle);
+          mData->mNextVideoTime, v->mDisplay,
+          tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
+          &output, aPrincipalHandle);
       mData->mNextVideoTime = v->GetEndTime();
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
     }
   }
 
   // Check the output is not empty.
   if (output.GetLastFrame()) {
@@ -580,17 +592,19 @@ DecodedStream::SendVideo(bool aIsSameOri
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (mData->mEOSVideoCompensation) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
       WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
           mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
-          mData->mLastVideoImageDisplaySize, &endSegment, aPrincipalHandle);
+          mData->mLastVideoImageDisplaySize,
+          tracksStartTimeStamp + TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
+          &endSegment, aPrincipalHandle);
       mData->mNextVideoTime += deviation_usec;
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (!aIsSameOrigin) {
         endSegment.ReplaceWithDisabled();
       }
       sourceStream->AppendToTrack(videoTrackId, &endSegment);
     }
     sourceStream->EndTrack(videoTrackId);
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -122,16 +122,17 @@ EXPORTS += [
     'MediaQueue.h',
     'MediaRecorder.h',
     'MediaResource.h',
     'MediaResourceCallback.h',
     'MediaSegment.h',
     'MediaStatistics.h',
     'MediaStreamGraph.h',
     'MediaStreamListener.h',
+    'MediaStreamVideoSink.h',
     'MediaTimer.h',
     'MediaTrack.h',
     'MediaTrackList.h',
     'MP3Decoder.h',
     'MP3Demuxer.h',
     'MP3FrameParser.h',
     'NextFrameSeekTask.h',
     'nsIDocumentActivity.h',
@@ -234,16 +235,17 @@ UNIFIED_SOURCES += [
     'MediaPrefs.cpp',
     'MediaRecorder.cpp',
     'MediaResource.cpp',
     'MediaShutdownManager.cpp',
     'MediaStreamError.cpp',
     'MediaStreamGraph.cpp',
     'MediaStreamListener.cpp',
     'MediaStreamTrack.cpp',
+    'MediaStreamVideoSink.cpp',
     'MediaTimer.cpp',
     'MediaTrack.cpp',
     'MediaTrackList.cpp',
     'MP3Decoder.cpp',
     'MP3Demuxer.cpp',
     'MP3FrameParser.cpp',
     'NextFrameSeekTask.cpp',
     'QueueObject.cpp',
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -20,16 +20,17 @@ extern "C" {
 #include "mozilla/TimeStamp.h"
 #include "VorbisUtils.h"
 #include "MediaMetadataManager.h"
 #include "nsAutoPtr.h"
 #include "nsISeekableStream.h"
 #include "gfx2DGlue.h"
 #include "mozilla/Telemetry.h"
 #include "nsPrintfCString.h"
+#include "VideoFrameContainer.h"
 
 using namespace mozilla::gfx;
 using namespace mozilla::media;
 
 namespace mozilla {
 
 // On B2G estimate the buffered ranges rather than calculating them explicitly.
 // This prevents us doing I/O on the main thread, which is prohibited in B2G.
@@ -875,25 +876,26 @@ nsresult OggReader::DecodeTheora(ogg_pac
   for (uint32_t i=0; i < 3; ++i) {
     b.mPlanes[i].mData = buffer[i].data;
     b.mPlanes[i].mHeight = buffer[i].height;
     b.mPlanes[i].mWidth = buffer[i].width;
     b.mPlanes[i].mStride = buffer[i].stride;
     b.mPlanes[i].mOffset = b.mPlanes[i].mSkip = 0;
   }
 
-  RefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
-                                            mDecoder->GetImageContainer(),
-                                            mResource.Tell(),
-                                            time,
-                                            endTime - time,
-                                            b,
-                                            isKeyframe,
-                                            aPacket->granulepos,
-                                            mPicture);
+  RefPtr<VideoData> v =
+    VideoData::CreateAndCopyData(mInfo.mVideo,
+                                 mDecoder->GetImageContainer(),
+                                 mResource.Tell(),
+                                 time,
+                                 endTime - time,
+                                 b,
+                                 isKeyframe,
+                                 aPacket->granulepos,
+                                 mPicture);
   if (!v) {
     // There may be other reasons for this error, but for
     // simplicity just assume the worst case: out of memory.
     NS_WARNING("Failed to allocate memory for video frame");
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mVideoQueue.Push(v);
   return NS_OK;
--- a/dom/media/omx/MediaOmxReader.cpp
+++ b/dom/media/omx/MediaOmxReader.cpp
@@ -12,16 +12,17 @@
 #include "VideoUtils.h"
 #include "MediaOmxDecoder.h"
 #include "AbstractMediaDecoder.h"
 #include "AudioChannelService.h"
 #include "OmxDecoder.h"
 #include "MPAPI.h"
 #include "gfx2DGlue.h"
 #include "MediaStreamSource.h"
+#include "VideoFrameContainer.h"
 
 #define MAX_DROPPED_FRAMES 25
 // Try not to spend more than this much time in a single call to DecodeVideoFrame.
 #define MAX_VIDEO_DECODE_SECONDS 0.1
 
 using namespace mozilla::gfx;
 using namespace mozilla::media;
 using namespace android;
@@ -403,35 +404,35 @@ bool MediaOmxReader::DecodeVideoFrame(bo
 
       b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData);
       b.mPlanes[2].mStride = frame.Cr.mStride;
       b.mPlanes[2].mHeight = frame.Cr.mHeight;
       b.mPlanes[2].mWidth = frame.Cr.mWidth;
       b.mPlanes[2].mOffset = frame.Cr.mOffset;
       b.mPlanes[2].mSkip = frame.Cr.mSkip;
 
-      v = VideoData::Create(mInfo.mVideo,
-                            mDecoder->GetImageContainer(),
-                            pos,
-                            frame.mTimeUs,
-                            1, // We don't know the duration.
-                            b,
-                            frame.mKeyFrame,
-                            -1,
-                            picture);
+      v = VideoData::CreateAndCopyData(mInfo.mVideo,
+                                       mDecoder->GetImageContainer(),
+                                       pos,
+                                       frame.mTimeUs,
+                                       1, // We don't know the duration.
+                                       b,
+                                       frame.mKeyFrame,
+                                       -1,
+                                       picture);
     } else {
-      v = VideoData::Create(mInfo.mVideo,
-                            mDecoder->GetImageContainer(),
-                            pos,
-                            frame.mTimeUs,
-                            1, // We don't know the duration.
-                            frame.mGraphicBuffer,
-                            frame.mKeyFrame,
-                            -1,
-                            picture);
+      v = VideoData::CreateAndCopyData(mInfo.mVideo,
+                                       mDecoder->GetImageContainer(),
+                                       pos,
+                                       frame.mTimeUs,
+                                       1, // We don't know the duration.
+                                       frame.mGraphicBuffer,
+                                       frame.mKeyFrame,
+                                       -1,
+                                       picture);
     }
 
     if (!v) {
       NS_WARNING("Unable to create VideoData");
       return false;
     }
 
     a.mStats.mDecodedFrames++;
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -158,26 +158,25 @@ public:
     buffer.mPlanes[2].mWidth = mFrameWidth / 2;
     buffer.mPlanes[2].mOffset = 0;
     buffer.mPlanes[2].mSkip = 0;
 
     // Set to color white.
     memset(buffer.mPlanes[0].mData, 255, sizeY);
     memset(buffer.mPlanes[1].mData, 128, sizeCbCr);
 
-    return VideoData::Create(mInfo,
-                             mImageContainer,
-                             nullptr,
-                             aOffsetInStream,
-                             aDTS.ToMicroseconds(),
-                             aDuration.ToMicroseconds(),
-                             buffer,
-                             true,
-                             aDTS.ToMicroseconds(),
-                             mPicture);
+    return VideoData::CreateAndCopyData(mInfo,
+                                        mImageContainer,
+                                        aOffsetInStream,
+                                        aDTS.ToMicroseconds(),
+                                        aDuration.ToMicroseconds(),
+                                        buffer,
+                                        true,
+                                        aDTS.ToMicroseconds(),
+                                        mPicture);
   }
 
 private:
   VideoInfo mInfo;
   gfx::IntRect mPicture;
   uint32_t mFrameWidth;
   uint32_t mFrameHeight;
   RefPtr<layers::ImageContainer> mImageContainer;
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -163,26 +163,27 @@ TheoraDecoder::DoDecode(MediaRawData* aS
     b.mPlanes[2].mWidth = mTheoraInfo.frame_width >> hdec;
     b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
 
     IntRect pictureArea(mTheoraInfo.pic_x, mTheoraInfo.pic_y,
                         mTheoraInfo.pic_width, mTheoraInfo.pic_height);
 
     VideoInfo info;
     info.mDisplay = mInfo.mDisplay;
-    RefPtr<VideoData> v = VideoData::Create(info,
-                                            mImageContainer,
-                                            aSample->mOffset,
-                                            aSample->mTime,
-                                            aSample->mDuration,
-                                            b,
-                                            aSample->mKeyframe,
-                                            aSample->mTimecode,
-                                            mInfo.ScaledImageRect(mTheoraInfo.frame_width,
-                                                                  mTheoraInfo.frame_height));
+    RefPtr<VideoData> v =
+      VideoData::CreateAndCopyData(info,
+                                   mImageContainer,
+                                   aSample->mOffset,
+                                   aSample->mTime,
+                                   aSample->mDuration,
+                                   b,
+                                   aSample->mKeyframe,
+                                   aSample->mTimecode,
+                                   mInfo.ScaledImageRect(mTheoraInfo.frame_width,
+                                                         mTheoraInfo.frame_height));
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
       return -1;
     }
     mCallback->Output(v);
     return 0;
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -155,26 +155,27 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
 
       b.mPlanes[2].mHeight = img->d_h;
       b.mPlanes[2].mWidth = img->d_w;
     } else {
       LOG("VPX Unknown image format");
       return -1;
     }
 
-    RefPtr<VideoData> v = VideoData::Create(mInfo,
-                                            mImageContainer,
-                                            aSample->mOffset,
-                                            aSample->mTime,
-                                            aSample->mDuration,
-                                            b,
-                                            aSample->mKeyframe,
-                                            aSample->mTimecode,
-                                            mInfo.ScaledImageRect(img->d_w,
-                                                                  img->d_h));
+    RefPtr<VideoData> v =
+      VideoData::CreateAndCopyData(mInfo,
+                                   mImageContainer,
+                                   aSample->mOffset,
+                                   aSample->mTime,
+                                   aSample->mDuration,
+                                   b,
+                                   aSample->mKeyframe,
+                                   aSample->mTimecode,
+                                   mInfo.ScaledImageRect(img->d_w,
+                                                         img->d_h));
 
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
       return -1;
     }
     mCallback->Output(v);
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -36,25 +36,26 @@ VideoCallbackAdapter::Decoded(GMPVideoi4
       b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
       b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
     }
     b.mPlanes[i].mOffset = 0;
     b.mPlanes[i].mSkip = 0;
   }
 
   gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(), decodedFrame->Height());
-  RefPtr<VideoData> v = VideoData::Create(mVideoInfo,
-                                            mImageContainer,
-                                            mLastStreamOffset,
-                                            decodedFrame->Timestamp(),
-                                            decodedFrame->Duration(),
-                                            b,
-                                            false,
-                                            -1,
-                                            pictureRegion);
+  RefPtr<VideoData> v =
+    VideoData::CreateAndCopyData(mVideoInfo,
+                                 mImageContainer,
+                                 mLastStreamOffset,
+                                 decodedFrame->Timestamp(),
+                                 decodedFrame->Duration(),
+                                 b,
+                                 false,
+                                 -1,
+                                 pictureRegion);
   if (v) {
     mCallback->Output(v);
   } else {
     mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
   }
 }
 
 void
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -139,17 +139,16 @@ public:
     int32_t offset;
     NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
 
     int64_t presentationTimeUs;
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
     RefPtr<VideoData> v =
       VideoData::CreateFromImage(mConfig,
-                                 mImageContainer,
                                  offset,
                                  presentationTimeUs,
                                  aDuration.ToMicroseconds(),
                                  img,
                                  isSync,
                                  presentationTimeUs,
                                  gfx::IntRect(0, 0,
                                               mConfig.mDisplay.width,
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -361,40 +361,38 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
     buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
     buffer.mPlanes[2].mWidth = (width+1) / 2;
     buffer.mPlanes[2].mHeight = (height+1) / 2;
     buffer.mPlanes[2].mOffset = 1;
     buffer.mPlanes[2].mSkip = 1;
 
     // Copy the image data into our own format.
     data =
-      VideoData::Create(info,
-                        mImageContainer,
-                        nullptr,
-                        aFrameRef.byte_offset,
-                        aFrameRef.composition_timestamp.ToMicroseconds(),
-                        aFrameRef.duration.ToMicroseconds(),
-                        buffer,
-                        aFrameRef.is_sync_point,
-                        aFrameRef.decode_timestamp.ToMicroseconds(),
-                        visible);
+      VideoData::CreateAndCopyData(info,
+                                   mImageContainer,
+                                   aFrameRef.byte_offset,
+                                   aFrameRef.composition_timestamp.ToMicroseconds(),
+                                   aFrameRef.duration.ToMicroseconds(),
+                                   buffer,
+                                   aFrameRef.is_sync_point,
+                                   aFrameRef.decode_timestamp.ToMicroseconds(),
+                                   visible);
     // Unlock the returned image data.
     CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
   } else {
 #ifndef MOZ_WIDGET_UIKIT
     IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
     MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
 
     RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
 
     RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
 
     data =
       VideoData::CreateFromImage(info,
-                                 mImageContainer,
                                  aFrameRef.byte_offset,
                                  aFrameRef.composition_timestamp.ToMicroseconds(),
                                  aFrameRef.duration.ToMicroseconds(),
                                  image.forget(),
                                  aFrameRef.is_sync_point,
                                  aFrameRef.decode_timestamp.ToMicroseconds(),
                                  visible);
 #else
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -284,26 +284,27 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
     if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
       b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
       b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
     } else {
       b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
       b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
     }
 
-    RefPtr<VideoData> v = VideoData::Create(mInfo,
-                                            mImageContainer,
-                                            aSample->mOffset,
-                                            pts,
-                                            duration,
-                                            b,
-                                            !!mFrame->key_frame,
-                                            -1,
-                                            mInfo.ScaledImageRect(mFrame->width,
-                                                                  mFrame->height));
+    RefPtr<VideoData> v =
+      VideoData::CreateAndCopyData(mInfo,
+                                   mImageContainer,
+                                   aSample->mOffset,
+                                   pts,
+                                   duration,
+                                   b,
+                                   !!mFrame->key_frame,
+                                   -1,
+                                   mInfo.ScaledImageRect(mFrame->width,
+                                                         mFrame->height));
 
     if (!v) {
       NS_WARNING("image allocation error.");
       return DecodeResult::FATAL_ERROR;
     }
     mCallback->Output(v);
     return DecodeResult::DECODE_FRAME;
   }
--- a/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp
+++ b/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp
@@ -420,26 +420,26 @@ GonkVideoDecoderManager::CreateVideoData
 
     CopyGraphicBuffer(srcBuffer, destBuffer);
   } else {
     textureClient = mNativeWindow->getTextureClientFromBuffer(srcBuffer.get());
     textureClient->SetRecycleCallback(GonkVideoDecoderManager::RecycleCallback, this);
     static_cast<GrallocTextureData*>(textureClient->GetInternalData())->SetMediaBuffer(aSource);
   }
 
-  RefPtr<VideoData> data = VideoData::Create(mConfig,
-                                             mImageContainer,
-                                             0, // Filled later by caller.
-                                             0, // Filled later by caller.
-                                             1, // No way to pass sample duration from muxer to
-                                                // OMX codec, so we hardcode the duration here.
-                                             textureClient,
-                                             false, // Filled later by caller.
-                                             -1,
-                                             aPicture);
+  RefPtr<VideoData> data =
+    VideoData::CreateAndCopyIntoTextureClient(mConfig,
+                                              0, // Filled later by caller.
+                                              0, // Filled later by caller.
+                                              1, // No way to pass sample duration from muxer to
+                                                 // OMX codec, so we hardcode the duration here.
+                                              textureClient,
+                                              false, // Filled later by caller.
+                                              -1,
+                                              aPicture);
   return data.forget();
 }
 
 already_AddRefed<VideoData>
 GonkVideoDecoderManager::CreateVideoDataFromDataBuffer(MediaBuffer* aSource, gfx::IntRect& aPicture)
 {
   if (!aSource->data()) {
     GVDM_LOG("No data in Video Buffer!");
@@ -489,25 +489,26 @@ GonkVideoDecoderManager::CreateVideoData
 
   b.mPlanes[2].mData = yuv420p_v;
   b.mPlanes[2].mWidth =(mFrameInfo.mWidth + 1) / 2;
   b.mPlanes[2].mHeight = (mFrameInfo.mHeight + 1) / 2;
   b.mPlanes[2].mStride = (stride + 1) / 2;
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
-  RefPtr<VideoData> data = VideoData::Create(mConfig,
-                                             mImageContainer,
-                                             0, // Filled later by caller.
-                                             0, // Filled later by caller.
-                                             1, // We don't know the duration.
-                                             b,
-                                             0, // Filled later by caller.
-                                             -1,
-                                             aPicture);
+  RefPtr<VideoData> data =
+    VideoData::CreateAndCopyData(mConfig,
+                                 mImageContainer,
+                                 0, // Filled later by caller.
+                                 0, // Filled later by caller.
+                                 1, // We don't know the duration.
+                                 b,
+                                 0, // Filled later by caller.
+                                 -1,
+                                 aPicture);
 
   return data.forget();
 }
 
 bool
 GonkVideoDecoderManager::SetVideoFormat()
 {
   // read video metadata from MediaCodec
--- a/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
+++ b/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
@@ -304,25 +304,25 @@ GonkBufferData::GetPlatformMediaData()
   }
 
   if (!mTextureClientRecycleHandler) {
     // There is no GraphicBuffer, it should fallback to normal YUV420 VideoData.
     return nullptr;
   }
 
   VideoInfo info(*mGonkPlatformLayer->GetTrackInfo()->GetAsVideoInfo());
-  RefPtr<VideoData> data = VideoData::Create(info,
-                                             mGonkPlatformLayer->GetImageContainer(),
-                                             0,
-                                             mBuffer->nTimeStamp,
-                                             1,
-                                             mTextureClientRecycleHandler->GetTextureClient(),
-                                             false,
-                                             0,
-                                             info.ImageRect());
+  RefPtr<VideoData> data =
+    VideoData::CreateAndCopyIntoTextureClient(info,
+                                              0,
+                                              mBuffer->nTimeStamp,
+                                              1,
+                                              mTextureClientRecycleHandler->GetTextureClient(),
+                                              false,
+                                              0,
+                                              info.ImageRect());
   LOG("%p, disp width %d, height %d, pic width %d, height %d, time %ld",
       this, info.mDisplay.width, info.mDisplay.height,
       info.mImage.width, info.mImage.height, mBuffer->nTimeStamp);
 
   // Get TextureClient Promise here to wait for resolved.
   RefPtr<GonkBufferData> self(this);
   mTextureClientRecycleHandler->WaitforRecycle()
     ->Then(mGonkPlatformLayer->GetTaskQueue(), __func__,
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -1019,25 +1019,26 @@ MediaDataHelper::CreateYUV420VideoData(B
   b.mPlanes[2].mData = yuv420p_v;
   b.mPlanes[2].mWidth =(width + 1) / 2;
   b.mPlanes[2].mHeight = (height + 1) / 2;
   b.mPlanes[2].mStride = (stride + 1) / 2;
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
   VideoInfo info(*mTrackInfo->GetAsVideoInfo());
-  RefPtr<VideoData> data = VideoData::Create(info,
-                                             mImageContainer,
-                                             0, // Filled later by caller.
-                                             0, // Filled later by caller.
-                                             1, // We don't know the duration.
-                                             b,
-                                             0, // Filled later by caller.
-                                             -1,
-                                             info.ImageRect());
+  RefPtr<VideoData> data =
+    VideoData::CreateAndCopyData(info,
+                                 mImageContainer,
+                                 0, // Filled later by caller.
+                                 0, // Filled later by caller.
+                                 1, // We don't know the duration.
+                                 b,
+                                 0, // Filled later by caller.
+                                 -1,
+                                 info.ImageRect());
 
   LOG("YUV420 VideoData: disp width %d, height %d, pic width %d, height %d, time %ld",
       info.mDisplay.width, info.mDisplay.height, info.mImage.width,
       info.mImage.height, aBufferData->mBuffer->nTimeStamp);
 
   return data.forget();
 }
 
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -93,17 +93,16 @@ public:
   HRESULT Init(nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
-                      ImageContainer* aContainer,
                       Image** aOutImage) override;
 
   bool SupportsConfig(IMFMediaType* aType, float aFramerate) override;
 
 private:
   RefPtr<IDirect3D9Ex> mD3D9;
   RefPtr<IDirect3DDevice9Ex> mDevice;
   RefPtr<IDirect3DDeviceManager9> mDeviceManager;
@@ -434,17 +433,16 @@ D3D9DXVA2Manager::Init(nsACString& aFail
   reporter.SetSuccessful();
 
   return S_OK;
 }
 
 HRESULT
 D3D9DXVA2Manager::CopyToImage(IMFSample* aSample,
                               const nsIntRect& aRegion,
-                              ImageContainer* aImageContainer,
                               Image** aOutImage)
 {
   RefPtr<IMFMediaBuffer> buffer;
   HRESULT hr = aSample->GetBufferByIndex(0, getter_AddRefs(buffer));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   RefPtr<IDirect3DSurface9> surface;
   hr = wmf::MFGetService(buffer,
@@ -514,17 +512,16 @@ public:
   HRESULT Init(nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
-                      ImageContainer* aContainer,
                       Image** aOutImage) override;
 
   HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) override;
 
   bool IsD3D11() override { return true; }
 
   bool SupportsConfig(IMFMediaType* aType, float aFramerate) override;
 
@@ -777,21 +774,19 @@ D3D11DXVA2Manager::CreateOutputSample(Re
 
   aSample = sample;
   return S_OK;
 }
 
 HRESULT
 D3D11DXVA2Manager::CopyToImage(IMFSample* aVideoSample,
                                const nsIntRect& aRegion,
-                               ImageContainer* aContainer,
                                Image** aOutImage)
 {
   NS_ENSURE_TRUE(aVideoSample, E_POINTER);
-  NS_ENSURE_TRUE(aContainer, E_POINTER);
   NS_ENSURE_TRUE(aOutImage, E_POINTER);
 
   // Our video frame is stored in a non-sharable ID3D11Texture2D. We need
   // to create a copy of that frame as a sharable resource, save its share
   // handle, and put that handle into the rendering pipeline.
 
   RefPtr<D3D11ShareHandleImage> image =
     new D3D11ShareHandleImage(gfx::IntSize(mWidth, mHeight), aRegion);
--- a/dom/media/platforms/wmf/DXVA2Manager.h
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -30,17 +30,16 @@ public:
   // device we're using for hardware accelerated video decoding. If we're using
   // D3D9Ex, this is an IDirect3DDeviceManager9. For D3D11 this is an
   // IMFDXGIDeviceManager. It is safe to call this on any thread.
   virtual IUnknown* GetDXVADeviceManager() = 0;
 
   // Creates an Image for the video frame stored in aVideoSample.
   virtual HRESULT CopyToImage(IMFSample* aVideoSample,
                               const nsIntRect& aRegion,
-                              layers::ImageContainer* aContainer,
                               layers::Image** aOutImage) = 0;
 
   virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) { return S_OK; }
 
   virtual bool IsD3D11() { return false; }
 
   virtual ~DXVA2Manager();
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -706,25 +706,26 @@ WMFVideoMFTManager::CreateBasicVideoFram
   media::TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   media::TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
 
   if (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
       mLayersBackend != LayersBackend::LAYERS_D3D11) {
-    RefPtr<VideoData> v = VideoData::Create(mVideoInfo,
-                                            mImageContainer,
-                                            aStreamOffset,
-                                            pts.ToMicroseconds(),
-                                            duration.ToMicroseconds(),
-                                            b,
-                                            false,
-                                            -1,
-                                            pictureRegion);
+    RefPtr<VideoData> v =
+      VideoData::CreateAndCopyData(mVideoInfo,
+                                   mImageContainer,
+                                   aStreamOffset,
+                                   pts.ToMicroseconds(),
+                                   duration.ToMicroseconds(),
+                                   b,
+                                   false,
+                                   -1,
+                                   pictureRegion);
     if (twoDBuffer) {
       twoDBuffer->Unlock2D();
     } else {
       buffer->Unlock();
     }
     v.forget(aOutVideoData);
     return S_OK;
   }
@@ -735,17 +736,16 @@ WMFVideoMFTManager::CreateBasicVideoFram
   VideoData::SetVideoDataToImage(image,
                                  mVideoInfo,
                                  b,
                                  pictureRegion,
                                  false);
 
   RefPtr<VideoData> v =
     VideoData::CreateFromImage(mVideoInfo,
-                               mImageContainer,
                                aStreamOffset,
                                pts.ToMicroseconds(),
                                duration.ToMicroseconds(),
                                image.forget(),
                                false,
                                -1,
                                pictureRegion);
 
@@ -766,27 +766,25 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
   *aOutVideoData = nullptr;
   HRESULT hr;
 
   nsIntRect pictureRegion =
     mVideoInfo.ScaledImageRect(mImageSize.width, mImageSize.height);
   RefPtr<Image> image;
   hr = mDXVA2Manager->CopyToImage(aSample,
                                   pictureRegion,
-                                  mImageContainer,
                                   getter_AddRefs(image));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(image, E_FAIL);
 
   media::TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   media::TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
-                                                   mImageContainer,
                                                    aStreamOffset,
                                                    pts.ToMicroseconds(),
                                                    duration.ToMicroseconds(),
                                                    image.forget(),
                                                    false,
                                                    -1,
                                                    pictureRegion);
 
--- a/dom/media/raw/RawReader.cpp
+++ b/dom/media/raw/RawReader.cpp
@@ -181,25 +181,26 @@ bool RawReader::DecodeVideoFrame(bool &a
   b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
 
   b.mPlanes[2].mData = b.mPlanes[1].mData + mMetadata.frameHeight * cbcrStride / 2;
   b.mPlanes[2].mStride = cbcrStride;
   b.mPlanes[2].mHeight = mMetadata.frameHeight / 2;
   b.mPlanes[2].mWidth = mMetadata.frameWidth / 2;
   b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
 
-  RefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
-                                            mDecoder->GetImageContainer(),
-                                            -1,
-                                            currentFrameTime,
-                                            (USECS_PER_S / mFrameRate),
-                                            b,
-                                            1, // In raw video every frame is a keyframe
-                                            -1,
-                                            mPicture);
+  RefPtr<VideoData> v =
+    VideoData::CreateAndCopyData(mInfo.mVideo,
+                                 mDecoder->GetImageContainer(),
+                                 -1,
+                                 currentFrameTime,
+                                 (USECS_PER_S / mFrameRate),
+                                 b,
+                                 1, // In raw video every frame is a keyframe
+                                 -1,
+                                 mPicture);
   if (!v)
     return false;
 
   mVideoQueue.Push(v);
   mCurrentFrame++;
   a.mStats.mDecodedFrames++;
 
   return true;
--- a/dom/media/webaudio/test/browser_bug1181073.js
+++ b/dom/media/webaudio/test/browser_bug1181073.js
@@ -1,76 +1,40 @@
 add_task(function*() {
   // Make the min_background_timeout_value very high to avoid problems on slow machines
   yield new Promise(resolve => SpecialPowers.pushPrefEnv({
     'set': [['dom.min_background_timeout_value', 3000]]
   }, resolve));
 
-  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "https://example.com");
-  let browser = gBrowser.selectedBrowser;
+  // Make a new tab, and put it in the background
+  yield BrowserTestUtils.withNewTab("about:blank", function*(browser) {
+    yield BrowserTestUtils.withNewTab("about:blank", function*() {
+      let time = yield ContentTask.spawn(browser, null, function () {
+        return new Promise(resolve => {
+          let start = content.performance.now();
+          let id = content.window.setInterval(function() {
+            let end = content.performance.now();
+            content.window.clearInterval(id);
+            resolve(end - start);
+          }, 0);
+        });
+      });
 
-  // Make the tab a background tab, so that setInterval will be throttled.
-  yield BrowserTestUtils.openNewForegroundTab(gBrowser);
+      ok(time > 2000, "Interval is throttled with no webaudio (" + time + " ms)");
 
-  let time = yield ContentTask.spawn(browser, null, function () {
-    return new Promise(resolve => {
-      let start = content.performance.now();
-      let id = content.window.setInterval(function() {
-        let end = content.performance.now();
-        content.window.clearInterval(id);
-        resolve(end - start);
-      }, 0);
-    });
-  });
+      time = yield ContentTask.spawn(browser, null, function () {
+        return new Promise(resolve => {
+          // Create an audio context, and save it on the window so it doesn't get GCed
+          content.window._audioCtx = new content.window.AudioContext();
 
-  ok(time > 2000, "Interval is throttled with no webaudio (" + time + " ms)");
+          let start = content.performance.now();
+          let id = content.window.setInterval(function() {
+            let end = content.performance.now();
+            content.window.clearInterval(id);
+            resolve(end - start);
+          }, 0);
+        });
+      });
 
-  // Set up a listener for the oscillator's demise
-  let oscillatorDemisePromise = ContentTask.spawn(browser, null, function() {
-    return new Promise(resolve => {
-      let observer = () => resolve();
-      // Record the observer on the content object so we can throw it out later
-      content.__bug1181073_observer = observer;
-      Services.obs.addObserver(observer, "webaudio-node-demise", false);
+      ok(time < 1000, "Interval is not throttled with an audio context present (" + time + " ms)");
     });
   });
-
-  time = yield ContentTask.spawn(browser, null, function () {
-    return new Promise(resolve => {
-      // Start playing audio, save it on the window so it doesn't get GCed
-      let audioCtx = content.window.audioCtx = new content.window.AudioContext();
-      let oscillator = audioCtx.createOscillator();
-      oscillator.type = 'square';
-      oscillator.frequency.value = 3000;
-      oscillator.start();
-
-      let start = content.performance.now();
-      let id = content.window.setInterval(function() {
-        let end = content.performance.now();
-        content.window.clearInterval(id);
-        oscillator.stop();
-        resolve(end - start);
-      }, 0);
-    });
-  });
-
-  ok(time < 1000, "Interval is not throttled with audio playing (" + time + " ms)");
-
-  // Destroy the oscillator, but not the audio context
-  yield new Promise(resolve => SpecialPowers.exactGC(resolve));
-  yield oscillatorDemisePromise;
-
-  time = yield ContentTask.spawn(browser, null, function () {
-    return new Promise(resolve => {
-      let start = content.performance.now();
-      let id = content.window.setInterval(function() {
-        let end = content.performance.now();
-        content.window.clearInterval(id);
-        resolve(end - start);
-      }, 0);
-    });
-  });
-
-  ok(time > 2000, "Interval is throttled with audio stopped (" + time + " ms)");
-
-  while (gBrowser.tabs.length > 1)
-    gBrowser.removeCurrentTab();
 });
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -103,17 +103,16 @@ void AudioInputCubeb::UpdateDeviceList()
   }
   mDevices = devices;
 }
 
 MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
   : mMutex("mozilla::MediaEngineWebRTC"),
     mVoiceEngine(nullptr),
     mAudioInput(nullptr),
-    mAudioEngineInit(false),
     mFullDuplex(aPrefs.mFullDuplex),
     mExtendedFilter(aPrefs.mExtendedFilter),
     mDelayAgnostic(aPrefs.mDelayAgnostic)
 {
 #ifndef MOZ_B2G_CAMERA
   nsCOMPtr<nsIComponentRegistrar> compMgr;
   NS_GetComponentRegistrar(getter_AddRefs(compMgr));
   if (compMgr) {
@@ -344,21 +343,22 @@ MediaEngineWebRTC::EnumerateAudioDevices
     }
   }
 
   ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
   if (!ptrVoEBase) {
     return;
   }
 
-  if (!mAudioEngineInit) {
-    if (ptrVoEBase->Init() < 0) {
-      return;
-    }
-    mAudioEngineInit = true;
+  // Always re-init the voice engine, since if we close the last use we
+  // DeInitEngine() and Terminate(), which shuts down Process() - but means
+  // we have to Init() again before using it.  Init() when already inited is
+  // just a no-op, so call always.
+  if (ptrVoEBase->Init() < 0) {
+    return;
   }
 
   if (!mAudioInput) {
     if (SupportsDuplex()) {
       // The platform_supports_full_duplex.
       mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
     } else {
       mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -594,17 +594,16 @@ private:
 
   nsCOMPtr<nsIThread> mThread;
 
   // gUM runnables can e.g. Enumerate from multiple threads
   Mutex mMutex;
   webrtc::VoiceEngine* mVoiceEngine;
   webrtc::Config mConfig;
   RefPtr<mozilla::AudioInput> mAudioInput;
-  bool mAudioEngineInit;
   bool mFullDuplex;
   bool mExtendedFilter;
   bool mDelayAgnostic;
   bool mHasTabVideoSource;
 
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
--- a/dom/plugins/base/nsNPAPIPluginInstance.cpp
+++ b/dom/plugins/base/nsNPAPIPluginInstance.cpp
@@ -908,17 +908,17 @@ void* nsNPAPIPluginInstance::AcquireCont
 {
   if (!mContentSurface) {
     mContentSurface = CreateSurfaceTexture();
 
     if (!mContentSurface)
       return nullptr;
   }
 
-  return mContentSurface->NativeWindow()->Handle();
+  return mContentSurface->NativeWindow();
 }
 
 AndroidSurfaceTexture*
 nsNPAPIPluginInstance::AsSurfaceTexture()
 {
   if (!mContentSurface)
     return nullptr;
 
@@ -929,17 +929,17 @@ void* nsNPAPIPluginInstance::AcquireVide
 {
   RefPtr<AndroidSurfaceTexture> surface = CreateSurfaceTexture();
   if (!surface) {
     return nullptr;
   }
 
   VideoInfo* info = new VideoInfo(surface);
 
-  void* window = info->mSurfaceTexture->NativeWindow()->Handle();
+  void* window = info->mSurfaceTexture->NativeWindow();
   mVideos.insert(std::pair<void*, VideoInfo*>(window, info));
 
   return window;
 }
 
 void nsNPAPIPluginInstance::ReleaseVideoWindow(void* window)
 {
   std::map<void*, VideoInfo*>::iterator it = mVideos.find(window);
--- a/dom/presentation/PresentationCallbacks.cpp
+++ b/dom/presentation/PresentationCallbacks.cpp
@@ -172,54 +172,56 @@ PresentationResponderLoadingCallback::In
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
   }
 
   if ((busyFlags == nsIDocShell::BUSY_FLAGS_NONE) ||
       (busyFlags & nsIDocShell::BUSY_FLAGS_PAGE_LOADING)) {
     // The docshell has finished loading or is receiving data (|STATE_TRANSFERRING|
     // has already been fired), so the page is ready for presentation use.
-    return NotifyReceiverReady();
+    return NotifyReceiverReady(/* isLoading = */ true);
   }
 
   // Start to listen to document state change event |STATE_TRANSFERRING|.
   return mProgress->AddProgressListener(this, nsIWebProgress::NOTIFY_STATE_DOCUMENT);
 }
 
 nsresult
-PresentationResponderLoadingCallback::NotifyReceiverReady()
+PresentationResponderLoadingCallback::NotifyReceiverReady(bool aIsLoading)
 {
   nsCOMPtr<nsPIDOMWindowOuter> window = do_GetInterface(mProgress);
   if (NS_WARN_IF(!window || !window->GetCurrentInnerWindow())) {
     return NS_ERROR_NOT_AVAILABLE;
   }
   uint64_t windowId = window->GetCurrentInnerWindow()->WindowID();
 
   nsCOMPtr<nsIPresentationService> service =
     do_GetService(PRESENTATION_SERVICE_CONTRACTID);
   if (NS_WARN_IF(!service)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
-  return service->NotifyReceiverReady(mSessionId, windowId);
+  return service->NotifyReceiverReady(mSessionId, windowId, aIsLoading);
 }
 
 // nsIWebProgressListener
 NS_IMETHODIMP
 PresentationResponderLoadingCallback::OnStateChange(nsIWebProgress* aWebProgress,
                                                     nsIRequest* aRequest,
                                                     uint32_t aStateFlags,
                                                     nsresult aStatus)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
-  if (aStateFlags & nsIWebProgressListener::STATE_TRANSFERRING) {
+  if (aStateFlags & (nsIWebProgressListener::STATE_TRANSFERRING |
+                     nsIWebProgressListener::STATE_STOP)) {
     mProgress->RemoveProgressListener(this);
 
-    return NotifyReceiverReady();
+    bool isLoading = aStateFlags & nsIWebProgressListener::STATE_TRANSFERRING;
+    return NotifyReceiverReady(isLoading);
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 PresentationResponderLoadingCallback::OnProgressChange(nsIWebProgress* aWebProgress,
                                                        nsIRequest* aRequest,
--- a/dom/presentation/PresentationCallbacks.h
+++ b/dom/presentation/PresentationCallbacks.h
@@ -71,17 +71,17 @@ public:
 
   explicit PresentationResponderLoadingCallback(const nsAString& aSessionId);
 
   nsresult Init(nsIDocShell* aDocShell);
 
 private:
   ~PresentationResponderLoadingCallback();
 
-  nsresult NotifyReceiverReady();
+  nsresult NotifyReceiverReady(bool aIsLoading);
 
   nsString mSessionId;
   nsCOMPtr<nsIWebProgress> mProgress;
 };
 
 } // namespace dom
 } // namespace mozilla
 
--- a/dom/presentation/PresentationService.cpp
+++ b/dom/presentation/PresentationService.cpp
@@ -884,26 +884,31 @@ NS_IMETHODIMP
 PresentationService::GetExistentSessionIdAtLaunch(uint64_t aWindowId,
                                                   nsAString& aSessionId)
 {
   return GetExistentSessionIdAtLaunchInternal(aWindowId, aSessionId);
 }
 
 NS_IMETHODIMP
 PresentationService::NotifyReceiverReady(const nsAString& aSessionId,
-                                         uint64_t aWindowId)
+                                         uint64_t aWindowId,
+                                         bool aIsLoading)
 {
   RefPtr<PresentationSessionInfo> info =
     GetSessionInfo(aSessionId, nsIPresentationService::ROLE_RECEIVER);
   if (NS_WARN_IF(!info)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   AddRespondingSessionId(aWindowId, aSessionId);
 
+  if (!aIsLoading) {
+    return static_cast<PresentationPresentingInfo*>(info.get())->NotifyResponderFailure();
+  }
+
   nsCOMPtr<nsIPresentationRespondingListener> listener;
   if (mRespondingListeners.Get(aWindowId, getter_AddRefs(listener))) {
     nsresult rv = listener->NotifySessionConnect(aWindowId, aSessionId);
     if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
     }
   }
 
--- a/dom/presentation/PresentationSessionInfo.cpp
+++ b/dom/presentation/PresentationSessionInfo.cpp
@@ -853,18 +853,20 @@ PresentationControllingInfo::NotifyDisco
   }
 
   // Unset control channel here so it won't try to re-close it in potential
   // subsequent |Shutdown| calls.
   SetControlChannel(nullptr);
 
   if (NS_WARN_IF(NS_FAILED(aReason) || !mIsResponderReady)) {
     // The presentation session instance may already exist.
-    // Change the state to TERMINATED since it never succeeds.
-    SetStateWithReason(nsIPresentationSessionListener::STATE_TERMINATED, aReason);
+    // Change the state to CLOSED if it is not terminated.
+    if (nsIPresentationSessionListener::STATE_TERMINATED != mState) {
+      SetStateWithReason(nsIPresentationSessionListener::STATE_CLOSED, aReason);
+    }
 
     // Reply error for an abnormal close.
     return ReplyError(NS_ERROR_DOM_OPERATION_ERR);
   }
 
   return NS_OK;
 }
 
@@ -1208,16 +1210,27 @@ PresentationPresentingInfo::NotifyRespon
     if (NS_WARN_IF(NS_FAILED(rv))) {
       return ReplyError(NS_ERROR_DOM_OPERATION_ERR);
     }
   }
 
   return NS_OK;
 }
 
+nsresult
+PresentationPresentingInfo::NotifyResponderFailure()
+{
+  if (mTimer) {
+    mTimer->Cancel();
+    mTimer = nullptr;
+  }
+
+  return ReplyError(NS_ERROR_DOM_OPERATION_ERR);
+}
+
 // nsIPresentationControlChannelListener
 NS_IMETHODIMP
 PresentationPresentingInfo::OnOffer(nsIPresentationChannelDescription* aDescription)
 {
   if (NS_WARN_IF(mHasFlushPendingEvents)) {
     return ReplyError(NS_ERROR_DOM_OPERATION_ERR);
   }
 
--- a/dom/presentation/PresentationSessionInfo.h
+++ b/dom/presentation/PresentationSessionInfo.h
@@ -225,16 +225,17 @@ public:
   {
     MOZ_ASSERT(aDevice);
     SetDevice(aDevice);
   }
 
   nsresult Init(nsIPresentationControlChannel* aControlChannel) override;
 
   nsresult NotifyResponderReady();
+  nsresult NotifyResponderFailure();
 
   NS_IMETHODIMP OnSessionTransport(nsIPresentationSessionTransport* transport) override;
 
   void ResolvedCallback(JSContext* aCx, JS::Handle<JS::Value> aValue) override;
 
   void RejectedCallback(JSContext* aCx, JS::Handle<JS::Value> aValue) override;
 
   void SetPromise(Promise* aPromise)
--- a/dom/presentation/interfaces/nsIPresentationService.idl
+++ b/dom/presentation/interfaces/nsIPresentationService.idl
@@ -169,24 +169,24 @@ interface nsIPresentationService : nsISu
    *
    * @param windowId: The inner window ID used to look up the session ID.
    */
   DOMString getExistentSessionIdAtLaunch(in unsigned long long windowId);
 
   /*
    * Notify the receiver page is ready for presentation use.
    *
-   * @param sessionId: An ID to identify presentation session.
-   * @param windowId: The inner window ID associated with the presentation
-   *                  session. (0 implies no window ID since no actual window
-   *                  uses 0 as its ID. Generally it's the case the window is
-   *                  located in different process from this service)
+   * @param sessionId An ID to identify presentation session.
+   * @param windowId  The inner window ID associated with the presentation
+   *                  session.
+   * @param isLoading true if the receiver page is loading successfully.
    */
   void notifyReceiverReady(in DOMString sessionId,
-                           [optional] in unsigned long long windowId);
+                           in unsigned long long windowId,
+                           in boolean isLoading);
 
   /*
    * Notify the transport is closed
    *
    * @param sessionId: An ID to identify presentation session.
    * @param role: Identify the function called by controller or receiver.
    * @param reason: the error message. NS_OK indicates it is closed normally.
    */
--- a/dom/presentation/ipc/PPresentation.ipdl
+++ b/dom/presentation/ipc/PPresentation.ipdl
@@ -90,14 +90,14 @@ parent:
   async RegisterSessionHandler(nsString aSessionId, uint8_t aRole);
   async UnregisterSessionHandler(nsString aSessionId, uint8_t aRole);
 
   async RegisterRespondingHandler(uint64_t aWindowId);
   async UnregisterRespondingHandler(uint64_t aWindowId);
 
   async PPresentationRequest(PresentationIPCRequest aRequest);
 
-  async NotifyReceiverReady(nsString aSessionId, uint64_t aWindowId);
+  async NotifyReceiverReady(nsString aSessionId, uint64_t aWindowId, bool aIsLoading);
   async NotifyTransportClosed(nsString aSessionId, uint8_t aRole, nsresult aReason);
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/presentation/ipc/PresentationIPCService.cpp
+++ b/dom/presentation/ipc/PresentationIPCService.cpp
@@ -359,30 +359,32 @@ NS_IMETHODIMP
 PresentationIPCService::GetExistentSessionIdAtLaunch(uint64_t aWindowId,
                                                      nsAString& aSessionId)
 {
   return GetExistentSessionIdAtLaunchInternal(aWindowId, aSessionId);;
 }
 
 NS_IMETHODIMP
 PresentationIPCService::NotifyReceiverReady(const nsAString& aSessionId,
-                                            uint64_t aWindowId)
+                                            uint64_t aWindowId,
+                                            bool aIsLoading)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   // No actual window uses 0 as its ID.
   if (NS_WARN_IF(aWindowId == 0)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   // Track the responding info for an OOP receiver page.
   AddRespondingSessionId(aWindowId, aSessionId);
 
   NS_WARN_IF(!sPresentationChild->SendNotifyReceiverReady(nsString(aSessionId),
-                                                          aWindowId));
+                                                          aWindowId,
+                                                          aIsLoading));
 
   // Release mCallback after using aSessionId
   // because aSessionId is held by mCallback.
   mCallback = nullptr;
   return NS_OK;
 }
 
 NS_IMETHODIMP
--- a/dom/presentation/ipc/PresentationParent.cpp
+++ b/dom/presentation/ipc/PresentationParent.cpp
@@ -275,22 +275,25 @@ PresentationParent::NotifySessionConnect
                  !SendNotifySessionConnect(aWindowId, nsString(aSessionId)))) {
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 bool
 PresentationParent::RecvNotifyReceiverReady(const nsString& aSessionId,
-                                            const uint64_t& aWindowId)
+                                            const uint64_t& aWindowId,
+                                            const bool& aIsLoading)
 {
   MOZ_ASSERT(mService);
 
   RegisterTransportBuilder(aSessionId, nsIPresentationService::ROLE_RECEIVER);
-  NS_WARN_IF(NS_FAILED(mService->NotifyReceiverReady(aSessionId, aWindowId)));
+  NS_WARN_IF(NS_FAILED(mService->NotifyReceiverReady(aSessionId,
+                                                     aWindowId,
+                                                     aIsLoading)));
   return true;
 }
 
 bool
 PresentationParent::RecvNotifyTransportClosed(const nsString& aSessionId,
                                               const uint8_t& aRole,
                                               const nsresult& aReason)
 {
--- a/dom/presentation/ipc/PresentationParent.h
+++ b/dom/presentation/ipc/PresentationParent.h
@@ -66,17 +66,18 @@ public:
   virtual bool RecvUnregisterSessionHandler(const nsString& aSessionId,
                                             const uint8_t& aRole) override;
 
   virtual bool RecvRegisterRespondingHandler(const uint64_t& aWindowId) override;
 
   virtual bool RecvUnregisterRespondingHandler(const uint64_t& aWindowId) override;
 
   virtual bool RecvNotifyReceiverReady(const nsString& aSessionId,
-                                       const uint64_t& aWindowId) override;
+                                       const uint64_t& aWindowId,
+                                       const bool& aIsLoading) override;
 
   virtual bool RecvNotifyTransportClosed(const nsString& aSessionId,
                                          const uint8_t& aRole,
                                          const nsresult& aReason) override;
 
 private:
   virtual ~PresentationParent();
 
new file mode 100644
--- /dev/null
+++ b/dom/presentation/tests/mochitest/file_presentation_unknown_content_type.test
@@ -0,0 +1,1 @@
+
new file mode 100644
--- /dev/null
+++ b/dom/presentation/tests/mochitest/file_presentation_unknown_content_type.test^headers^
@@ -0,0 +1,1 @@
+Content-Type: application/unknown
--- a/dom/presentation/tests/mochitest/mochitest.ini
+++ b/dom/presentation/tests/mochitest/mochitest.ini
@@ -15,16 +15,19 @@ support-files =
   file_presentation_receiver_auxiliary_navigation.html
   test_presentation_receiver_auxiliary_navigation.js
   file_presentation_sandboxed_presentation.html
   file_presentation_terminate.html
   test_presentation_terminate.js
   file_presentation_terminate_establish_connection_error.html
   test_presentation_terminate_establish_connection_error.js
   file_presentation_reconnect.html
+  file_presentation_unknown_content_type.test
+  file_presentation_unknown_content_type.test^headers^
+  test_presentation_tcp_receiver_establish_connection_unknown_content_type.js
 
 [test_presentation_dc_sender.html]
 [test_presentation_dc_receiver.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
 [test_presentation_dc_receiver_oop.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
 [test_presentation_1ua_sender_and_receiver.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
@@ -38,16 +41,20 @@ skip-if = (e10s || toolkit == 'gonk' || 
 [test_presentation_tcp_sender_disconnect.html]
 skip-if = toolkit == 'android' # Bug 1129785
 [test_presentation_tcp_sender_establish_connection_error.html]
 skip-if = toolkit == 'android' # Bug 1129785
 [test_presentation_tcp_receiver_establish_connection_error.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android' || os == 'mac' || os == 'win' || buildapp == 'mulet') # Bug 1129785, Bug 1204709
 [test_presentation_tcp_receiver_establish_connection_timeout.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
+[test_presentation_tcp_receiver_establish_connection_unknown_content_type_inproc.html]
+skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android')
+[test_presentation_tcp_receiver_establish_connection_unknown_content_type_oop.html]
+skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android')
 [test_presentation_tcp_receiver.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
 [test_presentation_tcp_receiver_oop.html]
 skip-if = (e10s || toolkit == 'gonk' || toolkit == 'android') # Bug 1129785
 [test_presentation_receiver_auxiliary_navigation_inproc.html]
 skip-if = (e10s || toolkit == 'gonk')
 [test_presentation_receiver_auxiliary_navigation_oop.html]
 skip-if = (e10s || toolkit == 'gonk')
new file mode 100644
--- /dev/null
+++ b/dom/presentation/tests/mochitest/test_presentation_tcp_receiver_establish_connection_unknown_content_type.js
@@ -0,0 +1,86 @@
+'use strict';
+
+var gScript = SpecialPowers.loadChromeScript(SimpleTest.getTestFileURL('PresentationSessionChromeScript.js'));
+var receiverUrl = SimpleTest.getTestFileURL('file_presentation_unknown_content_type.test');
+
+var obs = SpecialPowers.Cc['@mozilla.org/observer-service;1']
+          .getService(SpecialPowers.Ci.nsIObserverService);
+
+var receiverIframe;
+
+function setup() {
+  return new Promise(function(aResolve, aReject) {
+    gScript.sendAsyncMessage('trigger-device-add');
+
+    receiverIframe = document.createElement('iframe');
+    receiverIframe.setAttribute('mozbrowser', 'true');
+    receiverIframe.setAttribute('mozpresentation', receiverUrl);
+    receiverIframe.setAttribute('src', receiverUrl);
+    var oop = location.pathname.indexOf('_inproc') == -1;
+    receiverIframe.setAttribute("remote", oop);
+
+    var promise = new Promise(function(aResolve, aReject) {
+      document.body.appendChild(receiverIframe);
+
+      aResolve(receiverIframe);
+    });
+    obs.notifyObservers(promise, 'setup-request-promise', null);
+
+    aResolve();
+  });
+}
+
+function testIncomingSessionRequestReceiverLaunchUnknownContentType() {
+  let promise = Promise.all([
+    new Promise(function(aResolve, aReject) {
+      gScript.addMessageListener('receiver-launching', function launchReceiverHandler(aSessionId) {
+        gScript.removeMessageListener('receiver-launching', launchReceiverHandler);
+        info('Trying to launch receiver page.');
+
+        receiverIframe.addEventListener('mozbrowserclose', function() {
+          ok(true, 'observe receiver window closed');
+          aResolve();
+        });
+      });
+    }),
+    new Promise(function(aResolve, aReject) {
+      gScript.addMessageListener('control-channel-closed', function controlChannelClosedHandler(aReason) {
+        gScript.removeMessageListener('control-channel-closed', controlChannelClosedHandler);
+        is(aReason, 0x80530020 /* NS_ERROR_DOM_OPERATION_ERR */, 'The control channel is closed due to load failure.');
+        aResolve();
+      });
+    })
+  ]);
+
+  gScript.sendAsyncMessage('trigger-incoming-session-request', receiverUrl);
+  return promise;
+}
+
+function teardown() {
+  gScript.addMessageListener('teardown-complete', function teardownCompleteHandler() {
+    gScript.removeMessageListener('teardown-complete', teardownCompleteHandler);
+    gScript.destroy();
+    SimpleTest.finish();
+  });
+
+  gScript.sendAsyncMessage('teardown');
+}
+
+function runTests() {
+  setup().
+  then(testIncomingSessionRequestReceiverLaunchUnknownContentType).
+  then(teardown);
+}
+
+SimpleTest.waitForExplicitFinish();
+SpecialPowers.pushPermissions([
+  {type: 'presentation-device-manage', allow: false, context: document},
+  {type: 'presentation', allow: true, context: document},
+  {type: 'browser', allow: true, context: document},
+], function() {
+  SpecialPowers.pushPrefEnv({ 'set': [['dom.presentation.enabled', true],
+                                      ['dom.presentation.session_transport.data_channel.enable', false],
+                                      ['dom.mozBrowserFramesEnabled', true],
+                                      ['dom.ipc.tabs.disabled', false]]},
+                            runTests);
+});
new file mode 100644
--- /dev/null
+++ b/dom/presentation/tests/mochitest/test_presentation_tcp_receiver_establish_connection_unknown_content_type_inproc.html
@@ -0,0 +1,16 @@
+<!DOCTYPE HTML>
+<html>
+<!-- Any copyright is dedicated to the Public Domain.
+   - http://creativecommons.org/publicdomain/zero/1.0/ -->
+<head>
+  <meta charset="utf-8">
+  <title>Test for unknown content type of B2G Presentation API at receiver side</title>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1287717">Test for unknown content type of B2G Presentation API at receiver side</a>
+    <script type="application/javascript;version=1.8" src="test_presentation_tcp_receiver_establish_connection_unknown_content_type.js">
+    </script>
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/dom/presentation/tests/mochitest/test_presentation_tcp_receiver_establish_connection_unknown_content_type_oop.html
@@ -0,0 +1,16 @@
+<!DOCTYPE HTML>
+<html>
+<!-- Any copyright is dedicated to the Public Domain.
+   - http://creativecommons.org/publicdomain/zero/1.0/ -->
+<head>
+  <meta charset="utf-8">
+  <title>Test for unknown content type of B2G Presentation API at receiver side (OOP)</title>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1287717">Test for unknown content type of B2G Presentation API at receiver side (OOP)</a>
+    <script type="application/javascript;version=1.8" src="test_presentation_tcp_receiver_establish_connection_unknown_content_type.js">
+    </script>
+</body>
+</html>
--- a/dom/presentation/tests/mochitest/test_presentation_tcp_sender_establish_connection_error.html
+++ b/dom/presentation/tests/mochitest/test_presentation_tcp_sender_establish_connection_error.html
@@ -100,19 +100,19 @@ function testStartConnectionUnexpectedCo
       gScript.removeMessageListener('offer-sent', offerSentHandler);
       ok(aIsValid, "A valid offer is sent out.");
       gScript.sendAsyncMessage('trigger-control-channel-close', SpecialPowers.Cr.NS_ERROR_FAILURE);
     });
 
     request.start().then(
       function(aConnection) {
         is(aConnection.state, "connecting", "The initial state should be connecting.");
-        aConnection.onterminate = function() {
-          aConnection.onterminate = null;
-          is(aConnection.state, "terminated", "Connection should be terminated.");
+        aConnection.onclose = function() {
+          aConnection.onclose = null;
+          is(aConnection.state, "closed", "Connection should be closed.");
           aResolve();
         };
       },
       function(aError) {
         ok(false, "Error occurred when establishing a connection: " + aError);
         teardown();
         aReject();
       }
@@ -148,19 +148,19 @@ function testStartConnectionUnexpectedCo
       gScript.removeMessageListener('offer-sent', offerSentHandler);
       ok(aIsValid, "A valid offer is sent out.");
       gScript.sendAsyncMessage('trigger-control-channel-close', SpecialPowers.Cr.NS_OK);
     });
 
     request.start().then(
       function(aConnection) {
         is(aConnection.state, "connecting", "The initial state should be connecting.");
-        aConnection.onterminate = function() {
-          aConnection.onterminate = null;
-          is(aConnection.state, "terminated", "Connection should be terminated.");
+        aConnection.onclose = function() {
+          aConnection.onclose = null;
+          is(aConnection.state, "closed", "Connection should be closed.");
           aResolve();
         };
       },
       function(aError) {
         ok(false, "Error occurred when establishing a connection: " + aError);
         teardown();
         aReject();
       }
@@ -207,19 +207,19 @@ function testStartConnectionUnexpectedCo
     gScript.addMessageListener('data-transport-closed', function dataTransportClosedHandler(aReason) {
       gScript.removeMessageListener('data-transport-closed', dataTransportClosedHandler);
       info("The data transport is closed. " + aReason);
     });
 
     request.start().then(
       function(aConnection) {
         is(aConnection.state, "connecting", "The initial state should be connecting.");
-        aConnection.onterminate = function() {
-          aConnection.onterminate = null;
-          is(aConnection.state, "terminated", "Connection should be terminated.");
+        aConnection.onclose = function() {
+          aConnection.onclose = null;
+          is(aConnection.state, "closed", "Connection should be closed.");
           aResolve();
         };
       },
       function(aError) {
         ok(false, "Error occurred when establishing a connection: " + aError);
         teardown();
         aReject();
       }
@@ -266,19 +266,19 @@ function testStartConnectionUnexpectedCo
     gScript.addMessageListener('data-transport-closed', function dataTransportClosedHandler(aReason) {
       gScript.removeMessageListener('data-transport-closed', dataTransportClosedHandler);
       info("The data transport is closed. " + aReason);
     });
 
     request.start().then(
       function(aConnection) {
         is(aConnection.state, "connecting", "The initial state should be connecting.");
-        aConnection.onterminate = function() {
-          aConnection.onterminate = null;
-          is(aConnection.state, "terminated", "Connection should be terminated.");
+        aConnection.onclose = function() {
+          aConnection.onclose = null;
+          is(aConnection.state, "closed", "Connection should be closed.");
           aResolve();
         };
       },
       function(aError) {
         ok(false, "Error occurred when establishing a connection: " + aError);
         teardown();
         aReject();
       }
@@ -325,19 +325,19 @@ function testStartConnectionUnexpectedDa
     gScript.addMessageListener('data-transport-closed', function dataTransportClosedHandler(aReason) {
       gScript.removeMessageListener('data-transport-closed', dataTransportClosedHandler);
       info("The data transport is closed. " + aReason);
     });
 
     request.start().then(
       function(aConnection) {
         is(aConnection.state, "connecting", "The initial state should be connecting.");
-        aConnection.onterminate = function() {
-          aConnection.onterminate = null;
-          is(aConnection.state, "terminated", "Connection should be terminated.");
+        aConnection.onclose = function() {
+          aConnection.onclose = null;
+          is(aConnection.state, "closed", "Connection should be closed.");
           aResolve();
         };
       },
       function(aError) {
         ok(false, "Error occurred when establishing a connection: " + aError);
         teardown();
         aReject();
       }
--- a/dom/tests/mochitest/pointerlock/file_childIframe.html
+++ b/dom/tests/mochitest/pointerlock/file_childIframe.html
@@ -32,17 +32,17 @@ https://bugzilla.mozilla.org/show_bug.cg
   <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=633602">
     Mozilla Bug 633602
   </a>
 
   <div id="parent">
     <table id="childTable">
       <tr>
         <td>
-          <iframe id="iframe" src="iframe_differentDOM.html" onload="start();">
+          <iframe id="iframe" src="iframe_differentDOM.html">
           </iframe>
         </td>
         <td>
           <div id="childDiv">
           </div>
         </td>
       </tr>
     </table>
--- a/dom/tests/mochitest/pointerlock/mochitest.ini
+++ b/dom/tests/mochitest/pointerlock/mochitest.ini
@@ -18,9 +18,9 @@ support-files =
   file_suppressSomeMouseEvents.html
   file_locksvgelement.html
   file_allowPointerLockSandboxFlag.html
   file_changeLockElement.html
   iframe_differentDOM.html
 
 [test_pointerlock-api.html]
 tags = fullscreen
-skip-if = buildapp == 'b2g' || toolkit == 'android' || os == 'win' # B2G - window.open focus issues using fullscreen. Win: Bug 931445
+skip-if = buildapp == 'b2g' || toolkit == 'android' # B2G - window.open focus issues using fullscreen.
--- a/gfx/2d/Factory.cpp
+++ b/gfx/2d/Factory.cpp
@@ -456,16 +456,17 @@ bool
 Factory::DoesBackendSupportDataDrawtarget(BackendType aType)
 {
   switch (aType) {
   case BackendType::DIRECT2D:
   case BackendType::DIRECT2D1_1:
   case BackendType::RECORDING:
   case BackendType::NONE:
   case BackendType::COREGRAPHICS_ACCELERATED:
+  case BackendType::BACKEND_LAST:
     return false;
   case BackendType::CAIRO:
   case BackendType::COREGRAPHICS:
   case BackendType::SKIA:
     return true;
   }
 
   return false;
--- a/gfx/2d/Types.h
+++ b/gfx/2d/Types.h
@@ -132,17 +132,20 @@ enum class DrawTargetType : int8_t {
 enum class BackendType : int8_t {
   NONE = 0,
   DIRECT2D, // Used for version independent D2D objects.
   COREGRAPHICS,
   COREGRAPHICS_ACCELERATED,
   CAIRO,
   SKIA,
   RECORDING,
-  DIRECT2D1_1
+  DIRECT2D1_1,
+
+  // Add new entries above this line.
+  BACKEND_LAST
 };
 
 enum class FontType : int8_t {
   DWRITE,
   GDI,
   MAC,
   SKIA,
   CAIRO,
new file mode 100644
--- /dev/null
+++ b/gfx/config/gfxVarReceiver.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* vim: set sts=2 ts=8 sw=2 tw=99 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef mozilla_gfx_config_gfxVarReceiver_h
+#define mozilla_gfx_config_gfxVarReceiver_h
+
+namespace mozilla {
+namespace gfx {
+
+class GfxVarUpdate;
+
+// This allows downstream processes (such as PContent, PGPU) to listen for
+// updates on gfxVars.
+class gfxVarReceiver
+{
+public:
+  virtual void OnVarChanged(const GfxVarUpdate& aVar) = 0;
+};
+
+} // namespace gfx
+} // namespace mozilla
+
+#endif // mozilla_gfx_config_gfxVarReceiver_h
new file mode 100644
--- /dev/null
+++ b/gfx/config/gfxVars.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* vim: set sts=2 ts=8 sw=2 tw=99 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gfxVars.h"
+#include "gfxVarReceiver.h"
+#include "mozilla/dom/ContentChild.h"
+
+namespace mozilla {
+namespace gfx {
+
+StaticAutoPtr<gfxVars> gfxVars::sInstance;
+StaticAutoPtr<nsTArray<gfxVars::VarBase*>> gfxVars::sVarList;
+
+void
+gfxVars::Initialize()
+{
+  if (sInstance) {
+    return;
+  }
+
+  // sVarList must be initialized first since it's used in the constructor for
+  // sInstance.
+  sVarList = new nsTArray<gfxVars::VarBase*>();
+  sInstance = new gfxVars;
+
+  // Like Preferences, we want content to synchronously get initial data on
+  // init. Note the GPU process is not handled here - it cannot send sync
+  // messages, so instead the initial data is pushed down.
+  if (XRE_IsContentProcess()) {
+    InfallibleTArray<GfxVarUpdate> vars;
+    dom::ContentChild::GetSingleton()->SendGetGfxVars(&vars);
+    for (const auto& var : vars) {
+      ApplyUpdate(var);
+    }
+  }
+}
+
+gfxVars::gfxVars()
+{
+}
+
+void
+gfxVars::Shutdown()
+{
+  sInstance = nullptr;
+  sVarList = nullptr;
+}
+
+/* static */ void
+gfxVars::ApplyUpdate(const GfxVarUpdate& aUpdate)
+{
+  // Only subprocesses receive updates and apply them locally.
+  MOZ_ASSERT(!XRE_IsParentProcess());
+  sVarList->ElementAt(aUpdate.index())->SetValue(aUpdate.value());
+}
+
+/* static */ void
+gfxVars::AddReceiver(gfxVarReceiver* aReceiver)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  // Don't double-add receivers, in case a broken content process sends two
+  // init messages.
+  if (!sInstance->mReceivers.Contains(aReceiver)) {
+    sInstance->mReceivers.AppendElement(aReceiver);
+  }
+}
+
+/* static */ void
+gfxVars::RemoveReceiver(gfxVarReceiver* aReceiver)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (sInstance) {
+    sInstance->mReceivers.RemoveElement(aReceiver);
+  }
+}
+
+/* static */ nsTArray<GfxVarUpdate>
+gfxVars::FetchNonDefaultVars()
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(sVarList);
+
+  nsTArray<GfxVarUpdate> updates;
+  for (size_t i = 0; i < sVarList->Length(); i++) {
+    VarBase* var = sVarList->ElementAt(i);
+    if (var->HasDefaultValue()) {
+      continue;
+    }
+
+    GfxVarValue value;
+    var->GetValue(&value);
+
+    updates.AppendElement(GfxVarUpdate(i, value));
+  }
+
+  return updates;
+}
+
+gfxVars::VarBase::VarBase()
+{
+  mIndex = gfxVars::sVarList->Length();
+  gfxVars::sVarList->AppendElement(this);
+}
+
+void
+gfxVars::NotifyReceivers(VarBase* aVar)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  GfxVarValue value;
+  aVar->GetValue(&value);
+
+  GfxVarUpdate update(aVar->Index(), value);
+  for (auto& receiver : mReceivers) {
+    receiver->OnVarChanged(update);
+  }
+}
+
+} // namespace gfx
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/gfx/config/gfxVars.h
@@ -0,0 +1,143 @@
+/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* vim: set sts=2 ts=8 sw=2 tw=99 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef mozilla_gfx_config_gfxVars_h
+#define mozilla_gfx_config_gfxVars_h
+
+#include <stdint.h>
+#include "mozilla/Assertions.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/gfx/GraphicsMessages.h"
+#include "mozilla/gfx/Point.h"
+#include "mozilla/gfx/Types.h"
+#include "nsTArray.h"
+#include "nsXULAppAPI.h"
+
+namespace mozilla {
+namespace gfx {
+
+class gfxVarReceiver;
+
+// Generator for graphics vars.
+#define GFX_VARS_LIST(_)                                                \
+  /* C++ Name,                  Data Type,        Default Value */      \
+  _(BrowserTabsRemoteAutostart, bool,             false)                \
+  _(ContentBackend,             BackendType,      BackendType::NONE)    \
+  _(TileSize,                   IntSize,          IntSize(-1, -1))      \
+  _(UseXRender,                 bool,             false)                \
+  /* Add new entries above this line. */
+
+// Some graphics settings are computed on the UI process and must be
+// communicated to content and GPU processes. gfxVars helps facilitate
+// this. Its function is similar to gfxPrefs, except rather than hold
+// user preferences, it holds dynamically computed values.
+//
+// Each variable in GFX_VARS_LIST exposes the following static methods:
+//
+//    const DataType& CxxName();
+//    void SetCxxName(const DataType& aValue);
+//
+// Note that the setter may only be called in the UI process; a gfxVar must be
+// a variable that is determined in the UI process and pushed to child
+// processes.
+class gfxVars final
+{
+public:
+  static void Initialize();
+  static void Shutdown();
+
+  static void ApplyUpdate(const GfxVarUpdate& aUpdate);
+  static void AddReceiver(gfxVarReceiver* aReceiver);
+  static void RemoveReceiver(gfxVarReceiver* aReceiver);
+
+  // Return a list of updates for all variables with non-default values.
+  static nsTArray<GfxVarUpdate> FetchNonDefaultVars();
+
+public:
+  // Each variable must expose Set and Get methods for IPDL.
+  class VarBase
+  {
+  public:
+    VarBase();
+    virtual void SetValue(const GfxVarValue& aValue) = 0;
+    virtual void GetValue(GfxVarValue* aOutValue) = 0;
+    virtual bool HasDefaultValue() const = 0;
+    size_t Index() const {
+      return mIndex;
+    }
+  private:
+    size_t mIndex;
+  };
+
+private:
+  static StaticAutoPtr<gfxVars> sInstance;
+  static StaticAutoPtr<nsTArray<VarBase*>> sVarList;
+
+  template <typename T, T Default()>
+  class VarImpl final : public VarBase
+  {
+  public:
+    VarImpl()
+     : mValue(Default())
+    {}
+    void SetValue(const GfxVarValue& aValue) override {
+      aValue.get(&mValue);
+    }
+    void GetValue(GfxVarValue* aOutValue) override {
+      *aOutValue = GfxVarValue(mValue);
+    }
+    bool HasDefaultValue() const override {
+      return mValue == Default();
+    }
+    const T& Get() const {
+      return mValue;
+    }
+    // Return true if the value changed, false otherwise.
+    bool Set(const T& aValue) {
+      MOZ_ASSERT(XRE_IsParentProcess());
+      if (mValue == aValue) {
+        return false;
+      }
+      mValue = aValue;
+      return true;
+    }
+  private:
+    T mValue;
+  };
+
+#define GFX_VAR_DECL(CxxName, DataType, DefaultValue)           \
+private:                                                        \
+  static DataType Get##CxxName##Default() {                     \
+    return DefaultValue;                                        \
+  }                                                             \
+  VarImpl<DataType, Get##CxxName##Default> mVar##CxxName;       \
+public:                                                         \
+  static const DataType& CxxName() {                            \
+    return sInstance->mVar##CxxName.Get();                      \
+  }                                                             \
+  static void Set##CxxName(const DataType& aValue) {            \
+    if (sInstance->mVar##CxxName.Set(aValue)) {                 \
+      sInstance->NotifyReceivers(&sInstance->mVar##CxxName);    \
+    }                                                           \
+  }
+
+  GFX_VARS_LIST(GFX_VAR_DECL)
+#undef GFX_VAR_DECL
+
+private:
+  gfxVars();
+
+  void NotifyReceivers(VarBase* aVar);
+
+private:
+  nsTArray<gfxVarReceiver*> mReceivers;
+};
+
+#undef GFX_VARS_LIST
+
+} // namespace gfx
+} // namespace mozilla
+
+#endif // mozilla_gfx_config_gfxVars_h
--- a/gfx/config/moz.build
+++ b/gfx/config/moz.build
@@ -5,14 +5,22 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 EXPORTS += [
   'gfxConfig.h',
   'gfxFallback.h',
   'gfxFeature.h',
 ]
 
+EXPORTS.mozilla.gfx += [
+  'gfxVarReceiver.h',
+  'gfxVars.h',
+]
+
 UNIFIED_SOURCES += [
   'gfxConfig.cpp',
   'gfxFeature.cpp',
+  'gfxVars.cpp',
 ]
 
+include('/ipc/chromium/chromium-config.mozbuild')
+
 FINAL_LIBRARY = 'xul'
deleted file mode 100644
--- a/gfx/gl/AndroidNativeWindow.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-#ifdef MOZ_WIDGET_ANDROID
-
-#include "AndroidNativeWindow.h"
-#include "prlink.h"
-
-// #define ANDROID_NATIVE_WINDOW_DEBUG
-
-#if defined(ANDROID_NATIVE_WINDOW_DEBUG) || defined(DEBUG)
-#define ALOG(args...)  __android_log_print(ANDROID_LOG_INFO, "AndroidNativeWindow" , ## args)
-#else
-#define ALOG(args...) ((void)0)
-#endif
-
-using namespace mozilla::gfx;
-using namespace mozilla::gl;
-using namespace mozilla;
-
-class NativeWindowLibrary
-{
-public:
-
-  NativeWindowLibrary()
-    : fANativeWindow_fromSurface(nullptr)
-    , fANativeWindow_release(nullptr)
-    , fANativeWindow_setBuffersGeometry(nullptr)
-    , fANativeWindow_lock(nullptr)
-    , fANativeWindow_unlockAndPost(nullptr)
-    , fANativeWindow_getFormat(nullptr)
-    , fANativeWindow_getWidth(nullptr)
-    , fANativeWindow_getHeight(nullptr)
-  {
-    PRLibrary* lib = PR_LoadLibrary("libandroid.so");
-
-    fANativeWindow_fromSurface = (pfnANativeWindow_fromSurface)PR_FindSymbol(lib, "ANativeWindow_fromSurface");
-    fANativeWindow_release = (pfnANativeWindow_release)PR_FindSymbol(lib, "ANativeWindow_release");
-    fANativeWindow_setBuffersGeometry = (pfnANativeWindow_setBuffersGeometry)PR_FindSymbol(lib, "ANativeWindow_setBuffersGeometry");
-    fANativeWindow_lock = (pfnANativeWindow_lock)PR_FindSymbol(lib, "ANativeWindow_lock");
-    fANativeWindow_unlockAndPost = (pfnANativeWindow_unlockAndPost)PR_FindSymbol(lib, "ANativeWindow_unlockAndPost");
-    fANativeWindow_getFormat = (pfnANativeWindow_getFormat)PR_FindSymbol(lib, "ANativeWindow_getFormat");
-    fANativeWindow_getWidth = (pfnANativeWindow_getWidth)PR_FindSymbol(lib, "ANativeWindow_getWidth");
-    fANativeWindow_getHeight = (pfnANativeWindow_getHeight)PR_FindSymbol(lib, "ANativeWindow_getHeight");
-  }
-
-  void* ANativeWindow_fromSurface(JNIEnv* aEnv, jobject aSurface) {
-    ALOG("%s: env=%p, surface=%p\n", __PRETTY_FUNCTION__, aEnv, aSurface);
-    if (!Initialized()) {
-      return nullptr;
-    }
-
-    return fANativeWindow_fromSurface(aEnv, aSurface);
-  }
-
-  void ANativeWindow_release(void* aWindow) {
-    ALOG("%s: window=%p\n", __PRETTY_FUNCTION__, aWindow);
-    if (!Initialized()) {
-      return;
-    }
-
-    fANativeWindow_release(aWindow);
-  }
-
-  bool ANativeWindow_setBuffersGeometry(void* aWindow, int32_t aWidth, int32_t aHeight, int32_t aFormat) {
-    ALOG("%s: window=%p, width=%d, height=%d, format=%d\n", __PRETTY_FUNCTION__, aWindow, aWidth, aHeight, aFormat);
-    if (!Initialized()) {
-      return false;
-    }
-
-    return fANativeWindow_setBuffersGeometry(aWindow, aWidth, aHeight, (int32_t)aFormat) == 0;
-  }
-
-  bool ANativeWindow_lock(void* aWindow, void* out_buffer, void*in_out_dirtyBounds) {
-    ALOG("%s: window=%p, out_buffer=%p, in_out_dirtyBounds=%p\n", __PRETTY_FUNCTION__,
-         aWindow, out_buffer, in_out_dirtyBounds);
-    if (!Initialized()) {
-      return false;
-    }
-
-    return fANativeWindow_lock(aWindow, out_buffer, in_out_dirtyBounds) == 0;
-  }
-
-  bool ANativeWindow_unlockAndPost(void* aWindow) {
-    ALOG("%s: window=%p\n", __PRETTY_FUNCTION__, aWindow);
-    if (!Initialized()) {
-      return false;
-    }
-
-    return fANativeWindow_unlockAndPost(aWindow) == 0;
-  }
-
-  AndroidWindowFormat ANativeWindow_getFormat(void* aWindow) {
-    ALOG("%s: window=%p\n", __PRETTY_FUNCTION__, aWindow);
-    if (!Initialized()) {
-      return AndroidWindowFormat::Unknown;
-    }
-
-    return (AndroidWindowFormat)fANativeWindow_getFormat(aWindow);
-  }
-
-  int32_t ANativeWindow_getWidth(void* aWindow) {
-    ALOG("%s: window=%p\n", __PRETTY_FUNCTION__, aWindow);
-    if (!Initialized()) {
-      return -1;
-    }
-
-    return fANativeWindow_getWidth(aWindow);
-  }
-
-  int32_t ANativeWindow_getHeight(void* aWindow) {
-    ALOG("%s: window=%p\n", __PRETTY_FUNCTION__, aWindow);
-    if (!Initialized()) {
-      return -1;
-    }
-
-    return fANativeWindow_getHeight(aWindow);
-  }
-
-  bool Initialized() {
-    return fANativeWindow_fromSurface && fANativeWindow_release && fANativeWindow_setBuffersGeometry
-      && fANativeWindow_lock && fANativeWindow_unlockAndPost && fANativeWindow_getFormat && fANativeWindow_getWidth
-      && fANativeWindow_getHeight;
-  }
-
-private:
-
-  typedef void* (*pfnANativeWindow_fromSurface)(JNIEnv* env, jobject surface);
-  pfnANativeWindow_fromSurface fANativeWindow_fromSurface;
-
-  typedef void (*pfnANativeWindow_release)(void* window);
-  pfnANativeWindow_release fANativeWindow_release;
-
-  typedef int32_t (*pfnANativeWindow_setBuffersGeometry)(void* window, int32_t width, int32_t height, int32_t format);
-  pfnANativeWindow_setBuffersGeometry fANativeWindow_setBuffersGeometry;
-
-  typedef int32_t (*pfnANativeWindow_lock)(void* window, void* out_buffer, void* in_out_dirtyBounds);
-  pfnANativeWindow_lock fANativeWindow_lock;
-
-  typedef int32_t (*pfnANativeWindow_unlockAndPost)(void* window);
-  pfnANativeWindow_unlockAndPost fANativeWindow_unlockAndPost;
-
-  typedef AndroidWindowFormat (*pfnANativeWindow_getFormat)(void* window);
-  pfnANativeWindow_getFormat fANativeWindow_getFormat;
-
-  typedef int32_t (*pfnANativeWindow_getWidth)(void* window);
-  pfnANativeWindow_getWidth fANativeWindow_getWidth;
-
-  typedef int32_t (*pfnANativeWindow_getHeight)(void* window);
-  pfnANativeWindow_getHeight fANativeWindow_getHeight;
-};
-
-static NativeWindowLibrary* sLibrary = nullptr;
-
-static bool
-EnsureInit()
-{
-  static bool initialized = false;
-  if (!initialized) {
-    if (!sLibrary) {
-      sLibrary = new NativeWindowLibrary();
-    }
-    initialized = sLibrary->Initialized();
-  }
-
-  return initialized;
-}
-
-
-namespace mozilla {
-
-/* static */ AndroidNativeWindow*
-AndroidNativeWindow::CreateFromSurface(JNIEnv* aEnv, jobject aSurface)
-{
-  if (!EnsureInit()) {
-    ALOG("Not initialized");
-    return nullptr;
-  }
-
-  void* window = sLibrary->ANativeWindow_fromSurface(aEnv, aSurface);
-  if (!window) {
-    ALOG("Failed to create window from surface");
-    return nullptr;
-  }
-
-  return new AndroidNativeWindow(window);
-}
-
-AndroidNativeWindow::~AndroidNativeWindow()
-{
-  if (EnsureInit() && mWindow) {
-    sLibrary->ANativeWindow_release(mWindow);
-    mWindow = nullptr;
-  }
-}
-
-IntSize
-AndroidNativeWindow::Size()
-{
-  MOZ_ASSERT(mWindow);
-  if (!EnsureInit()) {
-    return IntSize(0, 0);
-  }
-
-  return IntSize(sLibrary->ANativeWindow_getWidth(mWindow), sLibrary->ANativeWindow_getHeight(mWindow));
-}
-
-AndroidWindowFormat
-AndroidNativeWindow::Format()
-{
-  MOZ_ASSERT(mWindow);
-  if (!EnsureInit()) {
-    return AndroidWindowFormat::Unknown;
-  }
-
-  return sLibrary->ANativeWindow_getFormat(mWindow);
-}
-
-bool
-AndroidNativeWindow::SetBuffersGeometry(int32_t aWidth, int32_t aHeight, AndroidWindowFormat aFormat)
-{
-  MOZ_ASSERT(mWindow);
-  if (!EnsureInit())
-    return false;
-
-  return sLibrary->ANativeWindow_setBuffersGeometry(mWindow, aWidth, aHeight, (int32_t)aFormat);
-}
-
-bool
-AndroidNativeWindow::Lock(void** out_bits,int32_t* out_width, int32_t* out_height,
-                          int32_t* out_stride, AndroidWindowFormat* out_format)
-{
-  /* Copied from native_window.h in Android NDK (platform-9) */
-  typedef struct ANativeWindow_Buffer {
-      // The number of pixels that are show horizontally.
-      int32_t width;
-
-      // The number of pixels that are shown vertically.
-      int32_t height;
-
-      // The number of *pixels* that a line in the buffer takes in
-      // memory.  This may be >= width.
-      int32_t stride;
-
-      // The format of the buffer.  One of WINDOW_FORMAT_*
-      int32_t format;
-
-      // The actual bits.
-      void* bits;
-
-      // Do not touch.
-      uint32_t reserved[6];
-  } ANativeWindow_Buffer;
-
-
-  ANativeWindow_Buffer buffer;
-
-  if (!sLibrary->ANativeWindow_lock(mWindow, &buffer, nullptr)) {
-    ALOG("Failed to lock");
-    return false;
-  }
-
-  *out_bits = buffer.bits;
-  *out_width = buffer.width;
-  *out_height = buffer.height;
-  *out_stride = buffer.stride;
-  *out_format = (AndroidWindowFormat)buffer.format;
-  return true;
-}
-
-bool
-AndroidNativeWindow::UnlockAndPost()
-{
-  if (!EnsureInit()) {
-    ALOG("Not initialized");
-    return false;
-  }
-
-  return sLibrary->ANativeWindow_unlockAndPost(mWindow);
-}
-
-}
-
-#endif // MOZ_WIDGET_ANDROID
deleted file mode 100644
--- a/gfx/gl/AndroidNativeWindow.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-// vim:set ts=2 sts=2 sw=2 et cin:
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef AndroidNativeWindow_h__
-#define AndroidNativeWindow_h__
-#ifdef MOZ_WIDGET_ANDROID
-
-#include <jni.h>
-#include "GLDefs.h"
-
-#include "nsISupports.h"
-#include "mozilla/gfx/2D.h"
-
-
-namespace mozilla {
-namespace gl {
-
-enum class AndroidWindowFormat {
-  Unknown = -1,
-  RGBA_8888 = 1,
-  RGBX_8888 = 1 << 1,
-  RGB_565 = 1 << 2
-};
-
-/**
- * This class is a wrapper around Android's SurfaceTexture class.
- * Usage is pretty much exactly like the Java class, so see
- * the Android documentation for details.
- */
-class AndroidNativeWindow {
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AndroidNativeWindow)
-
-public:
-
-  static AndroidNativeWindow* CreateFromSurface(JNIEnv* aEnv, jobject aSurface);
-
-  gfx::IntSize Size();
-  AndroidWindowFormat Format();
-
-  bool SetBuffersGeometry(int32_t aWidth, int32_t aHeight, AndroidWindowFormat aFormat);
-
-  bool Lock(void** out_bits, int32_t* out_width, int32_t* out_height, int32_t* out_stride, AndroidWindowFormat* out_format);
-  bool UnlockAndPost();
-
-  void* Handle() { return mWindow; }
-
-protected:
-  AndroidNativeWindow(void* aWindow)
-    : mWindow(aWindow)
-  {
-
-  }
-
-  virtual ~AndroidNativeWindow();
-
-  void* mWindow;
-};
-
-}
-}
-
-
-#endif
-#endif
--- a/gfx/gl/AndroidSurfaceTexture.cpp
+++ b/gfx/gl/AndroidSurfaceTexture.cpp
@@ -2,16 +2,17 @@
 // vim:set ts=2 sts=2 sw=2 et cin:
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifdef MOZ_WIDGET_ANDROID
 
 #include <map>
+#include <android/native_window_jni.h>
 #include <android/log.h>
 #include "AndroidSurfaceTexture.h"
 #include "gfxImageSurface.h"
 #include "gfxPrefs.h"
 #include "AndroidBridge.h"
 #include "nsThreadUtils.h"
 #include "mozilla/gfx/Matrix.h"
 #include "GeneratedJNIWrappers.h"
@@ -194,18 +195,18 @@ AndroidSurfaceTexture::Init(GLContext* a
 
   mAttachedContext = aContext;
 
   if (NS_WARN_IF(NS_FAILED(
       Surface::New(mSurfaceTexture, ReturnTo(&mSurface))))) {
     return false;
   }
 
-  mNativeWindow = AndroidNativeWindow::CreateFromSurface(jni::GetEnvForThread(),
-                                                         mSurface.Get());
+  mNativeWindow = ANativeWindow_fromSurface(jni::GetEnvForThread(),
+                                            mSurface.Get());
   MOZ_ASSERT(mNativeWindow, "Failed to create native window from surface");
 
   mID = sInstances.Add(this);
 
   return true;
 }
 
 AndroidSurfaceTexture::AndroidSurfaceTexture()
@@ -222,16 +223,21 @@ AndroidSurfaceTexture::~AndroidSurfaceTe
   sInstances.Remove(mID);
 
   mFrameAvailableCallback = nullptr;
 
   if (mSurfaceTexture) {
     GeckoAppShell::UnregisterSurfaceTextureFrameListener(mSurfaceTexture);
     mSurfaceTexture = nullptr;
   }
+
+  if (mNativeWindow) {
+    ANativeWindow_release(mNativeWindow);
+    mNativeWindow = nullptr;
+  }
 }
 
 void
 AndroidSurfaceTexture::UpdateTexImage()
 {
   mSurfaceTexture->UpdateTexImage();
 }
 
--- a/gfx/gl/AndroidSurfaceTexture.h
+++ b/gfx/gl/AndroidSurfaceTexture.h
@@ -4,25 +4,25 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef AndroidSurfaceTexture_h__
 #define AndroidSurfaceTexture_h__
 #ifdef MOZ_WIDGET_ANDROID
 
 #include <jni.h>
+#include <android/native_window.h>
 #include "nsIRunnable.h"
 #include "gfxPlatform.h"
 #include "GLDefs.h"
 #include "mozilla/gfx/2D.h"
 #include "mozilla/gfx/MatrixFwd.h"
 #include "mozilla/Monitor.h"
 
 #include "SurfaceTexture.h"
-#include "AndroidNativeWindow.h"
 
 namespace mozilla {
 namespace gl {
 
 class GLContext;
 
 /**
  * This class is a wrapper around Android's SurfaceTexture class.
@@ -57,17 +57,17 @@ public:
   nsresult Detach();
 
   // Ability to detach is based on API version (16+), and we also block PowerVR
   // since it has some type of fencing problem. Bug 1100126.
   bool CanDetach() const;
 
   GLContext* AttachedContext() const { return mAttachedContext; }
 
-  AndroidNativeWindow* NativeWindow() const {
+  ANativeWindow* NativeWindow() const {
     return mNativeWindow;
   }
 
   // This attaches the updated data to the TEXTURE_EXTERNAL target
   void UpdateTexImage();
 
   void GetTransformMatrix(mozilla::gfx::Matrix4x4& aMatrix) const;
   int ID() const { return mID; }
@@ -92,17 +92,17 @@ private:
   bool Init(GLContext* aContext, GLuint aTexture);
 
   GLuint mTexture;
   java::sdk::SurfaceTexture::GlobalRef mSurfaceTexture;
   java::sdk::Surface::GlobalRef mSurface;
 
   GLContext* mAttachedContext;
 
-  RefPtr<AndroidNativeWindow> mNativeWindow;
+  ANativeWindow* mNativeWindow;
   int mID;
   nsCOMPtr<nsIRunnable> mFrameAvailableCallback;
 
   mutable Monitor mMonitor;
 };
 
 }
 }
--- a/gfx/gl/moz.build
+++ b/gfx/gl/moz.build
@@ -21,17 +21,16 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'an
     gl_provider = 'EGL'
 elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk':
     gl_provider = 'EGL'
 
 if CONFIG['MOZ_GL_PROVIDER']:
     gl_provider = CONFIG['MOZ_GL_PROVIDER']
 
 EXPORTS += [
-    'AndroidNativeWindow.h',
     'AndroidSurfaceTexture.h',
     'DecomposeIntoNoRepeatTriangles.h',
     'EGLUtils.h',
     'ForceDiscreteGPUHelperCGL.h',
     'GfxTexturesReporter.h',
     'GLBlitHelper.h',
     'GLConsts.h',
     'GLContext.h',
@@ -124,17 +123,16 @@ elif gl_provider == 'GLX':
         'GLContextProviderGLX.cpp',
         'SharedSurfaceGLX.cpp'
     ]
     EXPORTS += [
         'SharedSurfaceGLX.h'
     ]
 
 UNIFIED_SOURCES += [
-    'AndroidNativeWindow.cpp',
     'AndroidSurfaceTexture.cpp',
     'DecomposeIntoNoRepeatTriangles.cpp',
     'EGLUtils.cpp',
     'GfxTexturesReporter.cpp',
     'GLBlitHelper.cpp',
     'GLContext.cpp',
     'GLContextFeatures.cpp',
     'GLContextProviderEGL.cpp',
--- a/gfx/ipc/GPUChild.cpp
+++ b/gfx/ipc/GPUChild.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=99: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include "GPUChild.h"
 #include "gfxPrefs.h"
 #include "GPUProcessHost.h"
+#include "mozilla/gfx/gfxVars.h"
 
 namespace mozilla {
 namespace gfx {
 
 GPUChild::GPUChild(GPUProcessHost* aHost)
  : mHost(aHost)
 {
   MOZ_COUNT_CTOR(GPUChild);
@@ -35,22 +36,32 @@ GPUChild::Init()
       continue;
     }
 
     GfxPrefValue value;
     pref->GetCachedValue(&value);
     prefs.AppendElement(GfxPrefSetting(pref->Index(), value));
   }
 
-  SendInit(prefs);
+  nsTArray<GfxVarUpdate> updates = gfxVars::FetchNonDefaultVars();
+  SendInit(prefs, updates);
+
+  gfxVars::AddReceiver(this);
+}
+
+void
+GPUChild::OnVarChanged(const GfxVarUpdate& aVar)
+{
+  SendUpdateVar(aVar);
 }
 
 void
 GPUChild::ActorDestroy(ActorDestroyReason aWhy)
 {
+  gfxVars::RemoveReceiver(this);
   mHost->OnChannelClosed();
 }
 
 class DeferredDeleteGPUChild : public Runnable
 {
 public:
   explicit DeferredDeleteGPUChild(UniquePtr<GPUChild>&& aChild)
     : mChild(Move(aChild))
--- a/gfx/ipc/GPUChild.h
+++ b/gfx/ipc/GPUChild.h
@@ -4,34 +4,41 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef _include_mozilla_gfx_ipc_GPUChild_h_
 #define _include_mozilla_gfx_ipc_GPUChild_h_
 
 #include "mozilla/RefPtr.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/gfx/PGPUChild.h"
+#include "mozilla/gfx/gfxVarReceiver.h"
 
 namespace mozilla {
 namespace gfx {
 
 class GPUProcessHost;
 
-class GPUChild final : public PGPUChild
+class GPUChild final
+  : public PGPUChild,
+    public gfxVarReceiver
 {
 public:
   explicit GPUChild(GPUProcessHost* aHost);
   ~GPUChild();
 
   void Init();
 
-  static void Destroy(UniquePtr<GPUChild>&& aChild);
+  // gfxVarReceiver overrides.
+  void OnVarChanged(const GfxVarUpdate& aVar) override;
 
+  // PGPUChild overrides.
   void ActorDestroy(ActorDestroyReason aWhy) override;
 
+  static void Destroy(UniquePtr<GPUChild>&& aChild);
+
 private:
   GPUProcessHost* mHost;
 };
 
 } // namespace gfx
 } // namespace mozilla
 
 #endif // _include_mozilla_gfx_ipc_GPUChild_h_
--- a/gfx/ipc/GPUParent.cpp
+++ b/gfx/ipc/GPUParent.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include "GPUParent.h"
 #include "gfxConfig.h"
 #include "gfxPlatform.h"
 #include "gfxPrefs.h"
 #include "GPUProcessHost.h"
 #include "mozilla/Assertions.h"
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/ipc/ProcessChild.h"
 #include "mozilla/layers/CompositorBridgeParent.h"
 #include "mozilla/layers/CompositorThread.h"
 #include "mozilla/layers/ImageBridgeParent.h"
 #include "VRManager.h"
 #include "VRManagerParent.h"
 #include "VsyncBridgeParent.h"
 
@@ -37,30 +38,35 @@ GPUParent::Init(base::ProcessId aParentP
                 IPC::Channel* aChannel)
 {
   if (NS_WARN_IF(!Open(aChannel, aParentPid, aIOLoop))) {
     return false;
   }
 
   // Ensure gfxPrefs are initialized.
   gfxPrefs::GetSingleton();
+  gfxVars::Initialize();
   CompositorThreadHolder::Start();
   VRManager::ManagerInit();
   gfxPlatform::InitNullMetadata();
   return true;
 }
 
 bool
-GPUParent::RecvInit(nsTArray<GfxPrefSetting>&& prefs)
+GPUParent::RecvInit(nsTArray<GfxPrefSetting>&& prefs,
+                    nsTArray<GfxVarUpdate>&& vars)
 {
   const nsTArray<gfxPrefs::Pref*>& globalPrefs = gfxPrefs::all();
   for (auto& setting : prefs) {
     gfxPrefs::Pref* pref = globalPrefs[setting.index()];
     pref->SetCachedValue(setting.value());
   }
+  for (const auto& var : vars) {
+    gfxVars::ApplyUpdate(var);
+  }
   return true;
 }
 
 bool
 GPUParent::RecvInitVsyncBridge(Endpoint<PVsyncBridgeParent>&& aVsyncEndpoint)
 {
   VsyncBridgeParent::Start(Move(aVsyncEndpoint));
   return true;
@@ -83,16 +89,23 @@ GPUParent::RecvInitVRManager(Endpoint<PV
 bool
 GPUParent::RecvUpdatePref(const GfxPrefSetting& setting)
 {
   gfxPrefs::Pref* pref = gfxPrefs::all()[setting.index()];
   pref->SetCachedValue(setting.value());
   return true;
 }
 
+bool
+GPUParent::RecvUpdateVar(const GfxVarUpdate& aUpdate)
+{
+  gfxVars::ApplyUpdate(aUpdate);
+  return true;
+}
+
 static void
 OpenParent(RefPtr<CompositorBridgeParent> aParent,
            Endpoint<PCompositorBridgeParent>&& aEndpoint)
 {
   if (!aParent->Bind(Move(aEndpoint))) {
     MOZ_CRASH("Failed to bind compositor");
   }
 }
--- a/gfx/ipc/GPUParent.h
+++ b/gfx/ipc/GPUParent.h
@@ -19,21 +19,23 @@ class GPUParent final : public PGPUParen
 public:
   GPUParent();
   ~GPUParent();
 
   bool Init(base::ProcessId aParentPid,
             MessageLoop* aIOLoop,
             IPC::Channel* aChannel);
 
-  bool RecvInit(nsTArray<GfxPrefSetting>&& prefs) override;
+  bool RecvInit(nsTArray<GfxPrefSetting>&& prefs,
+                nsTArray<GfxVarUpdate>&& vars) override;
   bool RecvInitVsyncBridge(Endpoint<PVsyncBridgeParent>&& aVsyncEndpoint) override;
   bool RecvInitImageBridge(Endpoint<PImageBridgeParent>&& aEndpoint) override;
   bool RecvInitVRManager(Endpoint<PVRManagerParent>&& aEndpoint) override;
   bool RecvUpdatePref(const GfxPrefSetting& pref) override;
+  bool RecvUpdateVar(const GfxVarUpdate& pref) override;
   bool RecvNewWidgetCompositor(
     Endpoint<PCompositorBridgeParent>&& aEndpoint,
     const CSSToLayoutDeviceScale& aScale,
     const TimeDuration& aVsyncRate,
     const bool& aUseExternalSurface,
     const IntSize& aSurfaceSize) override;
   bool RecvNewContentCompositorBridge(Endpoint<PCompositorBridgeParent>&& aEndpoint) override;
   bool RecvNewContentImageBridge(Endpoint<PImageBridgeParent>&& aEndpoint) override;
--- a/gfx/ipc/GfxMessageUtils.h
+++ b/gfx/ipc/GfxMessageUtils.h
@@ -217,16 +217,24 @@ template <>
 struct ParamTraits<mozilla::layers::LayersBackend>
   : public ContiguousEnumSerializer<
              mozilla::layers::LayersBackend,
              mozilla::layers::LayersBackend::LAYERS_NONE,
              mozilla::layers::LayersBackend::LAYERS_LAST>
 {};
 
 template <>
+struct ParamTraits<mozilla::gfx::BackendType>
+  : public ContiguousEnumSerializer<
+             mozilla::gfx::BackendType,
+             mozilla::gfx::BackendType::NONE,
+             mozilla::gfx::BackendType::BACKEND_LAST>
+{};
+
+template <>
 struct ParamTraits<mozilla::layers::ScaleMode>
   : public ContiguousEnumSerializer<
              mozilla::layers::ScaleMode,
              mozilla::layers::ScaleMode::SCALE_NONE,
              mozilla::layers::ScaleMode::SENTINEL>
 {};
 
 template <>
--- a/gfx/ipc/GraphicsMessages.ipdlh
+++ b/gfx/ipc/GraphicsMessages.ipdlh
@@ -1,26 +1,41 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
  * vim: sw=2 ts=8 et :
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 using struct DxgiAdapterDesc from "mozilla/D3DMessageUtils.h";
-
+using mozilla::gfx::BackendType from "mozilla/gfx/Types.h";
+using mozilla::gfx::IntSize from "mozilla/gfx/Point.h";
+
 namespace mozilla {
 namespace gfx {
 
 struct DeviceInitData
 {
   bool useHwCompositing;
 
   // Windows only.
   bool useD3D11;
   bool useD3D11WARP;
   bool d3d11TextureSharingWorks;
   bool useD2D1;
   DxgiAdapterDesc adapter;
 };
 
+union GfxVarValue
+{
+  BackendType;
+  bool;
+  IntSize;
+};
+
+struct GfxVarUpdate
+{
+  size_t index;
+  GfxVarValue value;
+};
+
 } // namespace gfx
 } // namespace mozilla
--- a/gfx/ipc/PGPU.ipdl
+++ b/gfx/ipc/PGPU.ipdl
@@ -1,13 +1,14 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+include GraphicsMessages;
 include protocol PCompositorBridge;
 include protocol PImageBridge;
 include protocol PVRManager;
 include protocol PVsyncBridge;
 
 using mozilla::TimeDuration from "mozilla/TimeStamp.h";
 using mozilla::CSSToLayoutDeviceScale from "Units.h";
 using mozilla::gfx::IntSize from "mozilla/gfx/2D.h";
@@ -26,24 +27,25 @@ struct GfxPrefSetting {
   int32_t index;
   GfxPrefValue value;
 };
 
 sync protocol PGPU
 {
 parent:
   // Sent by the UI process to initiate core settings.
-  async Init(GfxPrefSetting[] prefs);
+  async Init(GfxPrefSetting[] prefs, GfxVarUpdate[] vars);
 
   async InitVsyncBridge(Endpoint<PVsyncBridgeParent> endpoint);
   async InitImageBridge(Endpoint<PImageBridgeParent> endpoint);
   async InitVRManager(Endpoint<PVRManagerParent> endpoint);
 
-  // Called to update a gfx preference.
+  // Called to update a gfx preference or variable.
   async UpdatePref(GfxPrefSetting pref);
+  async UpdateVar(GfxVarUpdate var);
 
   // Create a new top-level compositor.
   async NewWidgetCompositor(Endpoint<PCompositorBridgeParent> endpoint,
                             CSSToLayoutDeviceScale scale,
                             TimeDuration vsyncRate,
                             bool useExternalSurface,
                             IntSize surfaceSize);
 
--- a/gfx/layers/BufferTexture.cpp
+++ b/gfx/layers/BufferTexture.cpp
@@ -87,17 +87,17 @@ public:
 
 protected:
   mozilla::ipc::Shmem mShmem;
 };
 
 static bool UsingX11Compositor()
 {
 #ifdef MOZ_WIDGET_GTK
-  return gfxPlatformGtk::GetPlatform()->UseXRender();
+  return gfx::gfxVars::UseXRender();
 #endif
   return false;
 }
 
 bool ComputeHasIntermediateBuffer(gfx::SurfaceFormat aFormat,
                                   LayersBackend aLayersBackend)
 {
   return aLayersBackend != LayersBackend::LAYERS_BASIC
--- a/gfx/layers/TiledLayerBuffer.h
+++ b/gfx/layers/TiledLayerBuffer.h
@@ -7,18 +7,18 @@
 
 // Debug defines
 //#define GFX_TILEDLAYER_DEBUG_OVERLAY
 //#define GFX_TILEDLAYER_PREF_WARNINGS
 //#define GFX_TILEDLAYER_RETAINING_LOG
 
 #include <stdint.h>                     // for uint16_t, uint32_t
 #include <sys/types.h>                  // for int32_t
-#include "gfxPlatform.h"                // for GetTileWidth/GetTileHeight
 #include "LayersLogging.h"              // for print_stderr
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/Logging.h"        // for gfxCriticalError
 #include "mozilla/layers/LayersTypes.h" // for TextureDumpMode
 #include "nsDebug.h"                    // for NS_ASSERTION
 #include "nsPoint.h"                    // for nsIntPoint
 #include "nsRect.h"                     // for mozilla::gfx::IntRect
 #include "nsRegion.h"                   // for nsIntRegion
 #include "nsTArray.h"                   // for nsTArray
 
@@ -137,18 +137,17 @@ inline int RoundDownToTileEdge(int aX, i
 
 template<typename Derived, typename Tile>
 class TiledLayerBuffer
 {
 public:
   TiledLayerBuffer()
     : mTiles(0, 0, 0, 0)
     , mResolution(1)
-    , mTileSize(gfxPlatform::GetPlatform()->GetTileWidth(),
-                gfxPlatform::GetPlatform()->GetTileHeight())
+    , mTileSize(gfxVars::TileSize())
   {}
 
   ~TiledLayerBuffer() {}
 
   gfx::IntPoint GetTileOffset(TileIntPoint aPosition) const {
     gfx::IntSize scaledTileSize = GetScaledTileSize();
     return gfx::IntPoint(aPosition.x * scaledTileSize.width,
                          aPosition.y * scaledTileSize.height) + mTileOrigin;
new file mode 100644
--- /dev/null
+++ b/gfx/layers/apz/test/mochitest/helper_bug1285070.html
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <meta charset="utf-8">
+  <meta name="viewport" content="width=device-width; initial-scale=1.0">
+  <title>Test pointer events are dispatched once for touch tap</title>
+  <script type="application/javascript" src="/tests/SimpleTest/paint_listener.js"></script>
+  <script type="application/javascript" src="apz_test_utils.js"></script>
+  <script type="application/javascript" src="apz_test_native_event_utils.js"></script>
+  <script type="application/javascript">
+    function test() {
+      let pointerEventsList = ["pointerover", "pointerenter", "pointerdown",
+                               "pointerup", "pointerleave", "pointerout"];
+      let pointerEventsCount = {};
+
+      pointerEventsList.forEach((eventName) => {
+        pointerEventsCount[eventName] = 0;
+        document.getElementById('div1').addEventListener(eventName, (event) => {
+          dump("Received event " + event.type + "\n");
+          ++pointerEventsCount[event.type];
+        }, false);
+      });
+
+      document.addEventListener("click", (event) => {
+        is(event.target, document.getElementById('div1'), "Clicked on div (at " + event.clientX + "," + event.clientY + ")");
+        for (var key in pointerEventsCount) {
+          is(pointerEventsCount[key], 1, "Event " + key + " should be generated once");
+        }
+        subtestDone();
+      }, false);
+
+      synthesizeNativeTap(document.getElementById('div1'), 100, 100, () => {
+        dump("Finished synthesizing tap, waiting for div to be clicked...\n");
+      });
+    }
+
+    waitUntilApzStable().then(test);
+
+  </script>
+</head>
+<body>
+  <div id="div1" style="width: 200px; height: 200px; background: black"></div>
+</body>
+</html>
--- a/gfx/layers/apz/test/mochitest/mochitest.ini
+++ b/gfx/layers/apz/test/mochitest/mochitest.ini
@@ -1,14 +1,15 @@
 [DEFAULT]
 support-files =
   apz_test_utils.js
   apz_test_native_event_utils.js
   helper_bug982141.html
   helper_bug1151663.html
+  helper_bug1285070.html
   helper_iframe1.html
   helper_iframe2.html
   helper_subframe_style.css
   helper_basic_pan.html
   helper_div_pan.html
   helper_iframe_pan.html
   helper_scrollto_tap.html
   helper_tap.html
@@ -54,8 +55,12 @@ skip-if = (toolkit == 'windows')
 skip-if = (toolkit == 'android') # wheel events not supported on mobile
 [test_group_mouseevents.html]
 [test_touch_listeners_impacting_wheel.html]
 skip-if = (toolkit == 'android') || (toolkit == 'cocoa') # wheel events not supported on mobile, and synthesized wheel smooth-scrolling not supported on OS X
 [test_bug1253683.html]
 skip-if = (os == 'android') || (os == 'b2g') # uses wheel events which are not supported on mobile
 [test_group_zoom.html]
 skip-if = (toolkit != 'android') # only android supports zoom
+[test_bug1285070.html]
+# Windows touch injection doesn't work in automation, but this test can be run locally on a windows touch device.
+# On OS X we don't support touch events at all.
+skip-if = (toolkit == 'windows') || (toolkit == 'cocoa')
new file mode 100644
--- /dev/null
+++ b/gfx/layers/apz/test/mochitest/test_bug1285070.html
@@ -0,0 +1,30 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1285070
+-->
+<head>
+  <meta charset="utf-8">
+  <title>Test for Bug 1285070</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="application/javascript" src="apz_test_utils.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript">
+
+  var subtests = [
+    {'file': 'helper_bug1285070.html', 'prefs': [["dom.w3c_pointer_events.enabled", true]]}
+  ];
+
+  if (isApzEnabled()) {
+    SimpleTest.waitForExplicitFinish();
+    window.onload = function() {
+      runSubtestsSeriallyInFreshWindows(subtests)
+      .then(SimpleTest.finish);
+    };
+  }
+
+  </script>
+</head>
+<body>
+</body>
+</html>
--- a/gfx/layers/apz/test/reftest/reftest.list
+++ b/gfx/layers/apz/test/reftest/reftest.list
@@ -1,16 +1,16 @@
 # The following tests test the async positioning of the scrollbars.
 # Basic root-frame scrollbar with async scrolling
 skip-if(!asyncPan) fuzzy-if(Android,6,8) == async-scrollbar-1-v.html async-scrollbar-1-v-ref.html
 skip-if(!asyncPan) fuzzy-if(Android,6,8) == async-scrollbar-1-h.html async-scrollbar-1-h-ref.html
 skip-if(!asyncPan) fuzzy-if(Android,6,8) == async-scrollbar-1-vh.html async-scrollbar-1-vh-ref.html
 skip-if(!asyncPan) fuzzy-if(Android,6,8) == async-scrollbar-1-v-rtl.html async-scrollbar-1-v-rtl-ref.html
 skip-if(!asyncPan) fuzzy-if(Android,13,8) == async-scrollbar-1-h-rtl.html async-scrollbar-1-h-rtl-ref.html
-skip-if(!asyncPan) fuzzy-if(Android,8,10) == async-scrollbar-1-vh-rtl.html async-scrollbar-1-vh-rtl-ref.html
+skip-if(!asyncPan) fuzzy-if(Android,8,11) == async-scrollbar-1-vh-rtl.html async-scrollbar-1-vh-rtl-ref.html
 
 # Different async zoom levels. Since the scrollthumb gets async-scaled in the
 # compositor, the border-radius ends of the scrollthumb are going to be a little
 # off, hence the fuzzy-if clauses.
 skip-if(!asyncZoom) fuzzy-if(B2G,98,82) == async-scrollbar-zoom-1.html async-scrollbar-zoom-1-ref.html
 skip-if(!asyncZoom) fuzzy-if(B2G,94,146) == async-scrollbar-zoom-2.html async-scrollbar-zoom-2-ref.html
 
 # Meta-viewport tag support
--- a/gfx/layers/apz/util/APZCCallbackHelper.cpp
+++ b/gfx/layers/apz/util/APZCCallbackHelper.cpp
@@ -475,17 +475,20 @@ APZCCallbackHelper::DispatchSynthesizedM
   event.mTime = aTime;
   event.button = WidgetMouseEvent::eLeftButton;
   event.inputSource = nsIDOMMouseEvent::MOZ_SOURCE_TOUCH;
   event.mIgnoreRootScrollFrame = true;
   if (aMsg != eMouseMove) {
     event.mClickCount = 1;
   }
   event.mModifiers = aModifiers;
-
+  // Real touch events will generate corresponding pointer events. We set
+  // convertToPointer to false to prevent the synthesized mouse events from
+  // generating pointer events again.
+  event.convertToPointer = false;
   return DispatchWidgetEvent(event);
 }
 
 bool
 APZCCallbackHelper::DispatchMouseEvent(const nsCOMPtr<nsIPresShell>& aPresShell,
                                        const nsString& aType,
                                        const CSSPoint& aPoint,
                                        int32_t aButton,
--- a/gfx/layers/basic/BasicCompositor.cpp
+++ b/gfx/layers/basic/BasicCompositor.cpp
@@ -5,16 +5,17 @@
 
 #include "BasicCompositor.h"
 #include "BasicLayersImpl.h"            // for FillRectWithMask
 #include "TextureHostBasic.h"
 #include "mozilla/layers/Effects.h"
 #include "nsIWidget.h"
 #include "gfx2DGlue.h"
 #include "mozilla/gfx/2D.h"
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/Helpers.h"
 #include "mozilla/gfx/Tools.h"
 #include "mozilla/gfx/ssse3-scaler.h"
 #include "mozilla/layers/ImageDataSerializer.h"
 #include "mozilla/SSE.h"
 #include "gfxUtils.h"
 #include "YCbCrUtils.h"
 #include <algorithm>
@@ -179,18 +180,17 @@ public:
 };
 
 BasicCompositor::BasicCompositor(CompositorBridgeParent* aParent, widget::CompositorWidget* aWidget)
   : Compositor(aWidget, aParent)
   , mDidExternalComposition(false)
 {
   MOZ_COUNT_CTOR(BasicCompositor);
 
-  mMaxTextureSize =
-    Factory::GetMaxSurfaceSize(gfxPlatform::GetPlatform()->GetContentBackendFor(LayersBackend::LAYERS_BASIC));
+  mMaxTextureSize = Factory::GetMaxSurfaceSize(gfxVars::ContentBackend());
 }
 
 BasicCompositor::~BasicCompositor()
 {
   MOZ_COUNT_DTOR(BasicCompositor);
 }
 
 bool
--- a/gfx/layers/client/ClientTiledPaintedLayer.cpp
+++ b/gfx/layers/client/ClientTiledPaintedLayer.cpp
@@ -7,16 +7,17 @@
 #include "Units.h"                      // for ScreenIntRect, CSSPoint, etc
 #include "UnitTransforms.h"             // for TransformTo
 #include "ClientLayerManager.h"         // for ClientLayerManager, etc
 #include "gfxPlatform.h"                // for gfxPlatform
 #include "gfxPrefs.h"                   // for gfxPrefs
 #include "gfxRect.h"                    // for gfxRect
 #include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
 #include "mozilla/gfx/BaseSize.h"       // for BaseSize
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/Rect.h"           // for Rect, RectTyped
 #include "mozilla/layers/CompositorBridgeChild.h"
 #include "mozilla/layers/LayerMetricsWrapper.h" // for LayerMetricsWrapper
 #include "mozilla/layers/LayersMessages.h"
 #include "mozilla/mozalloc.h"           // for operator delete, etc
 #include "nsISupportsImpl.h"            // for MOZ_COUNT_CTOR, etc
 #include "LayersLogging.h"
 #include "mozilla/layers/SingleTiledContentClient.h"
@@ -418,18 +419,17 @@ ClientTiledPaintedLayer::EndPaint()
 void
 ClientTiledPaintedLayer::RenderLayer()
 {
   LayerManager::DrawPaintedLayerCallback callback =
     ClientManager()->GetPaintedLayerCallback();
   void *data = ClientManager()->GetPaintedLayerCallbackData();
 
   IntSize layerSize = mVisibleRegion.ToUnknownRegion().GetBounds().Size();
-  IntSize tileSize(gfxPlatform::GetPlatform()->GetTileWidth(),
-                   gfxPlatform::GetPlatform()->GetTileHeight());
+  IntSize tileSize = gfx::gfxVars::TileSize();
   bool isHalfTileWidthOrHeight = layerSize.width <= tileSize.width / 2 ||
     layerSize.height <= tileSize.height / 2;
 
   // Use single tile when layer is not scrollable, is smaller than one
   // tile, or when more than half of the tiles' pixels in either
   // dimension would be wasted.
   bool wantSingleTiledContentClient =
       (mCreationHint == LayerManager::NONE ||
@@ -471,19 +471,17 @@ ClientTiledPaintedLayer::RenderLayer()
   // This is handled by PadDrawTargetOutFromRegion in TiledContentClient for mobile
   if (MayResample()) {
     // If we're resampling then bilinear filtering can read up to 1 pixel
     // outside of our texture coords. Make the visible region a single rect,
     // and pad it out by 1 pixel (restricted to tile boundaries) so that
     // we always have valid content or transparent pixels to sample from.
     IntRect bounds = neededRegion.GetBounds();
     IntRect wholeTiles = bounds;
-    wholeTiles.InflateToMultiple(IntSize(
-      gfxPlatform::GetPlatform()->GetTileWidth(),
-      gfxPlatform::GetPlatform()->GetTileHeight()));
+    wholeTiles.InflateToMultiple(gfx::gfxVars::TileSize());
     IntRect padded = bounds;
     padded.Inflate(1);
     padded.IntersectRect(padded, wholeTiles);
     neededRegion = padded;
   }
 #endif
 
   nsIntRegion invalidRegion;
--- a/gfx/layers/client/ContentClient.cpp
+++ b/gfx/layers/client/ContentClient.cpp
@@ -74,17 +74,17 @@ ContentClient::CreateContentClient(Compo
     useDoubleBuffering = gfxWindowsPlatform::GetPlatform()->IsDirect2DBackend();
   } else
 #endif
 #ifdef MOZ_WIDGET_GTK
   // We can't use double buffering when using image content with
   // Xrender support on Linux, as ContentHostDoubleBuffered is not
   // suited for direct uploads to the server.
   if (!gfxPlatformGtk::GetPlatform()->UseImageOffscreenSurfaces() ||
-      !gfxPlatformGtk::GetPlatform()->UseXRender())
+      !gfxVars::UseXRender())
 #endif
   {
     useDoubleBuffering = (LayerManagerComposite::SupportsDirectTexturing() &&
                          backend != LayersBackend::LAYERS_D3D9) ||
                          backend == LayersBackend::LAYERS_BASIC;
   }
 
   if (useDoubleBuffering || gfxEnv::ForceDoubleBuffering()) {
--- a/gfx/layers/ipc/CompositorBridgeChild.cpp
+++ b/gfx/layers/ipc/CompositorBridgeChild.cpp
@@ -11,16 +11,17 @@
 #include "ClientLayerManager.h"         // for ClientLayerManager
 #include "base/message_loop.h"          // for MessageLoop
 #include "base/task.h"                  // for NewRunnableMethod, etc
 #include "gfxPrefs.h"
 #include "mozilla/layers/LayerTransactionChild.h"
 #include "mozilla/layers/PLayerTransactionChild.h"
 #include "mozilla/layers/TextureClient.h"// for TextureClient
 #include "mozilla/layers/TextureClientPool.h"// for TextureClientPool
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/GPUProcessManager.h"
 #include "mozilla/mozalloc.h"           // for operator new, etc
 #include "nsAutoPtr.h"
 #include "nsDebug.h"                    // for NS_RUNTIMEABORT
 #include "nsIObserver.h"                // for nsIObserver
 #include "nsISupportsImpl.h"            // for MOZ_COUNT_CTOR, etc
 #include "nsTArray.h"                   // for nsTArray, nsTArray_Impl
 #include "nsXULAppAPI.h"                // for XRE_GetIOMessageLoop, etc
@@ -177,21 +178,16 @@ CompositorBridgeChild::InitForContent(En
     NS_RUNTIMEABORT("Couldn't Open() Compositor channel.");
     return false;
   }
 
   child->mCanSend = true;
 
   // We release this ref in DeferredDestroyCompositor.
   sCompositorBridge = child;
-
-  int32_t width;
-  int32_t height;
-  sCompositorBridge->SendGetTileSize(&width, &height);
-  gfxPlatform::GetPlatform()->SetTileSize(width, height);
   return true;
 }
 
 CompositorBridgeParent*
 CompositorBridgeChild::InitSameProcess(widget::CompositorWidget* aWidget,
                                        const uint64_t& aLayerTreeId,
                                        CSSToLayoutDeviceScale aScale,
                                        bool aUseAPZ,
@@ -752,26 +748,16 @@ CompositorBridgeChild::SendFlushRenderin
   MOZ_ASSERT(mCanSend);
   if (!mCanSend) {
     return true;
   }
   return PCompositorBridgeChild::SendFlushRendering();
 }
 
 bool
-CompositorBridgeChild::SendGetTileSize(int32_t* tileWidth, int32_t* tileHeight)
-{
-  MOZ_ASSERT(mCanSend);
-  if (!mCanSend) {
-    return true;
-  }
-  return PCompositorBridgeChild::SendGetTileSize(tileWidth, tileHeight);
-}
-
-bool
 CompositorBridgeChild::SendStartFrameTimeRecording(const int32_t& bufferSize, uint32_t* startIndex)
 {
   MOZ_ASSERT(mCanSend);
   if (!mCanSend) {
     return true;
   }
   return PCompositorBridgeChild::SendStartFrameTimeRecording(bufferSize, startIndex);
 }
@@ -939,18 +925,17 @@ CompositorBridgeChild::GetTexturePool(La
         mTexturePools[i]->GetFormat() == aFormat &&
         mTexturePools[i]->GetFlags() == aFlags) {
       return mTexturePools[i];
     }
   }
 
   mTexturePools.AppendElement(
       new TextureClientPool(aBackend, aFormat,
-                            IntSize(gfxPlatform::GetPlatform()->GetTileWidth(),
-                                    gfxPlatform::GetPlatform()->GetTileHeight()),
+                            gfx::gfxVars::TileSize(),
                             aFlags,
                             gfxPrefs::LayersTileInitialPoolSize(),
                             gfxPrefs::LayersTilePoolUnusedSize(),
                             this));
 
   return mTexturePools.LastElement();
 }
 
--- a/gfx/layers/ipc/CompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CompositorBridgeParent.cpp
@@ -55,17 +55,16 @@
 #endif
 #include "nsCOMPtr.h"                   // for already_AddRefed
 #include "nsDebug.h"                    // for NS_ASSERTION, etc
 #include "nsISupportsImpl.h"            // for MOZ_COUNT_CTOR, etc
 #include "nsIWidget.h"                  // for nsIWidget
 #include "nsTArray.h"                   // for nsTArray
 #include "nsThreadUtils.h"              // for NS_IsMainThread
 #include "nsXULAppAPI.h"                // for XRE_GetIOMessageLoop
-#include "nsIXULRuntime.h"              // for BrowserTabsRemoteAutostart
 #ifdef XP_WIN
 #include "mozilla/layers/CompositorD3D11.h"
 #include "mozilla/layers/CompositorD3D9.h"
 #endif
 #include "GeckoProfiler.h"
 #include "mozilla/ipc/ProtocolTypes.h"
 #include "mozilla/unused.h"
 #include "mozilla/Hal.h"
@@ -816,24 +815,16 @@ CompositorBridgeParent::RecvForcePresent
   // During the shutdown sequence mLayerManager may be null
   if (mLayerManager) {
     mLayerManager->ForcePresent();
   }
   return true;
 }
 
 bool
-CompositorBridgeParent::RecvGetTileSize(int32_t* aWidth, int32_t* aHeight)
-{
-  *aWidth = gfxPlatform::GetPlatform()->GetTileWidth();
-  *aHeight = gfxPlatform::GetPlatform()->GetTileHeight();
-  return true;
-}
-
-bool
 CompositorBridgeParent::RecvNotifyRegionInvalidated(const nsIntRegion& aRegion)
 {
   if (mLayerManager) {
     mLayerManager->AddInvalidRegion(aRegion);
   }
   return true;
 }
 
@@ -1212,17 +1203,17 @@ CompositorBridgeParent::CompositeToTarge
   bool updatePluginsFlag = true;
   AutoResolveRefLayers resolve(mCompositionManager, this,
                                &hasRemoteContent,
                                &updatePluginsFlag);
 
 #if defined(XP_WIN) || defined(MOZ_WIDGET_GTK)
   // We do not support plugins in local content. When switching tabs
   // to local pages, hide every plugin associated with the window.
-  if (!hasRemoteContent && BrowserTabsRemoteAutostart() &&
+  if (!hasRemoteContent && gfxVars::BrowserTabsRemoteAutostart() &&
       mCachedPluginData.Length()) {
     Unused << SendHideAllPlugins(GetWidget()->GetWidgetKey());
     mCachedPluginData.Clear();
   }
 #endif
 
   if (aTarget) {
     mLayerManager->BeginTransactionWithDrawTarget(aTarget, *aRect);
@@ -1580,17 +1571,17 @@ CompositorBridgeParent::NewCompositor(co
     if (aBackendHints[i] == LayersBackend::LAYERS_OPENGL) {
       compositor = new CompositorOGL(this,
                                      mWidget,
                                      mEGLSurfaceSize.width,
                                      mEGLSurfaceSize.height,
                                      mUseExternalSurfaceSize);
     } else if (aBackendHints[i] == LayersBackend::LAYERS_BASIC) {
 #ifdef MOZ_WIDGET_GTK
-      if (gfxPlatformGtk::GetPlatform()->UseXRender()) {
+      if (gfxVars::UseXRender()) {
         compositor = new X11BasicCompositor(this, mWidget);
       } else
 #endif
       {
         compositor = new BasicCompositor(this, mWidget);
       }
 #ifdef XP_WIN
     } else if (aBackendHints[i] == LayersBackend::LAYERS_D3D11) {
@@ -2038,23 +2029,16 @@ public:
     }
 
     parent->UpdateVisibleRegion(aCounter, aGuid, aRegion);
     return true;
   }
 
   virtual bool RecvAllPluginsCaptured() override { return true; }
 
-  virtual bool RecvGetTileSize(int32_t* aWidth, int32_t* aHeight) override
-  {
-    *aWidth = gfxPlatform::GetPlatform()->GetTileWidth();
-    *aHeight = gfxPlatform::GetPlatform()->GetTileHeight();
-    return true;
-  }
-
   virtual bool RecvGetFrameUniformity(FrameUniformityData* aOutData) override
   {
     // Don't support calculating frame uniformity on the child process and
     // this is just a stub for now.
     MOZ_ASSERT(false);
     return true;
   }
 
--- a/gfx/layers/ipc/CompositorBridgeParent.h
+++ b/gfx/layers/ipc/CompositorBridgeParent.h
@@ -242,18 +242,16 @@ public:
   virtual bool RecvFlushRendering() override;
   virtual bool RecvForcePresent() override;
 
   virtual bool RecvAcknowledgeCompositorUpdate(const uint64_t& aLayersId) override {
     MOZ_ASSERT_UNREACHABLE("This message is only sent cross-process");
     return true;
   }
 
-  virtual bool RecvGetTileSize(int32_t* aWidth, int32_t* aHeight) override;
-
   virtual bool RecvNotifyRegionInvalidated(const nsIntRegion& aRegion) override;
   virtual bool RecvStartFrameTimeRecording(const int32_t& aBufferSize, uint32_t* aOutStartIndex) override;
   virtual bool RecvStopFrameTimeRecording(const uint32_t& aStartIndex, InfallibleTArray<float>* intervals) override;
 
   // Unused for chrome <-> compositor communication (which this class does).
   // @see CrossProcessCompositorBridgeParent::RecvRequestNotifyAfterRemotePaint
   virtual bool RecvRequestNotifyAfterRemotePaint() override { return true; };
 
--- a/gfx/layers/ipc/PCompositorBridge.ipdl
+++ b/gfx/layers/ipc/PCompositorBridge.ipdl
@@ -157,20 +157,16 @@ parent:
   // Make sure any pending composites are started immediately and
   // block until they are completed.
   sync FlushRendering();
 
   // Force an additional frame presentation to be executed. This is used to
   // work around a windows presentation bug (See Bug 1232042)
   async ForcePresent();
 
-  // Get the size of the tiles. This number should not change at runtime.
-  sync GetTileSize()
-    returns (int32_t tileWidth, int32_t tileHeight);
-
   sync StartFrameTimeRecording(int32_t bufferSize)
     returns (uint32_t startIndex);
 
   sync StopFrameTimeRecording(uint32_t startIndex)
     returns (float[] intervals);
 
   // layersBackendHints is an ordered list of preffered backends where
   // layersBackendHints[0] is the best backend. If any hints are LayersBackend::LAYERS_NONE
--- a/gfx/qcms/transform_util.h
+++ b/gfx/qcms/transform_util.h
@@ -20,16 +20,17 @@
 // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
 // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
 // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #ifndef _QCMS_TRANSFORM_UTIL_H
 #define _QCMS_TRANSFORM_UTIL_H
 
 #include <stdlib.h>
+#include <math.h>
 
 #define CLU(table,x,y,z) table[(x*len + y*x_len + z*xy_len)*3]
 
 //XXX: could use a bettername
 typedef uint16_t uint16_fract_t;
 
 float lut_interp_linear(double value, uint16_t *table, int length);
 float lut_interp_linear_float(float value, float *table, int length);
--- a/gfx/skia/skia/src/utils/SkDashPath.cpp
+++ b/gfx/skia/skia/src/utils/SkDashPath.cpp
@@ -181,17 +181,17 @@ public:
         dst->incReserve(n);
 
         // we will take care of the stroking
         rec->setFillStyle();
         return true;
     }
 
     void addSegment(SkScalar d0, SkScalar d1, SkPath* path) const {
-        SkASSERT(d0 < fPathLength);
+        SkASSERT(d0 <= fPathLength);
         // clamp the segment to our length
         if (d1 > fPathLength) {
             d1 = fPathLength;
         }
 
         SkScalar x0 = fPts[0].fX + SkScalarMul(fTangent.fX, d0);
         SkScalar x1 = fPts[0].fX + SkScalarMul(fTangent.fX, d1);
         SkScalar y0 = fPts[0].fY + SkScalarMul(fTangent.fY, d0);
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -3,16 +3,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/layers/CompositorBridgeChild.h"
 #include "mozilla/layers/CompositorThread.h"
 #include "mozilla/layers/ImageBridgeChild.h"
 #include "mozilla/layers/SharedBufferManagerChild.h"
 #include "mozilla/layers/ISurfaceAllocator.h"     // for GfxMemoryImageReporter
+#include "mozilla/gfx/gfxVars.h"
 #include "mozilla/gfx/GPUProcessManager.h"
 #include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/TimeStamp.h"
 
 #include "mozilla/Logging.h"
 #include "mozilla/Services.h"
 #include "prprf.h"
@@ -478,19 +479,17 @@ MemoryPressureObserver::Observe(nsISuppo
     gfxGradientCache::PurgeAllCaches();
 
     gfxPlatform::PurgeSkiaFontCache();
     gfxPlatform::GetPlatform()->PurgeSkiaGPUCache();
     return NS_OK;
 }
 
 gfxPlatform::gfxPlatform()
-  : mTileWidth(-1)
-  , mTileHeight(-1)
-  , mAzureCanvasBackendCollector(this, &gfxPlatform::GetAzureBackendInfo)
+  : mAzureCanvasBackendCollector(this, &gfxPlatform::GetAzureBackendInfo)
   , mApzSupportCollector(this, &gfxPlatform::GetApzSupportInfo)
   , mTilesInfoCollector(this, &gfxPlatform::GetTilesSupportInfo)
   , mCompositorBackend(layers::LayersBackend::LAYERS_NONE)
   , mScreenDepth(0)
   , mDeviceCounter(0)
 {
     mAllowDownloadableFonts = UNINITIALIZED_VALUE;
     mFallbackUsesCmaps = UNINITIALIZED_VALUE;
@@ -588,16 +587,17 @@ gfxPlatform::Init()
     if (gEverInitialized) {
         NS_RUNTIMEABORT("Already started???");
     }
     gEverInitialized = true;
 
     // Initialize the preferences by creating the singleton.
     gfxPrefs::GetSingleton();
     MediaPrefs::GetSingleton();
+    gfxVars::Initialize();
 
     gfxConfig::Init();
 
     if (XRE_IsParentProcess()) {
       GPUProcessManager::Initialize();
     }
 
     auto fwd = new CrashStatsLogForwarder("GraphicsCriticalError");
@@ -867,16 +867,17 @@ gfxPlatform::Shutdown()
     // delete it.
     delete mozilla::gfx::Factory::GetLogForwarder();
     mozilla::gfx::Factory::SetLogForwarder(nullptr);
 
     gfx::Factory::ShutDown();
 
     delete gGfxPlatformPrefsLock;
 
+    gfxVars::Shutdown();
     gfxPrefs::DestroySingleton();
     gfxFont::DestroySingletons();
 
     gfxConfig::Shutdown();
 
     delete gPlatform;
     gPlatform = nullptr;
 }
@@ -1153,42 +1154,16 @@ gfxPlatform::GetScaledFontForFont(DrawTa
 {
   NativeFont nativeFont;
   nativeFont.mType = NativeFontType::CAIRO_FONT_FACE;
   nativeFont.mFont = aFont->GetCairoScaledFont();
   return Factory::CreateScaledFontForNativeFont(nativeFont,
                                                 aFont->GetAdjustedSize());
 }
 
-int
-gfxPlatform::GetTileWidth()
-{
-  MOZ_ASSERT(mTileWidth != -1);
-  return mTileWidth;
-}
-
-int
-gfxPlatform::GetTileHeight()
-{
-  MOZ_ASSERT(mTileHeight != -1);
-  return mTileHeight;
-}
-
-void
-gfxPlatform::SetTileSize(int aWidth, int aHeight)
-{
-  // Don't allow changing the tile size after we've set it.
-  // Right now the code assumes that the tile size doesn't change.
-  MOZ_ASSERT((mTileWidth == -1 && mTileHeight == -1) ||
-    (mTileWidth == aWidth && mTileHeight == aHeight));
-
-  mTileWidth = aWidth;
-  mTileHeight = aHeight;
-}
-
 void
 gfxPlatform::ComputeTileSize()
 {
   // The tile size should be picked in the parent processes
   // and sent to the child processes over IPDL GetTileSize.
   if (!XRE_IsParentProcess()) {
     return;
   }
@@ -1213,17 +1188,22 @@ gfxPlatform::ComputeTileSize()
                                      android::GraphicBuffer::USAGE_HW_TEXTURE);
 
     if (alloc.get()) {
       w = alloc->getStride(); // We want the tiles to be gralloc stride aligned.
     }
 #endif
   }
 
-  SetTileSize(w, h);
+  // Don't allow changing the tile size after we've set it.
+  // Right now the code assumes that the tile size doesn't change.
+  MOZ_ASSERT(gfxVars::TileSize().width == -1 &&
+             gfxVars::TileSize().height == -1);
+
+  gfxVars::SetTileSize(IntSize(w, h));
 }
 
 void
 gfxPlatform::PopulateScreenInfo()
 {
   nsCOMPtr<nsIScreenManager> manager = do_GetService("@mozilla.org/gfx/screenmanager;1");
   MOZ_ASSERT(manager, "failed to get nsIScreenManager");
 
@@ -1647,16 +1627,20 @@ gfxPlatform::InitBackendPrefs(uint32_t a
     mContentBackend = GetContentBackendPref(mContentBackendBitmask);
     if (mContentBackend == BackendType::NONE) {
         mContentBackend = aContentDefault;
         // mContentBackendBitmask is our canonical reference for supported
         // backends so we need to add the default if we are using it and
         // overriding the prefs.
         mContentBackendBitmask |= BackendTypeBit(aContentDefault);
     }
+
+    if (XRE_IsParentProcess()) {
+        gfxVars::SetContentBackend(mContentBackend);
+    }
 }
 
 /* static */ BackendType
 gfxPlatform::GetCanvasBackendPref(uint32_t aBackendBitmask)
 {
     return GetBackendPref("gfx.canvas.azure.backends", aBackendBitmask);
 }
 
@@ -2090,17 +2074,16 @@ gfxPlatform::OptimalFormatForContent(gfx
  * that should be consistent for the lifetime of the application (bug 840967).
  * As such, we will evaluate them all as soon as one of them is evaluated
  * and remember the values.  Changing these preferences during the run will
  * not have any effect until we restart.
  */
 static mozilla::Atomic<bool> sLayersSupportsHardwareVideoDecoding(false);
 static bool sLayersHardwareVideoDecodingFailed = false;
 static bool sBufferRotationCheckPref = true;
-static bool sPrefBrowserTabsRemoteAutostart = false;
 
 static mozilla::Atomic<bool> sLayersAccelerationPrefsInitialized(false);
 
 void
 gfxPlatform::InitAcceleration()
 {
   if (sLayersAccelerationPrefsInitialized) {
     return;
@@ -2110,17 +2093,20 @@ gfxPlatform::InitAcceleration()
 
   // If this is called for the first time on a non-main thread, we're screwed.
   // At the moment there's no explicit guarantee that the main thread calls
   // this before the compositor thread, but let's at least make the assumption
   // explicit.
   MOZ_ASSERT(NS_IsMainThread(), "can only initialize prefs on the main thread");
 
   gfxPrefs::GetSingleton();
-  sPrefBrowserTabsRemoteAutostart = BrowserTabsRemoteAutostart();
+
+  if (XRE_IsParentProcess()) {
+    gfxVars::SetBrowserTabsRemoteAutostart(BrowserTabsRemoteAutostart());
+  }
 
   nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
   nsCString discardFailureId;
   int32_t status;
 
   if (Preferences::GetBool("media.hardware-video-decoding.enabled", false) &&
 #ifdef XP_WIN
     Preferences::GetBool("media.windows-media-foundation.use-dxva", true) &&
@@ -2252,17 +2238,17 @@ gfxPlatform::GetParentDevicePrefs()
 gfxPlatform::UsesOffMainThreadCompositing()
 {
   static bool firstTime = true;
   static bool result = false;
 
   if (firstTime) {
     MOZ_ASSERT(sLayersAccelerationPrefsInitialized);
     result =
-      sPrefBrowserTabsRemoteAutostart ||
+      gfxVars::BrowserTabsRemoteAutostart() ||
       !gfxPrefs::LayersOffMainThreadCompositionForceDisabled();
 #if defined(MOZ_WIDGET_GTK)
     // Linux users who chose OpenGL are being grandfathered in to OMTC
     result |= gfxPrefs::LayersAccelerationForceEnabledDoNotUseDirectly();
 
 #endif
     firstTime = false;
   }
@@ -2339,18 +2325,19 @@ gfxPlatform::GetApzSupportInfo(mozilla::
 
 void
 gfxPlatform::GetTilesSupportInfo(mozilla::widget::InfoObject& aObj)
 {
   if (!gfxPrefs::LayersTilesEnabled()) {
     return;
   }
 
-  aObj.DefineProperty("TileHeight", mTileHeight);
-  aObj.DefineProperty("TileWidth", mTileWidth);
+  IntSize tileSize = gfxVars::TileSize();
+  aObj.DefineProperty("TileHeight", tileSize.height);
+  aObj.DefineProperty("TileWidth", tileSize.width);
 }
 
 /*static*/ bool
 gfxPlatform::AsyncPanZoomEnabled()
 {
 #if !defined(MOZ_B2G) && !defined(MOZ_WIDGET_ANDROID) && !defined(MOZ_WIDGET_UIKIT)
   // For XUL applications (everything but B2G on mobile and desktop, and
   // Firefox on Android) we only want to use APZ when E10S is enabled. If
--- a/gfx/thebes/gfxPlatform.h
+++ b/gfx/thebes/gfxPlatform.h
@@ -111,16 +111,18 @@ GetBackendName(mozilla::gfx::BackendType
       case mozilla::gfx::BackendType::SKIA:
         return "skia";
       case mozilla::gfx::BackendType::RECORDING:
         return "recording";
       case mozilla::gfx::BackendType::DIRECT2D1_1:
         return "direct2d 1.1";
       case mozilla::gfx::BackendType::NONE:
         return "none";
+      case mozilla::gfx::BackendType::BACKEND_LAST:
+        return "invalid";
   }
   MOZ_CRASH("Incomplete switch");
 }
 
 enum class DeviceResetReason
 {
   OK = 0,
   HUNG,
@@ -309,20 +311,16 @@ public:
      * Fill aListOfFonts with the results of querying the list of font names
      * that correspond to the given language group or generic font family
      * (or both, or neither).
      */
     virtual nsresult GetFontList(nsIAtom *aLangGroup,
                                  const nsACString& aGenericFamily,
                                  nsTArray<nsString>& aListOfFonts);
 
-    int GetTileWidth();
-    int GetTileHeight();
-    void SetTileSize(int aWidth, int aHeight);
-
     /**
      * Rebuilds the any cached system font lists
      */
     virtual nsresult UpdateFontList();
 
     /**
      * Create the platform font-list object (gfxPlatformFontList concrete subclass).
      * This function is responsible to create the appropriate subclass of
@@ -801,19 +799,16 @@ private:
     mozilla::gfx::BackendType mPreferredCanvasBackend;
     // The fallback draw target backend to use for canvas, if the preferred backend fails
     mozilla::gfx::BackendType mFallbackCanvasBackend;
     // The backend to use for content
     mozilla::gfx::BackendType mContentBackend;
     // Bitmask of backend types we can use to render content
     uint32_t mContentBackendBitmask;
 
-    int mTileWidth;
-    int mTileHeight;
-
     mozilla::widget::GfxInfoCollector<gfxPlatform> mAzureCanvasBackendCollector;
     mozilla::widget::GfxInfoCollector<gfxPlatform> mApzSupportCollector;
     mozilla::widget::GfxInfoCollector<gfxPlatform> mTilesInfoCollector;
 
     RefPtr<mozilla::gfx::DrawEventRecorder> mRecorder;
     RefPtr<mozilla::gl::SkiaGLGlue> mSkiaGlue;
 
     // Backend that we are compositing with. NONE, if no compositor has been
--- a/gfx/thebes/gfxPlatformGtk.cpp
+++ b/gfx/thebes/gfxPlatformGtk.cpp
@@ -68,36 +68,37 @@ using namespace mozilla::gfx;
 using namespace mozilla::unicode;
 
 gfxFontconfigUtils *gfxPlatformGtk::sFontconfigUtils = nullptr;
 
 #if (MOZ_WIDGET_GTK == 2)
 static cairo_user_data_key_t cairo_gdk_drawable_key;
 #endif
 
-#ifdef MOZ_X11
-    bool gfxPlatformGtk::sUseXRender = true;
-#endif
-
 bool gfxPlatformGtk::sUseFcFontList = false;
 
 gfxPlatformGtk::gfxPlatformGtk()
 {
     gtk_init(nullptr, nullptr);
 
     sUseFcFontList = mozilla::Preferences::GetBool("gfx.font_rendering.fontconfig.fontlist.enabled");
     if (!sUseFcFontList && !sFontconfigUtils) {
         sFontconfigUtils = gfxFontconfigUtils::GetFontconfigUtils();
     }
 
     mMaxGenericSubstitutions = UNINITIALIZED_VALUE;
 
 #ifdef MOZ_X11
-    sUseXRender = (GDK_IS_X11_DISPLAY(gdk_display_get_default())) ?
-                    mozilla::Preferences::GetBool("gfx.xrender.enabled") : false;
+    if (XRE_IsParentProcess()) {
+      if (GDK_IS_X11_DISPLAY(gdk_display_get_default()) &&
+          mozilla::Preferences::GetBool("gfx.xrender.enabled"))
+      {
+          gfxVars::SetUseXRender(true);
+      }
+    }
 #endif
 
     uint32_t canvasMask = BackendTypeBit(BackendType::CAIRO);
     uint32_t contentMask = BackendTypeBit(BackendType::CAIRO);
 #ifdef USE_SKIA
     canvasMask |= BackendTypeBit(BackendType::SKIA);
     contentMask |= BackendTypeBit(BackendType::SKIA);
 #endif
@@ -127,17 +128,17 @@ gfxPlatformGtk::~gfxPlatformGtk()
       XCloseDisplay(mCompositorDisplay);
     }
 #endif // MOZ_X11
 }
 
 void
 gfxPlatformGtk::FlushContentDrawing()
 {
-    if (UseXRender()) {
+    if (gfxVars::UseXRender()) {
         XFlush(DefaultXDisplay());
     }
 }
 
 already_AddRefed<gfxASurface>
 gfxPlatformGtk::CreateOffscreenSurface(const IntSize& aSize,
                                        gfxImageFormat aFormat)
 {
@@ -146,17 +147,17 @@ gfxPlatformGtk::CreateOffscreenSurface(c
 #ifdef MOZ_X11
     // XXX we really need a different interface here, something that passes
     // in more context, including the display and/or target surface type that
     // we should try to match
     GdkScreen *gdkScreen = gdk_screen_get_default();
     if (gdkScreen) {
         // When forcing PaintedLayers to use image surfaces for content,
         // force creation of gfxImageSurface surfaces.
-        if (UseXRender() && !UseImageOffscreenSurfaces()) {
+        if (gfxVars::UseXRender() && !UseImageOffscreenSurfaces()) {
             Screen *screen = gdk_x11_screen_get_xscreen(gdkScreen);
             XRenderPictFormat* xrenderFormat =
                 gfxXlibSurface::FindRenderFormat(DisplayOfScreen(screen),
                                                  aFormat);
 
             if (xrenderFormat) {
                 newSurface = gfxXlibSurface::Create(screen, xrenderFormat,
                                                     aSize);
--- a/gfx/thebes/gfxPlatformGtk.h
+++ b/gfx/thebes/gfxPlatformGtk.h
@@ -4,16 +4,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef GFX_PLATFORM_GTK_H
 #define GFX_PLATFORM_GTK_H
 
 #include "gfxPlatform.h"
 #include "nsAutoRef.h"
 #include "nsTArray.h"
+#include "mozilla/gfx/gfxVars.h"
 
 #if (MOZ_WIDGET_GTK == 2)
 extern "C" {
     typedef struct _GdkDrawable GdkDrawable;
 }
 #endif
 
 #ifdef MOZ_X11
@@ -98,28 +99,20 @@ public:
     static void SetGdkDrawable(cairo_surface_t *target,
                                GdkDrawable *drawable);
     static GdkDrawable *GetGdkDrawable(cairo_surface_t *target);
 #endif
 
     static int32_t GetDPI();
     static double  GetDPIScale();
 
-    bool UseXRender() {
-#if defined(MOZ_X11)
-        return sUseXRender;
-#else
-        return false;
-#endif
-    }
-
 #ifdef MOZ_X11
     virtual void GetAzureBackendInfo(mozilla::widget::InfoObject &aObj) override {
       gfxPlatform::GetAzureBackendInfo(aObj);
-      aObj.DefineProperty("CairoUseXRender", UseXRender());
+      aObj.DefineProperty("CairoUseXRender", mozilla::gfx::gfxVars::UseXRender());
     }
 #endif
 
     static bool UseFcFontList() { return sUseFcFontList; }
 
     bool UseImageOffscreenSurfaces();
 
     virtual gfxImageFormat GetOffscreenFormat() override;
@@ -154,18 +147,16 @@ protected:
 
     int8_t mMaxGenericSubstitutions;
 
 private:
     virtual void GetPlatformCMSOutputProfile(void *&mem,
                                              size_t &size) override;
 
 #ifdef MOZ_X11
-    static bool sUseXRender;
-
     Display* mCompositorDisplay;
 #endif
 
     // xxx - this will be removed once the new fontconfig platform font list
     // replaces gfxPangoFontGroup
     static bool sUseFcFontList;
 };
 
--- a/image/ISurfaceProvider.h
+++ b/image/ISurfaceProvider.h
@@ -13,94 +13,113 @@
 
 #include "mozilla/Maybe.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/TimeStamp.h"
 #include "mozilla/Variant.h"
 #include "mozilla/gfx/2D.h"
 
 #include "imgFrame.h"
+#include "SurfaceCache.h"
 
 namespace mozilla {
 namespace image {
 
+class CachedSurface;
+
 /**
  * An interface for objects which can either store a surface or dynamically
  * generate one.
  */
 class ISurfaceProvider
 {
 public:
   // Subclasses may or may not be XPCOM classes, so we just require that they
   // implement AddRef and Release.
   NS_IMETHOD_(MozExternalRefCountType) AddRef() = 0;
   NS_IMETHOD_(MozExternalRefCountType) Release() = 0;
 
   /// @return a drawable reference to a surface.
   virtual DrawableFrameRef DrawableRef() = 0;
 
-  /// @return true if this ISurfaceProvider is acting as a placeholder, which is
-  /// to say that it doesn't have a surface and hence can't return a
-  /// non-empty DrawableFrameRef yet, but it will be able to in the future.
-  virtual bool IsPlaceholder() const = 0;
-
   /// @return true if DrawableRef() will return a completely decoded surface.
   virtual bool IsFinished() const = 0;
 
-  /// @return true if this ISurfaceProvider is locked. (@see SetLocked())
-  virtual bool IsLocked() const = 0;
-
-  /// If @aLocked is true, hint that this ISurfaceProvider is in use and it
-  /// should avoid releasing its resources.
-  virtual void SetLocked(bool aLocked) = 0;
-
   /// @return the number of bytes of memory this ISurfaceProvider is expected to
   /// require. Optimizations may result in lower real memory usage. Trivial
   /// overhead is ignored.
   virtual size_t LogicalSizeInBytes() const = 0;
 
+  /// @return the availability state of this ISurfaceProvider, which indicates
+  /// whether DrawableRef() could successfully return a surface. Should only be
+  /// called from SurfaceCache code as it relies on SurfaceCache for
+  /// synchronization.
+  AvailabilityState& Availability() { return mAvailability; }
+  const AvailabilityState& Availability() const { return mAvailability; }
+
 protected:
+  explicit ISurfaceProvider(AvailabilityState aAvailability)
+    : mAvailability(aAvailability)
+  { }
+
   virtual ~ISurfaceProvider() { }
+
+  /// @return true if this ISurfaceProvider is locked. (@see SetLocked())
+  /// Should only be called from SurfaceCache code as it relies on SurfaceCache
+  /// for synchronization.
+  virtual bool IsLocked() const = 0;
+
+  /// If @aLocked is true, hint that this ISurfaceProvider is in use and it
+  /// should avoid releasing its resources. Should only be called from
+  /// SurfaceCache code as it relies on SurfaceCache for synchronization.
+  virtual void SetLocked(bool aLocked) = 0;
+
+private:
+  friend class CachedSurface;
+
+  AvailabilityState mAvailability;
 };
 
 /**
  * An ISurfaceProvider that stores a single surface.
  */
 class SimpleSurfaceProvider final : public ISurfaceProvider
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SimpleSurfaceProvider, override)
 
   explicit SimpleSurfaceProvider(NotNull<imgFrame*> aSurface)
-    : mSurface(aSurface)
+    : ISurfaceProvider(AvailabilityState::StartAvailable())
+    , mSurface(aSurface)
   { }
 
   DrawableFrameRef DrawableRef() override { return mSurface->DrawableRef(); }
-  bool IsPlaceholder() const override { return false; }
   bool IsFinished() const override { return mSurface->IsFinished(); }
+
+  size_t LogicalSizeInBytes() const override
+  {
+    gfx::IntSize size = mSurface->GetSize();
+    return size.width * size.height * mSurface->GetBytesPerPixel();
+  }
+
+protected:
   bool IsLocked() const override { return bool(mLockRef); }
 
   void SetLocked(bool aLocked) override
   {
     if (aLocked == IsLocked()) {
       return;  // Nothing changed.
     }
 
     // If we're locked, hold a DrawableFrameRef to |mSurface|, which will keep
     // any volatile buffer it owns in memory.
     mLockRef = aLocked ? mSurface->DrawableRef()
                        : DrawableFrameRef();
   }
 
-  size_t LogicalSizeInBytes() const override
-  {
-    gfx::IntSize size = mSurface->GetSize();
-    return size.width * size.height * mSurface->GetBytesPerPixel();
-  }
-
 private:
   virtual ~SimpleSurfaceProvider() { }
 
   NotNull<RefPtr<imgFrame>> mSurface;
   DrawableFrameRef mLockRef;
 };
 
 } // namespace image
--- a/image/SurfaceCache.cpp
+++ b/image/SurfaceCache.cpp
@@ -66,16 +66,18 @@ static StaticRefPtr<SurfaceCacheImpl> sI
  * simply an estimate of the size of the surface in bytes, but in the future it
  * may be worth taking into account the cost of rematerializing the surface as
  * well.
  */
 typedef size_t Cost;
 
 // Placeholders do not have surfaces, but need to be given a trivial cost for
 // our invariants to hold.
+// XXX(seth): This is only true of old-style placeholders inserted via
+// InsertPlaceholder().
 static const Cost sPlaceholderCost = 1;
 
 static Cost
 ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel)
 {
   MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
   return aSize.width * aSize.height * aBytesPerPixel;
 }
@@ -136,18 +138,18 @@ public:
                 const Cost         aCost,
                 const ImageKey     aImageKey,
                 const SurfaceKey&  aSurfaceKey)
     : mProvider(aProvider)
     , mCost(aCost)
     , mImageKey(aImageKey)
     , mSurfaceKey(aSurfaceKey)
   {
-    MOZ_ASSERT(!IsPlaceholder() || mCost == sPlaceholderCost,
-               "Placeholder should have trivial cost");
+    MOZ_ASSERT(aProvider || mCost == sPlaceholderCost,
+               "Old-style placeholders should have trivial cost");
     MOZ_ASSERT(mImageKey, "Must have a valid image key");
   }
 
   already_AddRefed<ISurfaceProvider> Provider() const
   {
     if (MOZ_UNLIKELY(IsPlaceholder())) {
       MOZ_ASSERT_UNREACHABLE("Shouldn't call Provider() on a placeholder");
       return nullptr;
@@ -163,17 +165,21 @@ public:
   {
     if (IsPlaceholder()) {
       return;  // Can't lock a placeholder.
     }
 
     mProvider->SetLocked(aLocked);
   }
 
-  bool IsPlaceholder() const { return !mProvider || mProvider->IsPlaceholder(); }
+  bool IsPlaceholder() const
+  {
+    return !mProvider || mProvider->Availability().IsPlaceholder();
+  }
+
   bool IsLocked() const { return !IsPlaceholder() && mProvider->IsLocked(); }
 
   ImageKey GetImageKey() const { return mImageKey; }
   SurfaceKey GetSurfaceKey() const { return mSurfaceKey; }
   CostEntry GetCostEntry() { return image::CostEntry(this, mCost); }
   nsExpirationState* GetExpirationState() { return &mExpirationState; }
 
   bool IsDecoded() const
@@ -444,17 +450,18 @@ private:
 public:
   void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
 
   Mutex& GetMutex() { return mMutex; }
 
   InsertOutcome Insert(ISurfaceProvider* aProvider,
                        const Cost        aCost,
                        const ImageKey    aImageKey,
-                       const SurfaceKey& aSurfaceKey)
+                       const SurfaceKey& aSurfaceKey,
+                       bool              aSetAvailable)
   {
     // If this is a duplicate surface, refuse to replace the original.
     // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
     // twice. We'll make this more efficient in bug 1185137.
     LookupResult result = Lookup(aImageKey, aSurfaceKey, /* aMarkUsed = */ false);
     if (MOZ_UNLIKELY(result)) {
       return InsertOutcome::FAILURE_ALREADY_PRESENT;
     }
@@ -485,16 +492,21 @@ public:
     // Locate the appropriate per-image cache. If there's not an existing cache
     // for this image, create it.
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       cache = new ImageSurfaceCache;
       mImageCaches.Put(aImageKey, cache);
     }
 
+    // If we were asked to mark the cache entry available, do so.
+    if (aSetAvailable) {
+      aProvider->Availability().SetAvailable();
+    }
+
     RefPtr<CachedSurface> surface =
       new CachedSurface(aProvider, aCost, aImageKey, aSurfaceKey);
 
     // We require that locking succeed if the image is locked and we're not
     // inserting a placeholder; the caller may need to know this to handle
     // errors correctly.
     if (cache->IsLocked() && !surface->IsPlaceholder()) {
       surface->SetLocked(true);
@@ -662,16 +674,35 @@ public:
     return aCost <= mMaxCost;
   }
 
   size_t MaximumCapacity() const
   {
     return size_t(mMaxCost);
   }
 
+  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
+                        const ImageKey             aImageKey,
+                        const SurfaceKey&          aSurfaceKey)
+  {
+    if (!aProvider->Availability().IsPlaceholder()) {
+      MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
+      return;
+    }
+
+    // Reinsert the provider, requesting that Insert() mark it available. This
+    // may or may not succeed, depending on whether some other decoder has
+    // beaten us to the punch and inserted a non-placeholder version of this
+    // surface first, but it's fine either way.
+    // XXX(seth): This could be implemented more efficiently; we should be able
+    // to just update our data structures without reinserting.
+    Cost cost = aProvider->LogicalSizeInBytes();
+    Insert(aProvider, cost, aImageKey, aSurfaceKey, /* aSetAvailable = */ true);
+  }
+
   void LockImage(const ImageKey aImageKey)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       cache = new ImageSurfaceCache;
       mImageCaches.Put(aImageKey, cache);
     }
 
@@ -1034,29 +1065,31 @@ SurfaceCache::Insert(NotNull<ISurfacePro
                      const SurfaceKey&          aSurfaceKey)
 {
   if (!sInstance) {
     return InsertOutcome::FAILURE;
   }
 
   MutexAutoLock lock(sInstance->GetMutex());
   Cost cost = aProvider->LogicalSizeInBytes();
-  return sInstance->Insert(aProvider.get(), cost, aImageKey, aSurfaceKey);
+  return sInstance->Insert(aProvider.get(), cost, aImageKey, aSurfaceKey,
+                           /* aSetAvailable = */ false);
 }
 
 /* static */ InsertOutcome
 SurfaceCache::InsertPlaceholder(const ImageKey    aImageKey,
                                 const SurfaceKey& aSurfaceKey)
 {
   if (!sInstance) {
     return InsertOutcome::FAILURE;
   }
 
   MutexAutoLock lock(sInstance->GetMutex());
-  return sInstance->Insert(nullptr, sPlaceholderCost, aImageKey, aSurfaceKey);
+  return sInstance->Insert(nullptr, sPlaceholderCost, aImageKey, aSurfaceKey,
+                           /* aSetAvailable = */ false);
 }
 
 /* static */ bool
 SurfaceCache::CanHold(const IntSize& aSize, uint32_t aBytesPerPixel /* = 4 */)
 {
   if (!sInstance) {
     return false;
   }
@@ -1071,16 +1104,29 @@ SurfaceCache::CanHold(size_t aSize)
   if (!sInstance) {
     return false;
   }
 
   return sInstance->CanHold(aSize);
 }
 
 /* static */ void
+SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
+                               const ImageKey             aImageKey,
+                               const SurfaceKey&          aSurfaceKey)
+{
+  if (!sInstance) {
+    return;
+  }
+
+  MutexAutoLock lock(sInstance->GetMutex());
+  sInstance->SurfaceAvailable(aProvider, aImageKey, aSurfaceKey);
+}
+
+/* static */ void
 SurfaceCache::LockImage(Image* aImageKey)
 {
   if (sInstance) {
     MutexAutoLock lock(sInstance->GetMutex());
     return sInstance->LockImage(aImageKey);
   }
 }
 
--- a/image/SurfaceCache.h
+++ b/image/SurfaceCache.h
@@ -24,16 +24,17 @@
 #include "SVGImageContext.h"         // for SVGImageContext
 
 namespace mozilla {
 namespace image {
 
 class Image;
 class ISurfaceProvider;
 class LookupResult;
+class SurfaceCacheImpl;
 struct SurfaceMemoryCounter;
 
 /*
  * ImageKey contains the information we need to look up all SurfaceCache entries
  * for a particular image.
  */
 typedef Image* ImageKey;
 
@@ -113,16 +114,46 @@ VectorSurfaceKey(const gfx::IntSize& aSi
                  float aAnimationTime)
 {
   // We don't care about aFlags for VectorImage because none of the flags we
   // have right now influence VectorImage's rendering. If we add a new flag that
   // *does* affect how a VectorImage renders, we'll have to change this.
   return SurfaceKey(aSize, aSVGContext, aAnimationTime, DefaultSurfaceFlags());
 }
 
+
+/**
+ * AvailabilityState is used to track whether an ISurfaceProvider has a surface
+ * available or is just a placeholder.
+ *
+ * To ensure that availability changes are atomic (and especially that internal
+ * SurfaceCache code doesn't have to deal with asynchronous availability
+ * changes), an ISurfaceProvider which starts as a placeholder can only reveal
+ * the fact that it now has a surface available via a call to
+ * SurfaceCache::SurfaceAvailable().
+ */
+class AvailabilityState
+{
+public:
+  static AvailabilityState StartAvailable() { return AvailabilityState(true); }
+  static AvailabilityState StartAsPlaceholder() { return AvailabilityState(false); }
+
+  bool IsAvailable() const { return mIsAvailable; }
+  bool IsPlaceholder() const { return !mIsAvailable; }
+
+private:
+  friend class SurfaceCacheImpl;
+
+  explicit AvailabilityState(bool aIsAvailable) : mIsAvailable(aIsAvailable) { }
+
+  void SetAvailable() { mIsAvailable = true; }
+
+  bool mIsAvailable;
+};
+
 enum class InsertOutcome : uint8_t {
   SUCCESS,                 // Success (but see Insert documentation).
   FAILURE,                 // Couldn't insert (e.g., for capacity reasons).
   FAILURE_ALREADY_PRESENT  // A surface with the same key is already present.
 };
 
 /**
  * SurfaceCache is an ImageLib-global service that allows caching of decoded
@@ -279,16 +310,47 @@ struct SurfaceCache
    *         FAILURE if the placeholder could not be inserted for some reason.
    *         FAILURE_ALREADY_PRESENT if an entry with the same ImageKey and
    *           SurfaceKey already exists in the cache.
    */
   static InsertOutcome InsertPlaceholder(const ImageKey    aImageKey,
                                          const SurfaceKey& aSurfaceKey);
 
   /**
+   * Mark the cache entry @aProvider as having an available surface. This turns
+   * a placeholder cache entry into a normal cache entry. The cache entry
+   * becomes locked if the associated image is locked; otherwise, it starts in
+   * the unlocked state.
+   *
+   * If the cache entry containing @aProvider has already been evicted from the
+   * surface cache, this function has no effect.
+   *
+   * It's illegal to call this function if @aProvider is not a placeholder; by
+   * definition, non-placeholder ISurfaceProviders should have a surface
+   * available already.
+   *
+   * XXX(seth): We're currently in a transitional state where two notions of
+   * placeholder exist: the old one (placeholders are an "empty" cache entry
+   * inserted via InsertPlaceholder(), which then gets replaced by inserting a
+   * real cache entry with the same keys via Insert()) and the new one (where
+   * the same cache entry, inserted via Insert(), starts in a placeholder state
+   * and then transitions to being a normal cache entry via this function). The
+   * old mechanism will be removed in bug 1292392.
+   *
+   * @param aProvider       The cache entry that now has a surface available.
+   * @param aImageKey       Key data identifying which image the cache entry
+   *                        belongs to.
+   * @param aSurfaceKey     Key data which uniquely identifies the requested
+   *                        cache entry.
+   */
+  static void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
+                               const ImageKey    aImageKey,
+                               const SurfaceKey& aSurfaceKey);
+
+  /**
    * Checks if a surface of a given size could possibly be stored in the cache.
    * If CanHold() returns false, Insert() will always fail to insert the
    * surface, but the inverse is not true: Insert() may take more information
    * into account than just image size when deciding whether to cache the
    * surface, so Insert() may still fail even if CanHold() returns true.
    *
    * Use CanHold() to avoid the need to create a temporary surface when we know
    * for sure the cache can't hold it.
--- a/ipc/ipdl/ipdl/lower.py
+++ b/ipc/ipdl/ipdl/lower.py
@@ -2495,23 +2495,31 @@ def _generateCxxUnion(ud):
         getconstvalue = MethodDefn(MethodDecl(
             getConstValueVar.name, ret=c.constRefType(),
             const=1, force_inline=1))
         getconstvalue.addstmts([
             StmtExpr(callAssertSanity(expectTypeVar=c.enumvar())),
             StmtReturn(c.getConstValue())
         ])
 
+        readvalue = MethodDefn(MethodDecl(
+            'get', ret=Type.VOID, const=1,
+            params=[Decl(c.ptrToType(), 'aOutValue')]))
+        readvalue.addstmts([
+            StmtExpr(ExprAssn(ExprDeref(ExprVar('aOutValue')),
+                              ExprCall(getConstValueVar)))
+        ])
+
         optype = MethodDefn(MethodDecl('', typeop=c.refType(), force_inline=1))
         optype.addstmt(StmtReturn(ExprCall(getValueVar)))
         opconsttype = MethodDefn(MethodDecl(
             '', const=1, typeop=c.constRefType(), force_inline=1))
         opconsttype.addstmt(StmtReturn(ExprCall(getConstValueVar)))
 
-        cls.addstmts([ getvalue, getconstvalue,
+        cls.addstmts([ getvalue, getconstvalue, readvalue,
                        optype, opconsttype,
                        Whitespace.NL ])
 
     # private vars
     cls.addstmts([
         Label.PRIVATE,
         StmtDecl(Decl(valuetype, mvaluevar.name)),
         StmtDecl(Decl(typetype, mtypevar.name))
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -2126,21 +2126,33 @@ class BaseCompiler
             masm.loadPtr(BaseIndex(scratch, ptrReg, ScalePointer, 0), ptrReg);
         }
 
         callDynamic(ptrReg, call);
     }
 
     // Precondition: sync()
 
-    void ffiCall(unsigned globalDataOffset, const FunctionCall& call)
+    void callImport(unsigned globalDataOffset, const FunctionCall& call)
     {
-        Register ptrReg = WasmTableCallPtrReg;
-        masm.loadWasmGlobalPtr(globalDataOffset, ptrReg);
+        // Load the callee, before the caller's registers are clobbered.
+        Register ptrReg = ABINonArgReg0;
+        masm.loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code), ptrReg);
+
+        // There is no need to preserve WasmTlsReg since it has already been
+        // spilled to a local slot.
+
+        // Switch to the caller's TLS and pinned registers and make the call.
+        masm.loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls), WasmTlsReg);
+        masm.loadWasmPinnedRegsFromTls();
         callDynamic(