Merge inbound to m-c. a=merge
author: Ryan VanderMeulen <ryanvm@gmail.com>
Mon, 17 Aug 2015 09:06:59 -0400
changeset 258005 a6eeb28458fd2652e12e57334f046b7776d75bb4
parent 257921 0a7e118ca369357717800c7e9b5a545079a01674 (current diff)
parent 258004 3bbd0d9291280d02bac1ed2e73298bc67b70cbda (diff)
child 258006 b3be345ec4d35949c9780af39840f466f705300b
child 258009 9b980b4f4358c1e15fa08082cdc67c5fea940ae3
child 258038 64c69057a183e8b6314576c8e5bddd7ffe2ee00c
child 258058 1f4f80d459cea87f7f70c188f349449967f42ee2
push id: 29238
push user: ryanvm@gmail.com
push date: Mon, 17 Aug 2015 13:06:57 +0000
treeherder: mozilla-central@a6eeb28458fd [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 43.0a1
first release with
nightly linux32
a6eeb28458fd / 43.0a1 / 20150817061040 / files
nightly linux64
a6eeb28458fd / 43.0a1 / 20150817061040 / files
nightly mac
a6eeb28458fd / 43.0a1 / 20150817061040 / files
nightly win32
a6eeb28458fd / 43.0a1 / 20150817061040 / files
nightly win64
a6eeb28458fd / 43.0a1 / 20150817061040 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge inbound to m-c. a=merge
--- a/browser/base/content/browser-sets.inc
+++ b/browser/base/content/browser-sets.inc
@@ -312,17 +312,17 @@
 
     <key id="key_scratchpad" keycode="&scratchpad.keycode;" modifiers="shift"
          keytext="&scratchpad.keytext;" command="Tools:Scratchpad"/>
     <key id="openFileKb" key="&openFileCmd.commandkey;" command="Browser:OpenFile"  modifiers="accel"/>
     <key id="key_savePage" key="&savePageCmd.commandkey;" command="Browser:SavePage" modifiers="accel"/>
     <key id="printKb" key="&printCmd.commandkey;" command="cmd_print"  modifiers="accel"/>
     <key id="key_close" key="&closeCmd.key;" command="cmd_close" modifiers="accel"/>
     <key id="key_closeWindow" key="&closeCmd.key;" command="cmd_closeWindow" modifiers="accel,shift"/>
-    <key id="key_toggleMute" key="&toggleMuteCmd.key;" command="cmd_toggleMute" modifiers="alt,shift"/>
+    <key id="key_toggleMute" key="&toggleMuteCmd.key;" command="cmd_toggleMute" modifiers="control"/>
     <key id="key_undo"
          key="&undoCmd.key;"
          modifiers="accel"/>
 #ifdef XP_UNIX
     <key id="key_redo" key="&undoCmd.key;" modifiers="accel,shift"/>
 #else
     <key id="key_redo" key="&redoCmd.key;" modifiers="accel"/>
 #endif
--- a/browser/base/content/test/general/browser_audioTabIcon.js
+++ b/browser/base/content/test/general/browser_audioTabIcon.js
@@ -39,24 +39,28 @@ function leave_icon(icon) {
 function* test_tooltip(icon, expectedTooltip) {
   let tooltip = document.getElementById("tabbrowser-tab-tooltip");
 
   yield hover_icon(icon, tooltip);
   is(tooltip.getAttribute("label").indexOf(expectedTooltip), 0, "Correct tooltip expected");
   leave_icon(icon);
 }
 
-function* test_mute_tab(tab, icon, expectMuted) {
-  let mutedPromise = BrowserTestUtils.waitForEvent(tab, "TabAttrModified", false, (event) => {
+function get_wait_for_mute_promise(tab, expectMuted) {
+  return BrowserTestUtils.waitForEvent(tab, "TabAttrModified", false, event => {
     if (event.detail.changed.indexOf("muted") >= 0) {
       is(tab.hasAttribute("muted"), expectMuted, "The tab should " + (expectMuted ? "" : "not ") + "be muted");
       return true;
     }
     return false;
   });
+}
+
+function* test_mute_tab(tab, icon, expectMuted) {
+  let mutedPromise = test_mute_keybinding(tab, expectMuted);
 
   let activeTab = gBrowser.selectedTab;
 
   let tooltip = document.getElementById("tabbrowser-tab-tooltip");
 
   yield hover_icon(icon, tooltip);
   EventUtils.synthesizeMouseAtCenter(icon, {button: 0});
   leave_icon(icon);
@@ -255,16 +259,76 @@ function* test_cross_process_load() {
   }
 
   yield BrowserTestUtils.withNewTab({
     gBrowser,
     url: PAGE
   }, test_on_browser);
 }
 
+function* test_mute_keybinding() {
+  function* test_muting_using_keyboard(tab) {
+    let mutedPromise = get_wait_for_mute_promise(tab, true);
+    EventUtils.synthesizeKey("m", {ctrlKey: true});
+    yield mutedPromise;
+    mutedPromise = get_wait_for_mute_promise(tab, false);
+    EventUtils.synthesizeKey("m", {ctrlKey: true});
+    yield mutedPromise;
+  }
+  function* test_on_browser(browser) {
+    let tab = gBrowser.getTabForBrowser(browser);
+
+    // Make sure it's possible to mute before the tab is playing.
+    yield test_muting_using_keyboard(tab);
+
+    // Start playback.
+    yield ContentTask.spawn(browser, {}, function* () {
+      let audio = content.document.querySelector("audio");
+      audio.play();
+    });
+
+    // Wait for playback to start.
+    yield wait_for_tab_playing_event(tab, true);
+
+    // Make sure it's possible to mute after the tab is playing.
+    yield test_muting_using_keyboard(tab);
+
+    // Start playback.
+    yield ContentTask.spawn(browser, {}, function* () {
+      let audio = content.document.querySelector("audio");
+      audio.pause();
+    });
+
+    // Make sure things work if the tab is pinned.
+    gBrowser.pinTab(tab);
+
+    // Make sure it's possible to mute before the tab is playing.
+    yield test_muting_using_keyboard(tab);
+
+    // Start playback.
+    yield ContentTask.spawn(browser, {}, function* () {
+      let audio = content.document.querySelector("audio");
+      audio.play();
+    });
+
+    // Wait for playback to start.
+    yield wait_for_tab_playing_event(tab, true);
+
+    // Make sure it's possible to mute after the tab is playing.
+    yield test_muting_using_keyboard(tab);
+
+    gBrowser.unpinTab(tab);
+  }
+
+  yield BrowserTestUtils.withNewTab({
+    gBrowser,
+    url: PAGE
+  }, test_on_browser);
+}
+
 function* test_on_browser(browser) {
   let tab = gBrowser.getTabForBrowser(browser);
 
   // Test the icon in a normal tab.
   yield test_playing_icon_on_tab(tab, browser, false);
 
   gBrowser.pinTab(tab);
 
@@ -299,8 +363,10 @@ add_task(function* test_page() {
     gBrowser,
     url: PAGE
   }, test_on_browser);
 });
 
 add_task(test_click_on_pinned_tab_after_mute);
 
 add_task(test_cross_process_load);
+
+add_task(test_mute_keybinding);
--- a/build/clang-plugin/clang-plugin.cpp
+++ b/build/clang-plugin/clang-plugin.cpp
@@ -98,27 +98,33 @@ private:
     virtual void run(const MatchFinder::MatchResult &Result);
   };
 
   class ExplicitImplicitChecker : public MatchFinder::MatchCallback {
   public:
     virtual void run(const MatchFinder::MatchResult &Result);
   };
 
+  class NoAutoTypeChecker : public MatchFinder::MatchCallback {
+  public:
+    virtual void run(const MatchFinder::MatchResult &Result);
+  };
+
   ScopeChecker scopeChecker;
   ArithmeticArgChecker arithmeticArgChecker;
   TrivialCtorDtorChecker trivialCtorDtorChecker;
   NaNExprChecker nanExprChecker;
   NoAddRefReleaseOnReturnChecker noAddRefReleaseOnReturnChecker;
   RefCountedInsideLambdaChecker refCountedInsideLambdaChecker;
   ExplicitOperatorBoolChecker explicitOperatorBoolChecker;
   NoDuplicateRefCntMemberChecker noDuplicateRefCntMemberChecker;
   NeedsNoVTableTypeChecker needsNoVTableTypeChecker;
   NonMemMovableChecker nonMemMovableChecker;
   ExplicitImplicitChecker explicitImplicitChecker;
+  NoAutoTypeChecker noAutoTypeChecker;
   MatchFinder astMatcher;
 };
 
 namespace {
 
 std::string getDeclarationNamespace(const Decl *decl) {
   const DeclContext *DC = decl->getDeclContext()->getEnclosingNamespaceContext();
   const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
@@ -246,16 +252,25 @@ public:
     : Spelling(Spelling), Pretty(Pretty) {};
 
   // Checks if this custom annotation "effectively affects" the given type.
   bool hasEffectiveAnnotation(QualType T) {
     return directAnnotationReason(T).valid();
   }
   void dumpAnnotationReason(DiagnosticsEngine &Diag, QualType T, SourceLocation Loc);
 
+  void reportErrorIfAbsent(DiagnosticsEngine &Diag, QualType T, SourceLocation Loc,
+                           unsigned ErrorID, unsigned NoteID) {
+    if (hasEffectiveAnnotation(T)) {
+      Diag.Report(Loc, ErrorID) << T;
+      Diag.Report(Loc, NoteID);
+      dumpAnnotationReason(Diag, T, Loc);
+    }
+  }
+
 private:
   bool hasLiteralAnnotation(QualType T) const;
   AnnotationReason directAnnotationReason(QualType T);
 };
 
 static CustomTypeAnnotation StackClass =
   CustomTypeAnnotation("moz_stack_class", "stack");
 static CustomTypeAnnotation GlobalClass =
@@ -772,16 +787,25 @@ AST_MATCHER(CXXConstructorDecl, isIntere
 AST_MATCHER(CXXConstructorDecl, isMarkedImplicit) {
   return MozChecker::hasCustomAnnotation(&Node, "moz_implicit");
 }
 
 AST_MATCHER(CXXRecordDecl, isConcreteClass) {
   return !Node.isAbstract();
 }
 
+AST_MATCHER(QualType, autoNonAutoableType) {
+  if (const AutoType *T = Node->getContainedAutoType()) {
+    if (const CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
+      return MozChecker::hasCustomAnnotation(Rec, "moz_non_autoable");
+    }
+  }
+  return false;
+}
+
 }
 }
 
 namespace {
 
 void CustomTypeAnnotation::dumpAnnotationReason(DiagnosticsEngine &Diag, QualType T, SourceLocation Loc) {
   unsigned InheritsID = Diag.getDiagnosticIDs()->getCustomDiagID(
     DiagnosticIDs::Note, "%1 is a %0 type because it inherits from a %0 type %2");
@@ -1018,16 +1042,19 @@ DiagnosticsMatcher::DiagnosticsMatcher()
       ).bind("specialization"),
       &nonMemMovableChecker);
 
   astMatcher.addMatcher(
       constructorDecl(isInterestingImplicitCtor(),
                       ofClass(allOf(isConcreteClass(), decl().bind("class"))),
                       unless(isMarkedImplicit())).bind("ctor"),
       &explicitImplicitChecker);
+
+  astMatcher.addMatcher(varDecl(hasType(autoNonAutoableType())
+                          ).bind("node"), &noAutoTypeChecker);
 }
 
 // These enum variants determine whether an allocation has occured in the code.
 enum AllocationVariety {
   AV_None,
   AV_Global,
   AV_Automatic,
   AV_Temporary,
@@ -1095,70 +1122,34 @@ void DiagnosticsMatcher::ScopeChecker::r
       DiagnosticIDs::Note, "value incorrectly allocated in a temporary");
 
   // Report errors depending on the annotations on the input types.
   switch (Variety) {
   case AV_None:
     return;
 
   case AV_Global:
-    if (StackClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, StackID) << T;
-      Diag.Report(Loc, GlobalNoteID);
-      StackClass.dumpAnnotationReason(Diag, T, Loc);
-    }
-    if (HeapClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, HeapID) << T;
-      Diag.Report(Loc, GlobalNoteID);
-      HeapClass.dumpAnnotationReason(Diag, T, Loc);
-    }
+    StackClass.reportErrorIfAbsent(Diag, T, Loc, StackID, GlobalNoteID);
+    HeapClass.reportErrorIfAbsent(Diag, T, Loc, HeapID, GlobalNoteID);
     break;
 
   case AV_Automatic:
-    if (GlobalClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, GlobalID) << T;
-      Diag.Report(Loc, StackNoteID);
-      GlobalClass.dumpAnnotationReason(Diag, T, Loc);
-    }
-    if (HeapClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, HeapID) << T;
-      Diag.Report(Loc, StackNoteID);
-      HeapClass.dumpAnnotationReason(Diag, T, Loc);
-    }
+    GlobalClass.reportErrorIfAbsent(Diag, T, Loc, GlobalID, StackNoteID);
+    HeapClass.reportErrorIfAbsent(Diag, T, Loc, HeapID, StackNoteID);
     break;
 
   case AV_Temporary:
-    if (GlobalClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, GlobalID) << T;
-      Diag.Report(Loc, TemporaryNoteID);
-      GlobalClass.dumpAnnotationReason(Diag, T, Loc);
-    }
-    if (HeapClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, HeapID) << T;
-      Diag.Report(Loc, TemporaryNoteID);
-      HeapClass.dumpAnnotationReason(Diag, T, Loc);
-    }
+    GlobalClass.reportErrorIfAbsent(Diag, T, Loc, GlobalID, TemporaryNoteID);
+    HeapClass.reportErrorIfAbsent(Diag, T, Loc, HeapID, TemporaryNoteID);
     break;
 
   case AV_Heap:
-    if (GlobalClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, GlobalID) << T;
-      Diag.Report(Loc, HeapNoteID);
-      GlobalClass.dumpAnnotationReason(Diag, T, Loc);
-    }
-    if (StackClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, StackID) << T;
-      Diag.Report(Loc, HeapNoteID);
-      StackClass.dumpAnnotationReason(Diag, T, Loc);
-    }
-    if (NonHeapClass.hasEffectiveAnnotation(T)) {
-      Diag.Report(Loc, NonHeapID) << T;
-      Diag.Report(Loc, HeapNoteID);
-      NonHeapClass.dumpAnnotationReason(Diag, T, Loc);
-    }
+    GlobalClass.reportErrorIfAbsent(Diag, T, Loc, GlobalID, HeapNoteID);
+    StackClass.reportErrorIfAbsent(Diag, T, Loc, StackID, HeapNoteID);
+    NonHeapClass.reportErrorIfAbsent(Diag, T, Loc, NonHeapID, HeapNoteID);
     break;
   }
 }
 
 void DiagnosticsMatcher::ArithmeticArgChecker::run(
     const MatchFinder::MatchResult &Result) {
   DiagnosticsEngine &Diag = Result.Context->getDiagnostics();
   unsigned errorID = Diag.getDiagnosticIDs()->getCustomDiagID(
@@ -1370,16 +1361,30 @@ void DiagnosticsMatcher::ExplicitImplici
 
   const CXXConstructorDecl *Ctor = Result.Nodes.getNodeAs<CXXConstructorDecl>("ctor");
   const CXXRecordDecl *Decl = Result.Nodes.getNodeAs<CXXRecordDecl>("class");
 
   Diag.Report(Ctor->getLocation(), ErrorID) << Decl->getDeclName();
   Diag.Report(Ctor->getLocation(), NoteID);
 }
 
+void DiagnosticsMatcher::NoAutoTypeChecker::run(
+    const MatchFinder::MatchResult &Result) {
+  DiagnosticsEngine &Diag = Result.Context->getDiagnostics();
+  unsigned ErrorID = Diag.getDiagnosticIDs()->getCustomDiagID(
+      DiagnosticIDs::Error, "Cannot use auto to declare a variable of type %0");
+  unsigned NoteID = Diag.getDiagnosticIDs()->getCustomDiagID(
+      DiagnosticIDs::Note, "Please write out this type explicitly");
+
+  const VarDecl *D = Result.Nodes.getNodeAs<VarDecl>("node");
+
+  Diag.Report(D->getLocation(), ErrorID) << D->getType();
+  Diag.Report(D->getLocation(), NoteID);
+}
+
 class MozCheckAction : public PluginASTAction {
 public:
   ASTConsumerPtr CreateASTConsumer(CompilerInstance &CI, StringRef fileName) override {
 #if CLANG_VERSION_FULL >= 306
     std::unique_ptr<MozChecker> checker(llvm::make_unique<MozChecker>(CI));
     ASTConsumerPtr other(checker->getOtherConsumer());
 
     std::vector<ASTConsumerPtr> consumers;
new file mode 100644
--- /dev/null
+++ b/build/clang-plugin/tests/TestNoAutoType.cpp
@@ -0,0 +1,41 @@
+#define MOZ_NON_AUTOABLE __attribute__((annotate("moz_non_autoable")))
+
+template<class T>
+struct MOZ_NON_AUTOABLE ExplicitTypeTemplate {};
+struct MOZ_NON_AUTOABLE ExplicitType {};
+struct NonExplicitType {};
+
+void f() {
+  {
+    ExplicitType a;
+    auto b = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType'}} expected-note {{Please write out this type explicitly}}
+    auto &br = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType &'}} expected-note {{Please write out this type explicitly}}
+    const auto &brc = a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitType &'}} expected-note {{Please write out this type explicitly}}
+    auto *bp = &a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType *'}} expected-note {{Please write out this type explicitly}}
+    const auto *bpc = &a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitType *'}} expected-note {{Please write out this type explicitly}}
+  }
+
+  {
+    ExplicitTypeTemplate<int> a;
+    auto b = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int>'}} expected-note {{Please write out this type explicitly}}
+    auto &br = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int> &'}} expected-note {{Please write out this type explicitly}}
+    const auto &brc = a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitTypeTemplate<int> &'}} expected-note {{Please write out this type explicitly}}
+    auto *bp = &a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int> *'}} expected-note {{Please write out this type explicitly}}
+    const auto *bpc = &a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitTypeTemplate<int> *'}} expected-note {{Please write out this type explicitly}}
+  }
+
+  {
+    NonExplicitType c;
+    auto d = c;
+    auto &dr = c;
+    const auto &drc = c;
+    auto *dp = &c;
+    const auto *dpc = &c;
+  }
+}
+
+ExplicitType A;
+auto B = A; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType'}} expected-note {{Please write out this type explicitly}}
+
+NonExplicitType C;
+auto D = C;
--- a/build/clang-plugin/tests/moz.build
+++ b/build/clang-plugin/tests/moz.build
@@ -14,16 +14,17 @@ SOURCES += [
     'TestMultipleAnnotations.cpp',
     'TestMustOverride.cpp',
     'TestMustUse.cpp',
     'TestNANTestingExpr.cpp',
     'TestNANTestingExprC.c',
     'TestNeedsNoVTableType.cpp',
     'TestNoAddRefReleaseOnReturn.cpp',
     'TestNoArithmeticExprInArgument.cpp',
+    'TestNoAutoType.cpp',
     'TestNoDuplicateRefCntMember.cpp',
     'TestNonHeapClass.cpp',
     'TestNonMemMovable.cpp',
     'TestNoRefcountedInsideLambdas.cpp',
     'TestStackClass.cpp',
     'TestTrivialCtorDtor.cpp',
 ]
 
--- a/build/upload.py
+++ b/build/upload.py
@@ -186,16 +186,18 @@ def UploadFiles(user, host, path, files,
                 print "Uploading " + file
             DoSCPFile(file, remote_path, user, host, port=port, ssh_key=ssh_key)
             remote_files.append(remote_path + '/' + os.path.basename(file))
         if post_upload_command is not None:
             if verbose:
                 print "Running post-upload command: " + post_upload_command
             file_list = '"' + '" "'.join(remote_files) + '"'
             output = DoSSHCommand('%s "%s" %s' % (post_upload_command, path, file_list), user, host, port=port, ssh_key=ssh_key)
+            # We print since mozharness may parse URLs from the output stream.
+            print output
             if properties_file:
                 with open(properties_file, 'w') as outfile:
                     properties = GetUrlProperties(output, package)
                     properties['packageFilename'] = package
                     properties['uploadFiles'] = [os.path.abspath(f) for f in files]
                     json.dump(properties, outfile, indent=4)
     finally:
         if upload_to_temp_dir:
--- a/dom/animation/Animation.cpp
+++ b/dom/animation/Animation.cpp
@@ -46,24 +46,28 @@ Animation::WrapObject(JSContext* aCx, JS
 //
 // Animation interface:
 //
 // ---------------------------------------------------------------------------
 
 void
 Animation::SetEffect(KeyframeEffectReadOnly* aEffect)
 {
+  // FIXME: We should perform an early return if aEffect == mEffect but
+  // current nsAnimationManager::CheckAnimationRule is relying on this
+  // method updating timing even in that case.
   if (mEffect) {
     mEffect->SetParentTime(Nullable<TimeDuration>());
   }
   mEffect = aEffect;
   if (mEffect) {
     mEffect->SetParentTime(GetCurrentTime());
   }
-  UpdateRelevance();
+
+  UpdateTiming(SeekFlag::NoSeek, SyncNotifyFlag::Async);
 }
 
 void
 Animation::SetTimeline(AnimationTimeline* aTimeline)
 {
   if (mTimeline == aTimeline) {
     return;
   }
--- a/dom/base/nsDOMWindowUtils.cpp
+++ b/dom/base/nsDOMWindowUtils.cpp
@@ -2256,32 +2256,38 @@ nsDOMWindowUtils::GetLayerManagerRemote(
   if (!mgr)
     return NS_ERROR_FAILURE;
 
   *retval = !!mgr->AsShadowForwarder();
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsDOMWindowUtils::GetSupportsHardwareH264Decoding(bool* retval)
+nsDOMWindowUtils::GetSupportsHardwareH264Decoding(nsAString& aRetval)
 {
   MOZ_RELEASE_ASSERT(nsContentUtils::IsCallerChrome());
 
 #ifdef MOZ_FMP4
   nsCOMPtr<nsIWidget> widget = GetWidget();
   if (!widget)
     return NS_ERROR_FAILURE;
 
   LayerManager *mgr = widget->GetLayerManager();
   if (!mgr)
     return NS_ERROR_FAILURE;
 
-  *retval = MP4Decoder::IsVideoAccelerated(mgr->GetCompositorBackendType());
+  nsCString failureReason;
+  if (MP4Decoder::IsVideoAccelerated(mgr->GetCompositorBackendType(), failureReason)) {
+    aRetval.AssignLiteral("Yes");
+  } else {
+    aRetval.AssignLiteral("No; ");
+    AppendUTF8toUTF16(failureReason, aRetval);
+  }
 #else
-  *retval = false;
+  aRetval.AssignLiteral("No; Compiled without MP4 support.");
 #endif
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsDOMWindowUtils::StartFrameTimeRecording(uint32_t *startIndex)
 {
   MOZ_RELEASE_ASSERT(nsContentUtils::IsCallerChrome());
--- a/dom/base/nsDataDocumentContentPolicy.cpp
+++ b/dom/base/nsDataDocumentContentPolicy.cpp
@@ -78,22 +78,22 @@ nsDataDocumentContentPolicy::ShouldLoad(
 
   if (doc->IsBeingUsedAsImage()) {
     // We only allow SVG images to load content from URIs that are local and
     // also satisfy one of the following conditions:
     //  - URI inherits security context, e.g. data URIs
     //   OR
     //  - URI loadable by subsumers, e.g. blob URIs
     // Any URI that doesn't meet these requirements will be rejected below.
-    if (!HasFlags(aContentLocation,
-                  nsIProtocolHandler::URI_IS_LOCAL_RESOURCE) ||
-        (!HasFlags(aContentLocation,
-                   nsIProtocolHandler::URI_INHERITS_SECURITY_CONTEXT) &&
-         !HasFlags(aContentLocation,
-                   nsIProtocolHandler::URI_LOADABLE_BY_SUBSUMERS))) {
+    if (!(HasFlags(aContentLocation,
+                   nsIProtocolHandler::URI_IS_LOCAL_RESOURCE) &&
+          (HasFlags(aContentLocation,
+                    nsIProtocolHandler::URI_INHERITS_SECURITY_CONTEXT) ||
+           HasFlags(aContentLocation,
+                    nsIProtocolHandler::URI_LOADABLE_BY_SUBSUMERS)))) {
       *aDecision = nsIContentPolicy::REJECT_TYPE;
 
       // Report error, if we can.
       if (node) {
         nsIPrincipal* requestingPrincipal = node->NodePrincipal();
         nsRefPtr<nsIURI> principalURI;
         nsresult rv =
           requestingPrincipal->GetURI(getter_AddRefs(principalURI));
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -2866,16 +2866,20 @@ public:
   void Forget() { mElement = nullptr; }
 
   // Main thread
   void DoNotifyFinished()
   {
     if (mElement) {
       nsRefPtr<HTMLMediaElement> deathGrip = mElement;
       mElement->PlaybackEnded();
+      // Update NextFrameStatus() to move to NEXT_FRAME_UNAVAILABLE and
+      // HAVE_CURRENT_DATA.
+      mElement = nullptr;
+      NotifyWatchers();
     }
   }
 
   MediaDecoderOwner::NextFrameStatus NextFrameStatus()
   {
     if (!mElement || !mHaveCurrentData) {
       return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
     }
--- a/dom/interfaces/base/nsIDOMWindowUtils.idl
+++ b/dom/interfaces/base/nsIDOMWindowUtils.idl
@@ -44,17 +44,17 @@ interface nsIDOMClientRect;
 interface nsIURI;
 interface nsIDOMEventTarget;
 interface nsIRunnable;
 interface nsITranslationNodeList;
 interface nsIJSRAIIHelper;
 interface nsIContentPermissionRequest;
 interface nsIObserver;
 
-[scriptable, uuid(6064615a-a782-4d08-86db-26ef3851208a)]
+[scriptable, uuid(47fa312b-2ad1-4b80-8a0a-c9822e2d1ec9)]
 interface nsIDOMWindowUtils : nsISupports {
 
   /**
    * Image animation mode of the window. When this attribute's value
    * is changed, the implementation should set all images in the window
    * to the given value. That is, when set to kDontAnimMode, all images
    * will stop animating. The attribute's value must be one of the
    * animationMode values from imgIContainer.
@@ -1337,17 +1337,17 @@ interface nsIDOMWindowUtils : nsISupport
    */
   readonly attribute boolean layerManagerRemote;
 
   /**
    * True if we can initialize a hardware-backed h264 decoder for a simple
    * test video, does not mean that all h264 video decoding will be done
    * in hardware.
    */
-  readonly attribute boolean supportsHardwareH264Decoding;
+  readonly attribute AString supportsHardwareH264Decoding;
 
   /**
    * Record (and return) frame-intervals for frames which were presented
    *   between calling StartFrameTimeRecording and StopFrameTimeRecording.
    *
    * - Uses a cyclic buffer and serves concurrent consumers, so if Stop is called too late
    *     (elements were overwritten since Start), result is considered invalid and hence empty.
    * - Buffer is capable of holding 10 seconds @ 60fps (or more if frames were less frequent).
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -607,17 +607,16 @@ NS_INTERFACE_MAP_BEGIN(ContentChild)
 NS_INTERFACE_MAP_END
 
 bool
 ContentChild::Init(MessageLoop* aIOLoop,
                    base::ProcessId aParentPid,
                    IPC::Channel* aChannel)
 {
 #ifdef MOZ_WIDGET_GTK
-    // sigh
     gtk_init(nullptr, nullptr);
 #endif
 
 #ifdef MOZ_WIDGET_QT
     // sigh, seriously
     nsQAppInstance::AddRef();
 #endif
 
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -5191,16 +5191,19 @@ ContentParent::RecvBeginDriverCrashGuard
   UniquePtr<gfx::DriverCrashGuard> guard;
   switch (gfx::CrashGuardType(aGuardType)) {
     case gfx::CrashGuardType::D3D11Layers:
       guard = MakeUnique<gfx::D3D11LayersCrashGuard>(this);
       break;
     case gfx::CrashGuardType::D3D9Video:
       guard = MakeUnique<gfx::D3D9VideoCrashGuard>(this);
       break;
+    case gfx::CrashGuardType::GLContext:
+      guard = MakeUnique<gfx::GLContextCrashGuard>(this);
+      break;
     default:
       MOZ_ASSERT_UNREACHABLE("unknown crash guard type");
       return false;
   }
 
   if (guard->Crashed()) {
     *aOutCrashed = true;
     return true;
--- a/dom/media/DecodedStream.cpp
+++ b/dom/media/DecodedStream.cpp
@@ -1,33 +1,37 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "AudioSegment.h"
 #include "DecodedStream.h"
+#include "MediaData.h"
+#include "MediaQueue.h"
 #include "MediaStreamGraph.h"
-#include "AudioSegment.h"
+#include "SharedBuffer.h"
 #include "VideoSegment.h"
-#include "MediaQueue.h"
-#include "MediaData.h"
-#include "SharedBuffer.h"
 #include "VideoUtils.h"
 
 namespace mozilla {
 
 class DecodedStreamGraphListener : public MediaStreamListener {
   typedef MediaStreamListener::MediaStreamGraphEvent MediaStreamGraphEvent;
 public:
-  explicit DecodedStreamGraphListener(MediaStream* aStream)
+  DecodedStreamGraphListener(MediaStream* aStream,
+                             MozPromiseHolder<GenericPromise>&& aPromise)
     : mMutex("DecodedStreamGraphListener::mMutex")
     , mStream(aStream)
     , mLastOutputTime(aStream->StreamTimeToMicroseconds(aStream->GetCurrentTime()))
-    , mStreamFinishedOnMainThread(false) {}
+    , mStreamFinishedOnMainThread(false)
+  {
+    mFinishPromise = Move(aPromise);
+  }
 
   void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) override
   {
     MutexAutoLock lock(mMutex);
     if (mStream) {
       mLastOutputTime = mStream->StreamTimeToMicroseconds(mStream->GraphTimeToStreamTime(aCurrentTime));
     }
   }
@@ -38,45 +42,49 @@ public:
       nsCOMPtr<nsIRunnable> event =
         NS_NewRunnableMethod(this, &DecodedStreamGraphListener::DoNotifyFinished);
       aGraph->DispatchToMainThreadAfterStreamStateUpdate(event.forget());
     }
   }
 
   void DoNotifyFinished()
   {
+    mFinishPromise.ResolveIfExists(true, __func__);
     MutexAutoLock lock(mMutex);
     mStreamFinishedOnMainThread = true;
   }
 
   int64_t GetLastOutputTime()
   {
     MutexAutoLock lock(mMutex);
     return mLastOutputTime;
   }
 
   void Forget()
   {
     MOZ_ASSERT(NS_IsMainThread());
+    mFinishPromise.ResolveIfExists(true, __func__);
     MutexAutoLock lock(mMutex);
     mStream = nullptr;
   }
 
   bool IsFinishedOnMainThread()
   {
     MutexAutoLock lock(mMutex);
     return mStreamFinishedOnMainThread;
   }
 
 private:
   Mutex mMutex;
   // Members below are protected by mMutex.
   nsRefPtr<MediaStream> mStream;
   int64_t mLastOutputTime; // microseconds
   bool mStreamFinishedOnMainThread;
+  // Main thread only.
+  MozPromiseHolder<GenericPromise> mFinishPromise;
 };
 
 static void
 UpdateStreamBlocking(MediaStream* aStream, bool aBlocking)
 {
   int32_t delta = aBlocking ? 1 : -1;
   if (NS_IsMainThread()) {
     aStream->ChangeExplicitBlockerCount(delta);
@@ -126,31 +134,36 @@ public:
 
   // The decoder is responsible for calling Destroy() on this stream.
   const nsRefPtr<SourceMediaStream> mStream;
   nsRefPtr<DecodedStreamGraphListener> mListener;
   bool mPlaying;
   // True if we need to send a compensation video frame to ensure the
   // StreamTime going forward.
   bool mEOSVideoCompensation;
+  // This promise will be resolved when the SourceMediaStream is finished.
+  nsRefPtr<GenericPromise> mFinishPromise;
 };
 
 DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream, bool aPlaying)
   : mAudioFramesWritten(0)
   , mNextVideoTime(-1)
   , mNextAudioTime(-1)
   , mStreamInitialized(false)
   , mHaveSentFinish(false)
   , mHaveSentFinishAudio(false)
   , mHaveSentFinishVideo(false)
   , mStream(aStream)
   , mPlaying(aPlaying)
   , mEOSVideoCompensation(false)
 {
-  mListener = new DecodedStreamGraphListener(mStream);
+  MozPromiseHolder<GenericPromise> promise;
+  mFinishPromise = promise.Ensure(__func__);
+  // DecodedStreamGraphListener will resolve this promise.
+  mListener = new DecodedStreamGraphListener(mStream, Move(promise));
   mStream->AddListener(mListener);
 
   // Block the stream if we are not playing.
   if (!aPlaying) {
     UpdateStreamBlocking(mStream, true);
   }
 }
 
@@ -239,24 +252,29 @@ DecodedStream::DecodedStream(MediaQueue<
   , mVideoQueue(aVideoQueue)
 {
 }
 
 DecodedStream::~DecodedStream()
 {
 }
 
-void
+nsRefPtr<GenericPromise>
 DecodedStream::StartPlayback(int64_t aStartTime, const MediaInfo& aInfo)
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-  if (mStartTime.isNothing()) {
-    mStartTime.emplace(aStartTime);
-    mInfo = aInfo;
-  }
+  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
+  mStartTime.emplace(aStartTime);
+  mInfo = aInfo;
+
+  // TODO: Unfortunately, current call flow of MDSM guarantees mData is non-null
+  // when StartPlayback() is called which imposes an obscure dependency on MDSM.
+  // We will align the life cycle of mData with {Start,Stop}Playback so that
+  // DecodedStream doesn't need to make assumptions about mData's life cycle.
+  return mData->mFinishPromise;
 }
 
 void DecodedStream::StopPlayback()
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   mStartTime.reset();
 }
 
@@ -675,36 +693,39 @@ DecodedStream::AdvanceTracks()
     endPosition = std::max(endPosition, videoEnd);
   }
 
   if (!mData->mHaveSentFinish) {
     mData->mStream->AdvanceKnownTracksTime(endPosition);
   }
 }
 
-bool
+void
 DecodedStream::SendData()
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");
 
+  // Nothing to do when the stream is finished.
+  if (mData->mHaveSentFinish) {
+    return;
+  }
+
   InitTracks();
   SendAudio(mVolume, mSameOrigin);
   SendVideo(mSameOrigin);
   AdvanceTracks();
 
   bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
                   (!mInfo.HasVideo() || mVideoQueue.IsFinished());
 
   if (finished && !mData->mHaveSentFinish) {
     mData->mHaveSentFinish = true;
     mData->mStream->Finish();
   }
-
-  return finished;
 }
 
 int64_t
 DecodedStream::AudioEndTime() const
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   if (mStartTime.isSome() && mInfo.HasAudio()) {
     CheckedInt64 t = mStartTime.ref() +
--- a/dom/media/DecodedStream.h
+++ b/dom/media/DecodedStream.h
@@ -7,16 +7,17 @@
 #ifndef DecodedStream_h_
 #define DecodedStream_h_
 
 #include "nsTArray.h"
 #include "MediaInfo.h"
 
 #include "mozilla/CheckedInt.h"
 #include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h"
 #include "mozilla/nsRefPtr.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/gfx/Point.h"
 
 namespace mozilla {
 
 class DecodedStream;
@@ -48,17 +49,21 @@ public:
 class DecodedStream {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStream);
 public:
   DecodedStream(MediaQueue<MediaData>& aAudioQueue,
                 MediaQueue<MediaData>& aVideoQueue);
 
   // Mimic MDSM::StartAudioThread.
   // Must be called before any calls to SendData().
-  void StartPlayback(int64_t aStartTime, const MediaInfo& aInfo);
+  //
+  // Return a promise which will be resolved when the stream is finished
+  // or rejected if any error.
+  nsRefPtr<GenericPromise> StartPlayback(int64_t aStartTime,
+                                         const MediaInfo& aInfo);
   // Mimic MDSM::StopAudioThread.
   void StopPlayback();
 
   void DestroyData();
   void RecreateData();
   void Connect(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
   void Remove(MediaStream* aStream);
 
@@ -66,18 +71,17 @@ public:
   void SetVolume(double aVolume);
   void SetSameOrigin(bool aSameOrigin);
 
   int64_t AudioEndTime() const;
   int64_t GetPosition() const;
   bool IsFinished() const;
   bool HasConsumers() const;
 
-  // Return true if stream is finished.
-  bool SendData();
+  void SendData();
 
 protected:
   virtual ~DecodedStream();
 
 private:
   ReentrantMonitor& GetReentrantMonitor() const;
   void RecreateData(MediaStreamGraph* aGraph);
   void Connect(OutputStreamData* aStream);
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -40,36 +40,33 @@ struct AutoProfilerUnregisterThread
   {
     profiler_unregister_thread();
   }
 };
 
 GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
   : mIterationStart(0),
     mIterationEnd(0),
-    mStateComputedTime(0),
     mGraphImpl(aGraphImpl),
     mWaitState(WAITSTATE_RUNNING),
     mCurrentTimeStamp(TimeStamp::Now()),
     mPreviousDriver(nullptr),
     mNextDriver(nullptr)
 { }
 
 void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
                                GraphTime aLastSwitchNextIterationStart,
-                               GraphTime aLastSwitchNextIterationEnd,
-                               GraphTime aLastSwitchStateComputedTime)
+                               GraphTime aLastSwitchNextIterationEnd)
 {
   // We set mIterationEnd here, because the first thing a driver do when it
   // does an iteration is to update graph times, so we are in fact setting
   // mIterationStart of the next iteration by setting the end of the previous
   // iteration.
   mIterationStart = aLastSwitchNextIterationStart;
   mIterationEnd = aLastSwitchNextIterationEnd;
-  mStateComputedTime = aLastSwitchStateComputedTime;
 
   STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)", aPreviousDriver, aPreviousDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver" : "SystemClockDriver"));
   MOZ_ASSERT(!mPreviousDriver);
   mPreviousDriver = aPreviousDriver;
 }
 
 void GraphDriver::SwitchAtNextIteration(GraphDriver* aNextDriver)
 {
@@ -94,27 +91,20 @@ void GraphDriver::SwitchAtNextIteration(
 void GraphDriver::EnsureImmediateWakeUpLocked()
 {
   mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
   mWaitState = WAITSTATE_WAKING_UP;
   mGraphImpl->mGraphDriverAsleep = false; // atomic
   mGraphImpl->GetMonitor().Notify();
 }
 
-void GraphDriver::UpdateStateComputedTime(GraphTime aStateComputedTime)
+GraphTime
+GraphDriver::StateComputedTime() const
 {
-  MOZ_ASSERT(aStateComputedTime >= mIterationEnd);
-  // The next state computed time can be the same as the previous, here: it
-  // means the driver would be have been blocking indefinitly, but the graph has
-  // been woken up right after having been to sleep.
-  if (aStateComputedTime < mStateComputedTime) {
-    printf("State time can't go backward %ld < %ld.\n", static_cast<long>(aStateComputedTime), static_cast<long>(mStateComputedTime));
-  }
-
-  mStateComputedTime = aStateComputedTime;
+  return mGraphImpl->mStateComputedTime;
 }
 
 void GraphDriver::EnsureNextIteration()
 {
   mGraphImpl->EnsureNextIteration();
 }
 
 class MediaStreamGraphShutdownThreadRunnable : public nsRunnable {
@@ -233,18 +223,17 @@ ThreadedDriver::Revive()
 {
   // Note: only called on MainThread, without monitor
   // We know were weren't in a running state
   STREAM_LOG(LogLevel::Debug, ("AudioCallbackDriver reviving."));
   // If we were switching, switch now. Otherwise, tell thread to run the main
   // loop again.
   MonitorAutoLock mon(mGraphImpl->GetMonitor());
   if (mNextDriver) {
-    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                              mStateComputedTime);
+    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
     mGraphImpl->SetCurrentDriver(mNextDriver);
     mNextDriver->Start();
   } else {
     nsCOMPtr<nsIRunnable> event = new MediaStreamGraphInitThreadRunnable(this);
     mThread->Dispatch(event, NS_DISPATCH_NORMAL);
   }
 }
 
@@ -275,64 +264,70 @@ ThreadedDriver::RunThread()
 {
   AutoProfilerUnregisterThread autoUnregister;
 
   bool stillProcessing = true;
   while (stillProcessing) {
     mIterationStart = IterationEnd();
     mIterationEnd += GetIntervalForIteration();
 
-    if (mStateComputedTime < mIterationEnd) {
+    GraphTime stateComputedTime = StateComputedTime();
+    if (stateComputedTime < mIterationEnd) {
       STREAM_LOG(LogLevel::Warning, ("Media graph global underrun detected"));
-      mIterationEnd = mStateComputedTime;
+      mIterationEnd = stateComputedTime;
     }
 
     if (mIterationStart >= mIterationEnd) {
       NS_ASSERTION(mIterationStart == mIterationEnd ,
                    "Time can't go backwards!");
       // This could happen due to low clock resolution, maybe?
       STREAM_LOG(LogLevel::Debug, ("Time did not advance"));
     }
 
     GraphTime nextStateComputedTime =
       mGraphImpl->RoundUpToNextAudioBlock(
         mIterationEnd + mGraphImpl->MillisecondsToMediaTime(AUDIO_TARGET_MS));
+    if (nextStateComputedTime < stateComputedTime) {
+      // A previous driver may have been processing further ahead of
+      // iterationEnd.
+      STREAM_LOG(LogLevel::Warning,
+                 ("Prevent state from going backwards. interval[%ld; %ld] state[%ld; %ld]",
+                  (long)mIterationStart, (long)mIterationEnd,
+                  (long)stateComputedTime, (long)nextStateComputedTime));
+      nextStateComputedTime = stateComputedTime;
+    }
     STREAM_LOG(LogLevel::Debug,
                ("interval[%ld; %ld] state[%ld; %ld]",
                (long)mIterationStart, (long)mIterationEnd,
-               (long)mStateComputedTime, (long)nextStateComputedTime));
+               (long)stateComputedTime, (long)nextStateComputedTime));
 
     mGraphImpl->mFlushSourcesNow = mGraphImpl->mFlushSourcesOnNextIteration;
     mGraphImpl->mFlushSourcesOnNextIteration = false;
-    stillProcessing = mGraphImpl->OneIteration(mIterationStart,
-                                               mIterationEnd,
-                                               StateComputedTime(),
-                                               nextStateComputedTime);
+    stillProcessing = mGraphImpl->OneIteration(nextStateComputedTime);
 
     if (mNextDriver && stillProcessing) {
       STREAM_LOG(LogLevel::Debug, ("Switching to AudioCallbackDriver"));
-      mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                                mStateComputedTime);
+      mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
       mGraphImpl->SetCurrentDriver(mNextDriver);
       mNextDriver->Start();
       return;
     }
   }
 }
 
 MediaTime
 SystemClockDriver::GetIntervalForIteration()
 {
   TimeStamp now = TimeStamp::Now();
   MediaTime interval =
     mGraphImpl->SecondsToMediaTime((now - mCurrentTimeStamp).ToSeconds());
   mCurrentTimeStamp = now;
 
   MOZ_LOG(gMediaStreamGraphLog, LogLevel::Verbose,
-          ("Updating current time to %f (real %f, mStateComputedTime %f)",
+          ("Updating current time to %f (real %f, StateComputedTime() %f)",
            mGraphImpl->MediaTimeToSeconds(IterationEnd() + interval),
            (now - mInitialTimeStamp).ToSeconds(),
            mGraphImpl->MediaTimeToSeconds(StateComputedTime())));
 
   return interval;
 }
 
 TimeStamp
@@ -568,18 +563,17 @@ AudioCallbackDriver::Init()
   if (cubeb_stream_init(CubebUtils::GetCubebContext(), &stream,
                         "AudioCallbackDriver", params, latency,
                         DataCallback_s, StateCallback_s, this) == CUBEB_OK) {
     mAudioStream.own(stream);
   } else {
     NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling back to a SystemClockDriver");
     // Fall back to a driver using a normal thread.
     mNextDriver = new SystemClockDriver(GraphImpl());
-    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                              mStateComputedTime);
+    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
     mGraphImpl->SetCurrentDriver(mNextDriver);
     DebugOnly<bool> found = mGraphImpl->RemoveMixerCallback(this);
     NS_WARN_IF_FALSE(!found, "Mixer callback not added when switching?");
     mNextDriver->Start();
     return;
   }
 
   cubeb_stream_register_device_changed_callback(mAudioStream,
@@ -662,18 +656,17 @@ void
 AudioCallbackDriver::Revive()
 {
   // Note: only called on MainThread, without monitor
   // We know were weren't in a running state
   STREAM_LOG(LogLevel::Debug, ("AudioCallbackDriver reviving."));
   // If we were switching, switch now. Otherwise, start the audio thread again.
   MonitorAutoLock mon(mGraphImpl->GetMonitor());
   if (mNextDriver) {
-    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                              mStateComputedTime);
+    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
     mGraphImpl->SetCurrentDriver(mNextDriver);
     mNextDriver->Start();
   } else {
     STREAM_LOG(LogLevel::Debug, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
     nsRefPtr<AsyncCubebTask> initEvent =
       new AsyncCubebTask(this, AsyncCubebOperation::INIT);
     initEvent->Dispatch();
   }
@@ -781,17 +774,18 @@ AudioCallbackDriver::DataCallback(AudioD
 
 #ifdef DEBUG
   // DebugOnly<> doesn't work here... it forces an initialization that will cause
   // mInCallback to be set back to false before we exit the statement.  Do it by
   // hand instead.
   AutoInCallback aic(this);
 #endif
 
-  if (mStateComputedTime == 0) {
+  GraphTime stateComputedTime = StateComputedTime();
+  if (stateComputedTime == 0) {
     MonitorAutoLock mon(mGraphImpl->GetMonitor());
     // Because this function is called during cubeb_stream_init (to prefill the
     // audio buffers), it can be that we don't have a message here (because this
     // driver is the first one for this graph), and the graph would exit. Simply
     // return here until we have messages.
     if (!mGraphImpl->MessagesQueued()) {
       PodZero(aBuffer, aFrames * mGraphImpl->AudioChannelCount());
       return aFrames;
@@ -817,48 +811,45 @@ AudioCallbackDriver::DataCallback(AudioD
   // if we totally filled the buffer (and mScratchBuffer isn't empty),
   // we don't need to run an iteration and if we do so we may overflow.
   if (mBuffer.Available()) {
 
     // State computed time is decided by the audio callback's buffer length. We
     // compute the iteration start and end from there, trying to keep the amount
     // of buffering in the graph constant.
     GraphTime nextStateComputedTime =
-      mGraphImpl->RoundUpToNextAudioBlock(mStateComputedTime + mBuffer.Available());
+      mGraphImpl->RoundUpToNextAudioBlock(stateComputedTime + mBuffer.Available());
 
     mIterationStart = mIterationEnd;
     // inGraph is the number of audio frames there is between the state time and
     // the current time, i.e. the maximum theoretical length of the interval we
     // could use as [mIterationStart; mIterationEnd].
-    GraphTime inGraph = mStateComputedTime - mIterationStart;
+    GraphTime inGraph = stateComputedTime - mIterationStart;
     // We want the interval [mIterationStart; mIterationEnd] to be before the
-    // interval [mStateComputedTime; nextStateComputedTime]. We also want
+    // interval [stateComputedTime; nextStateComputedTime]. We also want
     // the distance between these intervals to be roughly equivalent each time, to
     // ensure there is no clock drift between current time and state time. Since
     // we can't act on the state time because we have to fill the audio buffer, we
     // reclock the current time against the state time, here.
     mIterationEnd = mIterationStart + 0.8 * inGraph;
 
     STREAM_LOG(LogLevel::Debug, ("interval[%ld; %ld] state[%ld; %ld] (frames: %ld) (durationMS: %u) (duration ticks: %ld)\n",
                               (long)mIterationStart, (long)mIterationEnd,
-                              (long)mStateComputedTime, (long)nextStateComputedTime,
+                              (long)stateComputedTime, (long)nextStateComputedTime,
                               (long)aFrames, (uint32_t)durationMS,
-                              (long)(nextStateComputedTime - mStateComputedTime)));
+                              (long)(nextStateComputedTime - stateComputedTime)));
 
     mCurrentTimeStamp = TimeStamp::Now();
 
-    if (mStateComputedTime < mIterationEnd) {
+    if (stateComputedTime < mIterationEnd) {
       STREAM_LOG(LogLevel::Warning, ("Media graph global underrun detected"));
-      mIterationEnd = mStateComputedTime;
+      mIterationEnd = stateComputedTime;
     }
 
-    stillProcessing = mGraphImpl->OneIteration(mIterationStart,
-                                               mIterationEnd,
-                                               mStateComputedTime,
-                                               nextStateComputedTime);
+    stillProcessing = mGraphImpl->OneIteration(nextStateComputedTime);
   } else {
     NS_WARNING("DataCallback buffer filled entirely from scratch buffer, skipping iteration.");
     stillProcessing = true;
   }
 
   mBuffer.BufferFilled();
 
   if (mNextDriver && stillProcessing) {
@@ -866,18 +857,17 @@ AudioCallbackDriver::DataCallback(AudioD
       // If the audio stream has not been started by the previous driver or
       // the graph itself, keep it alive.
       MonitorAutoLock mon(mGraphImpl->GetMonitor());
       if (!IsStarted()) {
         return aFrames;
       }
     }
     STREAM_LOG(LogLevel::Debug, ("Switching to system driver."));
-    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                              mStateComputedTime);
+    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
     mGraphImpl->SetCurrentDriver(mNextDriver);
     mNextDriver->Start();
     // Returning less than aFrames starts the draining and eventually stops the
     // audio thread. This function will never get called again.
     return aFrames - 1;
   }
 
   if (!stillProcessing) {
@@ -974,18 +964,17 @@ AudioCallbackDriver::DeviceChangedCallba
   if (mSelfReference) {
     return;
   }
   STREAM_LOG(LogLevel::Error, ("Switching to SystemClockDriver during output switch"));
   mSelfReference.Take(this);
   mCallbackReceivedWhileSwitching = 0;
   mGraphImpl->mFlushSourcesOnNextIteration = true;
   mNextDriver = new SystemClockDriver(GraphImpl());
-  mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
-                            mStateComputedTime);
+  mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
   mGraphImpl->SetCurrentDriver(mNextDriver);
   mNextDriver->Start();
 #endif
 }
 
 void
 AudioCallbackDriver::SetMicrophoneActive(bool aActive)
 {
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -122,20 +122,16 @@ public:
   GraphTime IterationStart() {
     return mIterationStart;
   }
 
   GraphTime IterationEnd() {
     return mIterationEnd;
   }
 
-  GraphTime StateComputedTime() {
-    return mStateComputedTime;
-  }
-
   virtual void GetAudioBuffer(float** aBuffer, long& aFrames) {
     MOZ_CRASH("This is not an Audio GraphDriver!");
   }
 
   virtual AudioCallbackDriver* AsAudioCallbackDriver() {
     return nullptr;
   }
 
@@ -150,25 +146,17 @@ public:
   virtual void SwitchAtNextIteration(GraphDriver* aDriver);
 
   /**
    * Set the time for a graph, on a driver. This is used so a new driver just
    * created can start at the right point in time.
    */
   void SetGraphTime(GraphDriver* aPreviousDriver,
                     GraphTime aLastSwitchNextIterationStart,
-                    GraphTime aLastSwitchNextIterationEnd,
-                    GraphTime aLastSwitchStateComputedTime);
-
-  /**
-   * Whenever the graph has computed the time until it has all state
-   * (mStateComputedState), it calls this to indicate the new time until which
-   * we have computed state.
-   */
-  void UpdateStateComputedTime(GraphTime aStateComputedTime);
+                    GraphTime aLastSwitchNextIterationEnd);
 
   /**
    * Call this to indicate that another iteration of the control loop is
    * required immediately. The monitor must already be held.
    */
   void EnsureImmediateWakeUpLocked();
 
   /**
@@ -185,22 +173,22 @@ public:
 
   MediaStreamGraphImpl* GraphImpl() {
     return mGraphImpl;
   }
 
   virtual bool OnThread() = 0;
 
 protected:
+  GraphTime StateComputedTime() const;
+
   // Time of the start of this graph iteration.
   GraphTime mIterationStart;
   // Time of the end of this graph iteration.
   GraphTime mIterationEnd;
-  // Time, in the future, for which blocking has been computed.
-  GraphTime mStateComputedTime;
   // The MediaStreamGraphImpl that owns this driver. This has a lifetime longer
   // than the driver, and will never be null.
   MediaStreamGraphImpl* mGraphImpl;
 
   // This enum specifies the wait state of the driver.
   enum WaitState {
     // RunThread() is running normally
     WAITSTATE_RUNNING,
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -1979,16 +1979,19 @@ MediaCacheStream::AreAllStreamsForResour
   return true;
 }
 
 void
 MediaCacheStream::Close()
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
+  if (!mInitialized)
+    return;
+
   ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
   CloseInternal(mon);
   // Queue an Update since we may have created more free space. Don't do
   // it from CloseInternal since that gets called by Update() itself
   // sometimes, and we try to not to queue updates from Update().
   gMediaCache->QueueUpdate();
 }
 
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -303,17 +303,17 @@ public:
   bool mSentToCompositor;
 
   VideoData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             bool aKeyframe,
             int64_t aTimecode,
             IntSize aDisplay,
-            int32_t aFrameID);
+            uint32_t aFrameID);
 
 protected:
   ~VideoData();
 };
 
 class CryptoTrack
 {
 public:
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -371,38 +371,32 @@ int64_t MediaDecoderStateMachine::GetDec
 }
 
 void MediaDecoderStateMachine::SendStreamData()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(!mAudioSink, "Should've been stopped in RunStateMachine()");
 
-  bool finished = mDecodedStream->SendData();
+  mDecodedStream->SendData();
 
   const auto clockTime = GetClock();
   while (true) {
     const MediaData* a = AudioQueue().PeekFront();
 
     // If we discard audio samples fed to the stream immediately, we will
     // keep decoding audio samples till the end and consume a lot of memory.
     // Therefore we only discard those behind the stream clock to throttle
     // the decoding speed.
     if (a && a->mTime <= clockTime) {
       nsRefPtr<MediaData> releaseMe = AudioQueue().PopFront();
       continue;
     }
     break;
   }
-
-  // To be consistent with AudioSink, |mAudioCompleted| is not set
-  // until all samples are drained.
-  if (finished && AudioQueue().GetSize() == 0) {
-    mAudioCompleted = true;
-  }
 }
 
 bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
 
   if (AudioQueue().GetSize() == 0 ||
@@ -1074,22 +1068,17 @@ void MediaDecoderStateMachine::MaybeStar
 
   DECODER_LOG("MaybeStartPlayback() starting playback");
 
   mDecoder->DispatchPlaybackStarted();
   SetPlayStartTime(TimeStamp::Now());
   MOZ_ASSERT(IsPlaying());
 
   StartAudioThread();
-
-  // Tell DecodedStream to start playback with specified start time and media
-  // info. This is consistent with how we create AudioSink in StartAudioThread().
-  if (mAudioCaptured) {
-    mDecodedStream->StartPlayback(GetMediaTime(), mInfo);
-  }
+  StartDecodedStream();
 
   DispatchDecodeTasksIfNeeded();
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld)", aTime);
@@ -1787,16 +1776,42 @@ MediaDecoderStateMachine::StartAudioThre
         &MediaDecoderStateMachine::OnAudioSinkError));
 
     mAudioSink->SetVolume(mVolume);
     mAudioSink->SetPlaybackRate(mPlaybackRate);
     mAudioSink->SetPreservesPitch(mPreservesPitch);
   }
 }
 
+void
+MediaDecoderStateMachine::StopDecodedStream()
+{
+  MOZ_ASSERT(OnTaskQueue());
+  AssertCurrentThreadInMonitor();
+  mDecodedStream->StopPlayback();
+  mDecodedStreamPromise.DisconnectIfExists();
+}
+
+void
+MediaDecoderStateMachine::StartDecodedStream()
+{
+  MOZ_ASSERT(OnTaskQueue());
+  AssertCurrentThreadInMonitor();
+
+  // Tell DecodedStream to start playback with specified start time and media
+  // info. This is consistent with how we create AudioSink in StartAudioThread().
+  if (mAudioCaptured && !mDecodedStreamPromise.Exists()) {
+    mDecodedStreamPromise.Begin(
+      mDecodedStream->StartPlayback(GetMediaTime(), mInfo)->Then(
+        OwnerThread(), __func__, this,
+        &MediaDecoderStateMachine::OnDecodedStreamFinish,
+        &MediaDecoderStateMachine::OnDecodedStreamError));
+  }
+}
+
 int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
 {
   MOZ_ASSERT(OnTaskQueue());
   NS_ASSERTION(HasAudio(),
                "Should only call AudioDecodedUsecs() when we have audio");
   // The amount of audio we have decoded is the amount of audio data we've
   // already decoded and pushed to the hardware, plus the amount of audio
   // data waiting to be pushed to the hardware.
@@ -2390,17 +2405,17 @@ nsresult MediaDecoderStateMachine::RunSt
           NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
         AbstractThread::MainThread()->Dispatch(event.forget());
 
         mSentPlaybackEndedEvent = true;
 
         // Stop audio sink after call to AudioEndTime() above, otherwise it will
         // return an incorrect value due to a null mAudioSink.
         StopAudioThread();
-        mDecodedStream->StopPlayback();
+        StopDecodedStream();
       }
 
       return NS_OK;
     }
   }
 
   return NS_OK;
 }
@@ -2420,17 +2435,17 @@ MediaDecoderStateMachine::Reset()
              mState == DECODER_STATE_SEEKING ||
              mState == DECODER_STATE_DORMANT ||
              mState == DECODER_STATE_DECODING_NONE);
 
   // Stop the audio thread. Otherwise, AudioSink might be accessing AudioQueue
   // outside of the decoder monitor while we are clearing the queue and causes
   // crash for no samples to be popped.
   StopAudioThread();
-  mDecodedStream->StopPlayback();
+  StopDecodedStream();
 
   mVideoFrameEndTime = -1;
   mDecodedVideoEndTime = -1;
   mDecodedAudioEndTime = -1;
   mAudioCompleted = false;
   AudioQueue().Reset();
   VideoQueue().Reset();
   mFirstVideoFrameAfterSeek = nullptr;
@@ -2521,17 +2536,17 @@ void MediaDecoderStateMachine::RenderVid
     }
 
     ImageContainer::NonOwningImage* img = images.AppendElement();
     img->mTimeStamp = t;
     img->mImage = frame->mImage;
     img->mFrameID = frame->mFrameID;
     img->mProducerID = mProducerID;
 
-    VERBOSE_LOG("playing video frame %lld (id=%d) (queued=%i, state-machine=%i, decoder-queued=%i)",
+    VERBOSE_LOG("playing video frame %lld (id=%x) (queued=%i, state-machine=%i, decoder-queued=%i)",
                 frame->mTime, frame->mFrameID,
                 VideoQueue().GetSize() + mReader->SizeOfVideoQueueInFrames(),
                 VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames());
   }
 
   container->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
 }
 
@@ -3071,16 +3086,42 @@ void MediaDecoderStateMachine::OnAudioSi
     return;
   }
 
   // Otherwise notify media decoder/element about this error for it makes
   // no sense to play an audio-only file without sound output.
   DecodeError();
 }
 
+void
+MediaDecoderStateMachine::OnDecodedStreamFinish()
+{
+  MOZ_ASSERT(OnTaskQueue());
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  MOZ_ASSERT(mAudioCaptured, "Audio should be captured.");
+
+  mDecodedStreamPromise.Complete();
+  if (mInfo.HasAudio()) {
+    mAudioCompleted = true;
+  }
+  // To notify PlaybackEnded as soon as possible.
+  ScheduleStateMachine();
+}
+
+void
+MediaDecoderStateMachine::OnDecodedStreamError()
+{
+  MOZ_ASSERT(OnTaskQueue());
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  MOZ_ASSERT(mAudioCaptured, "Audio should be captured.");
+
+  mDecodedStreamPromise.Complete();
+  DecodeError();
+}
+
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   return (mReader->IsAsync() && mReader->VideoIsHardwareAccelerated())
     ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
     : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
@@ -3094,33 +3135,34 @@ void MediaDecoderStateMachine::DispatchA
     ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
     if (!self->mAudioCaptured) {
       // Stop the audio sink if it's running.
       self->StopAudioThread();
       self->mAudioCaptured = true;
       // Start DecodedStream if we are already playing. Otherwise it will be
       // handled in MaybeStartPlayback().
       if (self->IsPlaying()) {
-        self->mDecodedStream->StartPlayback(self->GetMediaTime(), self->mInfo);
+        self->StartDecodedStream();
       }
       self->ScheduleStateMachine();
     }
   });
   OwnerThread()->Dispatch(r.forget());
 }
 
 void MediaDecoderStateMachine::DispatchAudioUncaptured()
 {
   nsRefPtr<MediaDecoderStateMachine> self = this;
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () -> void
   {
     MOZ_ASSERT(self->OnTaskQueue());
     ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
     if (self->mAudioCaptured) {
-      // Start again the audio sink
+      self->StopDecodedStream();
+      // Start again the audio sink.
       self->mAudioCaptured = false;
       if (self->IsPlaying()) {
         self->StartAudioThread();
       }
       self->ScheduleStateMachine();
     }
   });
   OwnerThread()->Dispatch(r.forget());
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -513,16 +513,20 @@ protected:
   // Stops the audio thread. The decoder monitor must be held with exactly
   // one lock count. Called on the state machine thread.
   void StopAudioThread();
 
   // Starts the audio thread. The decoder monitor must be held with exactly
   // one lock count. Called on the state machine thread.
   void StartAudioThread();
 
+  void StopDecodedStream();
+
+  void StartDecodedStream();
+
   // Notification method invoked when mPlayState changes.
   void PlayStateChanged();
 
   // Notification method invoked when mLogicallySeeking changes.
   void LogicallySeekingChanged();
 
   // Notification method invoked when mSameOriginMedia changes.
   void SameOriginMediaChanged();
@@ -665,16 +669,20 @@ protected:
 private:
   // Resolved by the AudioSink to signal that all outstanding work is complete
   // and the sink is shutting down.
   void OnAudioSinkComplete();
 
   // Rejected by the AudioSink to signal errors.
   void OnAudioSinkError();
 
+  void OnDecodedStreamFinish();
+
+  void OnDecodedStreamError();
+
   // Return true if the video decoder's decode speed can not catch up the
   // play time.
   bool NeedToSkipToNextKeyframe();
 
   // The decoder object that created this state machine. The state machine
   // holds a strong reference to the decoder to ensure that the decoder stays
   // alive once media element has started the decoder shutdown process, and has
   // dropped its reference to the decoder. This enables the state machine to
@@ -1279,16 +1287,17 @@ private:
   // can be read on any thread while holding the monitor, or on the main thread
   // without holding the monitor.
   nsRefPtr<DecodedStream> mDecodedStream;
 
   // Media data resource from the decoder.
   nsRefPtr<MediaResource> mResource;
 
   MozPromiseRequestHolder<GenericPromise> mAudioSinkPromise;
+  MozPromiseRequestHolder<GenericPromise> mDecodedStreamPromise;
 
   MediaEventListener mAudioQueueListener;
   MediaEventListener mVideoQueueListener;
 
 private:
   // The buffered range. Mirrored from the decoder thread.
   Mirror<media::TimeIntervals> mBuffered;
 
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1033,17 +1033,21 @@ MediaFormatReader::DecodeDemuxedSamples(
     LOGV("Input:%lld (dts:%lld kf:%d)",
          sample->mTime, sample->mTimecode, sample->mKeyframe);
     decoder.mOutputRequested = true;
     decoder.mNumSamplesInput++;
     decoder.mSizeOfQueue++;
     if (aTrack == TrackInfo::kVideoTrack) {
       aA.mParsed++;
     }
-    decoder.mDecoder->Input(sample);
+    if (NS_FAILED(decoder.mDecoder->Input(sample))) {
+      LOG("Unable to pass frame to decoder");
+      NotifyError(aTrack);
+      return;
+    }
     decoder.mQueuedSamples.RemoveElementAt(0);
     samplesPending = true;
   }
 
   // We have serviced the decoder's request for more data.
   decoder.mInputExhausted = false;
 }
 
@@ -1109,18 +1113,19 @@ MediaFormatReader::Update(TrackType aTra
     mLastReportedNumDecodedFrames = decoder.mNumSamplesOutput;
   }
 
   if (decoder.HasPromise()) {
     needOutput = true;
     if (!decoder.mOutput.IsEmpty()) {
       // We have a decoded sample ready to be returned.
       if (aTrack == TrackType::kVideoTrack) {
+        nsCString error;
         mVideo.mIsHardwareAccelerated =
-          mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated();
+          mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error);
       }
       while (decoder.mOutput.Length()) {
         nsRefPtr<MediaData> output = decoder.mOutput[0];
         decoder.mOutput.RemoveElementAt(0);
         decoder.mSizeOfQueue -= 1;
         if (decoder.mTimeThreshold.isNothing() ||
             media::TimeUnit::FromMicroseconds(output->mTime) >= decoder.mTimeThreshold.ref()) {
           ReturnOutput(output, aTrack);
--- a/dom/media/MediaSegment.h
+++ b/dom/media/MediaSegment.h
@@ -279,16 +279,36 @@ public:
     void Next() { ++mIndex; }
     const Chunk& operator*() { return mSegment.mChunks[mIndex]; }
     const Chunk* operator->() { return &mSegment.mChunks[mIndex]; }
   private:
     const MediaSegmentBase<C, Chunk>& mSegment;
     uint32_t mIndex;
   };
 
+  Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr)
+  {
+    if (aOffset < 0) {
+      return nullptr;
+    }
+    StreamTime offset = 0;
+    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
+      Chunk& c = mChunks[i];
+      StreamTime nextOffset = offset + c.GetDuration();
+      if (aOffset < nextOffset) {
+        if (aStart) {
+          *aStart = offset;
+        }
+        return &c;
+      }
+      offset = nextOffset;
+    }
+    return nullptr;
+  }
+
   void RemoveLeading(StreamTime aDuration)
   {
     RemoveLeading(aDuration, 0);
   }
 
 #ifdef MOZILLA_INTERNAL_API
   void GetStartTime(TimeStamp &aTime) {
     aTime = mChunks[0].mTimeStamp;
@@ -351,36 +371,16 @@ protected:
   {
     MOZ_ASSERT(aDuration >= 0);
     Chunk* c = mChunks.AppendElement();
     c->mDuration = aDuration;
     mDuration += aDuration;
     return c;
   }
 
-  Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr)
-  {
-    if (aOffset < 0) {
-      return nullptr;
-    }
-    StreamTime offset = 0;
-    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
-      Chunk& c = mChunks[i];
-      StreamTime nextOffset = offset + c.GetDuration();
-      if (aOffset < nextOffset) {
-        if (aStart) {
-          *aStart = offset;
-        }
-        return &c;
-      }
-      offset = nextOffset;
-    }
-    return nullptr;
-  }
-
   Chunk* GetLastChunk()
   {
     if (mChunks.IsEmpty()) {
       return nullptr;
     }
     return &mChunks[mChunks.Length() - 1];
   }
 
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -74,20 +74,16 @@ MediaStreamGraphImpl::~MediaStreamGraphI
 void
 MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
 {
   if (aStream->mFinished)
     return;
   STREAM_LOG(LogLevel::Debug, ("MediaStream %p will finish", aStream));
   aStream->mFinished = true;
   aStream->mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);
-  // Force at least one more iteration of the control loop, since we rely
-  // on UpdateCurrentTimeForStreams to notify our listeners once the stream end
-  // has been reached.
-  EnsureNextIteration();
 
   SetStreamOrderDirty();
 }
 
 static const GraphTime START_TIME_DELAYED = -1;
 
 void
 MediaStreamGraphImpl::AddStream(MediaStream* aStream)
@@ -103,17 +99,17 @@ MediaStreamGraphImpl::AddStream(MediaStr
     }
   }
 
   if (contextSuspended) {
     aStream->mBufferStartTime = START_TIME_DELAYED;
     mSuspendedStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
   } else {
-    aStream->mBufferStartTime = IterationEnd();
+    aStream->mBufferStartTime = mProcessedTime;
     mStreams.AppendElement(aStream);
     STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph", aStream));
   }
 
   SetStreamOrderDirty();
 }
 
 void
@@ -166,18 +162,18 @@ MediaStreamGraphImpl::ExtractPendingInpu
   {
     MutexAutoLock lock(aStream->mMutex);
     if (aStream->mPullEnabled && !aStream->mFinished &&
         !aStream->mListeners.IsEmpty()) {
       // Compute how much stream time we'll need assuming we don't block
       // the stream at all between mBlockingDecisionsMadeUntilTime and
       // aDesiredUpToTime.
       StreamTime t =
-        GraphTimeToStreamTime(aStream, CurrentDriver()->StateComputedTime()) +
-        (aDesiredUpToTime - CurrentDriver()->StateComputedTime());
+        GraphTimeToStreamTime(aStream, mStateComputedTime) +
+        (aDesiredUpToTime - mStateComputedTime);
       STREAM_LOG(LogLevel::Verbose, ("Calling NotifyPull aStream=%p t=%f current end=%f", aStream,
                                   MediaTimeToSeconds(t),
                                   MediaTimeToSeconds(aStream->mBuffer.GetEnd())));
       if (t > aStream->mBuffer.GetEnd()) {
         *aEnsureNextIteration = true;
 #ifdef DEBUG
         if (aStream->mListeners.Length() == 0) {
           STREAM_LOG(LogLevel::Error, ("No listeners in NotifyPull aStream=%p desired=%f current end=%f",
@@ -249,68 +245,69 @@ MediaStreamGraphImpl::ExtractPendingInpu
     FinishStream(aStream);
   }
 }
 
 StreamTime
 MediaStreamGraphImpl::GraphTimeToStreamTime(MediaStream* aStream,
                                             GraphTime aTime)
 {
-  MOZ_ASSERT(aTime <= CurrentDriver()->StateComputedTime(),
+  MOZ_ASSERT(aTime <= mStateComputedTime,
                "Don't ask about times where we haven't made blocking decisions yet");
-  if (aTime <= IterationEnd()) {
+  if (aTime <= mProcessedTime) {
     return std::max<StreamTime>(0, aTime - aStream->mBufferStartTime);
   }
-  GraphTime t = IterationEnd();
+  GraphTime t = mProcessedTime;
   StreamTime s = t - aStream->mBufferStartTime;
   while (t < aTime) {
     GraphTime end;
     if (!aStream->mBlocked.GetAt(t, &end)) {
       s += std::min(aTime, end) - t;
     }
     t = end;
   }
   return std::max<StreamTime>(0, s);
 }
 
 StreamTime
 MediaStreamGraphImpl::GraphTimeToStreamTimeOptimistic(MediaStream* aStream,
                                                       GraphTime aTime)
 {
-  GraphTime computedUpToTime = std::min(CurrentDriver()->StateComputedTime(), aTime);
+  GraphTime computedUpToTime = std::min(mStateComputedTime, aTime);
   StreamTime s = GraphTimeToStreamTime(aStream, computedUpToTime);
   return s + (aTime - computedUpToTime);
 }
 
 GraphTime
 MediaStreamGraphImpl::StreamTimeToGraphTime(MediaStream* aStream,
                                             StreamTime aTime, uint32_t aFlags)
 {
   if (aTime >= STREAM_TIME_MAX) {
     return GRAPH_TIME_MAX;
   }
-  MediaTime bufferElapsedToCurrentTime =  IterationEnd() - aStream->mBufferStartTime;
+  MediaTime bufferElapsedToCurrentTime =
+    mProcessedTime - aStream->mBufferStartTime;
   if (aTime < bufferElapsedToCurrentTime ||
       (aTime == bufferElapsedToCurrentTime && !(aFlags & INCLUDE_TRAILING_BLOCKED_INTERVAL))) {
     return aTime + aStream->mBufferStartTime;
   }
 
   MediaTime streamAmount = aTime - bufferElapsedToCurrentTime;
   NS_ASSERTION(streamAmount >= 0, "Can't answer queries before current time");
 
-  GraphTime t = IterationEnd();
+  GraphTime t = mProcessedTime;
   while (t < GRAPH_TIME_MAX) {
     if (!(aFlags & INCLUDE_TRAILING_BLOCKED_INTERVAL) && streamAmount == 0) {
       return t;
     }
     bool blocked;
     GraphTime end;
-    if (t < CurrentDriver()->StateComputedTime()) {
+    if (t < mStateComputedTime) {
       blocked = aStream->mBlocked.GetAt(t, &end);
-      end = std::min(end, CurrentDriver()->StateComputedTime());
+      end = std::min(end, mStateComputedTime);
     } else {
       blocked = false;
       end = GRAPH_TIME_MAX;
     }
     if (blocked) {
       t = end;
     } else {
       if (streamAmount == 0) {
@@ -331,34 +328,31 @@ MediaStreamGraphImpl::IterationEnd() con
   return CurrentDriver()->IterationEnd();
 }
 
 void
 MediaStreamGraphImpl::StreamNotifyOutput(MediaStream* aStream)
 {
   for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
     MediaStreamListener* l = aStream->mListeners[j];
-    l->NotifyOutput(this, IterationEnd());
+    l->NotifyOutput(this, mProcessedTime);
   }
 }
 
 void
 MediaStreamGraphImpl::StreamReadyToFinish(MediaStream* aStream)
 {
   MOZ_ASSERT(aStream->mFinished);
   MOZ_ASSERT(!aStream->mNotifiedFinished);
 
   // The stream is fully finished when all of its track data has been played
   // out.
-  if (IterationEnd() >=
+  if (mProcessedTime >=
       aStream->StreamTimeToGraphTime(aStream->GetStreamBuffer().GetAllTracksEnd()))  {
-    NS_WARN_IF_FALSE(aStream->mNotifiedBlocked,
-      "Should've notified blocked=true for a fully finished stream");
     aStream->mNotifiedFinished = true;
-    aStream->mLastPlayedVideoFrame.SetNull();
     SetStreamOrderDirty();
     for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
       MediaStreamListener* l = aStream->mListeners[j];
       l->NotifyEvent(this, MediaStreamListener::EVENT_FINISHED);
     }
   }
 }
 
@@ -401,19 +395,17 @@ MediaStreamGraphImpl::UpdateCurrentTimeF
       stream->AdvanceTimeVaryingValuesToCurrentTime(aNextCurrentTime,
                                                     blockedTime);
       // Advance mBlocked last so that AdvanceTimeVaryingValuesToCurrentTime
       // can rely on the value of mBlocked.
       stream->mBlocked.AdvanceCurrentTime(aNextCurrentTime);
 
       if (runningAndSuspendedPair[array] == &mStreams) {
         bool streamHasOutput = blockedTime < aNextCurrentTime - aPrevCurrentTime;
-        // Make this an assertion when bug 957832 is fixed.
-        NS_WARN_IF_FALSE(
-          !streamHasOutput || !stream->mNotifiedFinished,
+        NS_ASSERTION(!streamHasOutput || !stream->mNotifiedFinished,
           "Shouldn't have already notified of finish *and* have output!");
 
         if (streamHasOutput) {
           StreamNotifyOutput(stream);
         }
 
         if (stream->mFinished && !stream->mNotifiedFinished) {
           StreamReadyToFinish(stream);
@@ -436,23 +428,23 @@ MediaStreamGraphImpl::WillUnderrun(Media
   // unless they block on some other stream.
   if (aStream->mFinished || aStream->AsProcessedStream()) {
     return false;
   }
   GraphTime bufferEnd =
     StreamTimeToGraphTime(aStream, aStream->GetBufferEnd(),
                           INCLUDE_TRAILING_BLOCKED_INTERVAL);
 #ifdef DEBUG
-  if (bufferEnd < IterationEnd()) {
+  if (bufferEnd < mProcessedTime) {
     STREAM_LOG(LogLevel::Error, ("MediaStream %p underrun, "
-                              "bufferEnd %f < IterationEnd() %f (%lld < %lld), Streamtime %lld",
-                              aStream, MediaTimeToSeconds(bufferEnd), MediaTimeToSeconds(IterationEnd()),
-                              bufferEnd, IterationEnd(), aStream->GetBufferEnd()));
+                              "bufferEnd %f < mProcessedTime %f (%lld < %lld), Streamtime %lld",
+                              aStream, MediaTimeToSeconds(bufferEnd), MediaTimeToSeconds(mProcessedTime),
+                              bufferEnd, mProcessedTime, aStream->GetBufferEnd()));
     aStream->DumpTrackInfo();
-    NS_ASSERTION(bufferEnd >= IterationEnd(), "Buffer underran");
+    NS_ASSERTION(bufferEnd >= mProcessedTime, "Buffer underran");
   }
 #endif
   // We should block after bufferEnd.
   if (bufferEnd <= aTime) {
     STREAM_LOG(LogLevel::Verbose, ("MediaStream %p will block due to data underrun at %ld, "
                                 "bufferEnd %ld",
                                 aStream, aTime, bufferEnd));
     return true;
@@ -744,62 +736,51 @@ MediaStreamGraphImpl::UpdateStreamOrder(
   }
 
   MOZ_ASSERT(orderedStreamCount == mFirstCycleBreaker);
 }
 
 void
 MediaStreamGraphImpl::RecomputeBlocking(GraphTime aEndBlockingDecisions)
 {
-  bool blockingDecisionsWillChange = false;
-
   STREAM_LOG(LogLevel::Verbose, ("Media graph %p computing blocking for time %f",
-                              this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime())));
+                              this, MediaTimeToSeconds(mStateComputedTime)));
   nsTArray<MediaStream*>* runningAndSuspendedPair[2];
   runningAndSuspendedPair[0] = &mStreams;
   runningAndSuspendedPair[1] = &mSuspendedStreams;
 
   for (uint32_t array = 0; array < 2; array++) {
     for (uint32_t i = 0; i < (*runningAndSuspendedPair[array]).Length(); ++i) {
       MediaStream* stream = (*runningAndSuspendedPair[array])[i];
       if (!stream->mInBlockingSet) {
         // Compute a partition of the streams containing 'stream' such that we
         // can
         // compute the blocking status of each subset independently.
         nsAutoTArray<MediaStream*, 10> streamSet;
         AddBlockingRelatedStreamsToSet(&streamSet, stream);
 
         GraphTime end;
-        for (GraphTime t = CurrentDriver()->StateComputedTime();
+        for (GraphTime t = mStateComputedTime;
              t < aEndBlockingDecisions; t = end) {
           end = GRAPH_TIME_MAX;
           RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
-          if (end < GRAPH_TIME_MAX) {
-            blockingDecisionsWillChange = true;
-          }
         }
       }
-
-      GraphTime end;
-      stream->mBlocked.GetAt(IterationEnd(), &end);
-      if (end < GRAPH_TIME_MAX) {
-        blockingDecisionsWillChange = true;
-      }
     }
   }
   STREAM_LOG(LogLevel::Verbose, ("Media graph %p computed blocking for interval %f to %f",
-                              this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime()),
+                              this, MediaTimeToSeconds(mStateComputedTime),
                               MediaTimeToSeconds(aEndBlockingDecisions)));
 
-  CurrentDriver()->UpdateStateComputedTime(aEndBlockingDecisions);
-
-  if (blockingDecisionsWillChange) {
-    // Make sure we wake up to notify listeners about these changes.
-    EnsureNextIteration();
-  }
+  MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
+  // The next state computed time can be the same as the previous: it
+  // means the driver would have been blocking indefinitely, but the graph has
+  // been woken up right after having gone to sleep.
+  MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
+  mStateComputedTime = aEndBlockingDecisions;
 }
 
 void
 MediaStreamGraphImpl::AddBlockingRelatedStreamsToSet(nsTArray<MediaStream*>* aStreams,
                                                      MediaStream* aStream)
 {
   if (aStream->mInBlockingSet)
     return;
@@ -1118,79 +1099,150 @@ private:
 void
 MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
 {
   MOZ_ASSERT(mRealtime, "Should only attempt to play video in realtime mode");
 
   if (aStream->mVideoOutputs.IsEmpty())
     return;
 
-  // Display the next frame a bit early. This is better than letting the current
-  // frame be displayed for too long. Because depending on the GraphDriver in
-  // use, we can't really estimate the graph interval duration, we clamp it to
-  // the current state computed time.
-  GraphTime framePosition = IterationEnd() + MillisecondsToMediaTime(CurrentDriver()->IterationDuration());
-  if (framePosition > CurrentDriver()->StateComputedTime()) {
-#ifdef DEBUG
-    if (std::abs(framePosition - CurrentDriver()->StateComputedTime()) >= MillisecondsToMediaTime(5)) {
-      STREAM_LOG(LogLevel::Debug, ("Graph thread slowdown?"));
-    }
-#endif
-    framePosition = CurrentDriver()->StateComputedTime();
-  }
-  MOZ_ASSERT(framePosition >= aStream->mBufferStartTime, "frame position before buffer?");
-  StreamTime frameBufferTime = GraphTimeToStreamTime(aStream, framePosition);
-
+  TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
+
+  // Collect any new frames produced in this iteration.
+  nsAutoTArray<ImageContainer::NonOwningImage,4> newImages;
+  nsRefPtr<Image> blackImage;
+
+  MOZ_ASSERT(mProcessedTime >= aStream->mBufferStartTime, "frame position before buffer?");
+  StreamTime frameBufferTime = GraphTimeToStreamTime(aStream, mProcessedTime);
+  StreamTime bufferEndTime = GraphTimeToStreamTime(aStream, mStateComputedTime);
   StreamTime start;
-  const VideoFrame* frame = nullptr;
-  for (StreamBuffer::TrackIter tracks(aStream->GetStreamBuffer(), MediaSegment::VIDEO);
-       !tracks.IsEnded(); tracks.Next()) {
-    VideoSegment* segment = tracks->Get<VideoSegment>();
-    StreamTime thisStart;
-    const VideoFrame* thisFrame =
-        segment->GetFrameAt(frameBufferTime, &thisStart);
-    if (thisFrame && thisFrame->GetImage()) {
-      start = thisStart;
-      frame = thisFrame;
+  const VideoChunk* chunk;
+  for ( ;
+       frameBufferTime < bufferEndTime;
+       frameBufferTime = start + chunk->GetDuration()) {
+    // Pick the last track that has a video chunk for the time, and
+    // schedule its frame.
+    chunk = nullptr;
+    for (StreamBuffer::TrackIter tracks(aStream->GetStreamBuffer(),
+                                        MediaSegment::VIDEO);
+         !tracks.IsEnded();
+         tracks.Next()) {
+      VideoSegment* segment = tracks->Get<VideoSegment>();
+      StreamTime thisStart;
+      const VideoChunk* thisChunk =
+        segment->FindChunkContaining(frameBufferTime, &thisStart);
+      if (thisChunk && thisChunk->mFrame.GetImage()) {
+        start = thisStart;
+        chunk = thisChunk;
+      }
+    }
+    if (!chunk)
+      break;
+
+    const VideoFrame* frame = &chunk->mFrame;
+    if (*frame == aStream->mLastPlayedVideoFrame) {
+      continue;
     }
+
+    Image* image = frame->GetImage();
+    STREAM_LOG(LogLevel::Verbose,
+               ("MediaStream %p writing video frame %p (%dx%d)",
+                aStream, image, frame->GetIntrinsicSize().width,
+                frame->GetIntrinsicSize().height));
+    // Schedule this frame after the previous frame finishes, instead of at
+    // its start time.  These times only differ in the case of multiple
+    // tracks.
+    GraphTime frameTime =
+      StreamTimeToGraphTime(aStream, frameBufferTime,
+                            INCLUDE_TRAILING_BLOCKED_INTERVAL);
+    TimeStamp targetTime = currentTimeStamp +
+      TimeDuration::FromSeconds(MediaTimeToSeconds(frameTime - IterationEnd()));
+
+    if (frame->GetForceBlack()) {
+      if (!blackImage) {
+        blackImage = aStream->mVideoOutputs[0]->
+          GetImageContainer()->CreateImage(ImageFormat::PLANAR_YCBCR);
+        if (blackImage) {
+          // Sets the image to a single black pixel, which will be scaled to
+          // fill the rendered size.
+          SetImageToBlackPixel(static_cast<PlanarYCbCrImage*>
+                               (blackImage.get()));
+        }
+      }
+      if (blackImage) {
+        image = blackImage;
+      }
+    }
+    newImages.AppendElement(ImageContainer::NonOwningImage(image, targetTime));
+
+    aStream->mLastPlayedVideoFrame = *frame;
   }
-  if (!frame || *frame == aStream->mLastPlayedVideoFrame)
+
+  if (!aStream->mLastPlayedVideoFrame.GetImage())
     return;
 
-  STREAM_LOG(LogLevel::Verbose, ("MediaStream %p writing video frame %p (%dx%d)",
-                              aStream, frame->GetImage(), frame->GetIntrinsicSize().width,
-                              frame->GetIntrinsicSize().height));
-  GraphTime startTime = StreamTimeToGraphTime(aStream,
-      start, INCLUDE_TRAILING_BLOCKED_INTERVAL);
-  TimeStamp targetTime = CurrentDriver()->GetCurrentTimeStamp() +
-      TimeDuration::FromMilliseconds(double(startTime - IterationEnd()));
+  nsAutoTArray<ImageContainer::NonOwningImage,4> images;
+  bool haveMultipleImages = false;
+
   for (uint32_t i = 0; i < aStream->mVideoOutputs.Length(); ++i) {
     VideoFrameContainer* output = aStream->mVideoOutputs[i];
 
-    if (frame->GetForceBlack()) {
-      nsRefPtr<Image> image =
-        output->GetImageContainer()->CreateImage(ImageFormat::PLANAR_YCBCR);
-      if (image) {
-        // Sets the image to a single black pixel, which will be scaled to fill
-        // the rendered size.
-        SetImageToBlackPixel(static_cast<PlanarYCbCrImage*>(image.get()));
+    // Find previous frames that may still be valid.
+    nsAutoTArray<ImageContainer::OwningImage,4> previousImages;
+    output->GetImageContainer()->GetCurrentImages(&previousImages);
+    uint32_t j = previousImages.Length();
+    if (j) {
+      // Re-use the most recent frame before currentTimeStamp and subsequent,
+      // always keeping at least one frame.
+      do {
+        --j;
+      } while (j > 0 && previousImages[j].mTimeStamp > currentTimeStamp);
+    }
+    if (previousImages.Length() - j + newImages.Length() > 1) {
+      haveMultipleImages = true;
+    }
+
+    // Don't update if there are no changes.
+    if (j == 0 && newImages.IsEmpty())
+      continue;
+
+    for ( ; j < previousImages.Length(); ++j) {
+      const auto& image = previousImages[j];
+      // Cope with potential clock skew with AudioCallbackDriver.
+      if (newImages.Length() && image.mTimeStamp > newImages[0].mTimeStamp) {
+        STREAM_LOG(LogLevel::Warning,
+                   ("Dropping %u video frames due to clock skew",
+                    unsigned(previousImages.Length() - j)));
+        break;
       }
-      output->SetCurrentFrame(frame->GetIntrinsicSize(), image,
-                              targetTime);
-    } else {
-      output->SetCurrentFrame(frame->GetIntrinsicSize(), frame->GetImage(),
-                              targetTime);
+
+      images.AppendElement(ImageContainer::
+                           NonOwningImage(image.mImage,
+                                          image.mTimeStamp, image.mFrameID));
     }
 
+    // Add the frames from this iteration.
+    for (auto& image : newImages) {
+      image.mFrameID = output->NewFrameID();
+      images.AppendElement(image);
+    }
+    output->SetCurrentFrames(aStream->mLastPlayedVideoFrame.GetIntrinsicSize(),
+                             images);
+
     nsCOMPtr<nsIRunnable> event =
       new VideoFrameContainerInvalidateRunnable(output);
     DispatchToMainThreadAfterStreamStateUpdate(event.forget());
+
+    images.ClearAndRetainStorage();
   }
-  if (!aStream->mNotifiedFinished) {
-    aStream->mLastPlayedVideoFrame = *frame;
+
+  // If the stream has finished and the timestamps of all frames have expired
+  // then no more updates are required.
+  if (aStream->mFinished && !haveMultipleImages) {
+    aStream->mLastPlayedVideoFrame.SetNull();
   }
 }
 
 bool
 MediaStreamGraphImpl::ShouldUpdateMainThread()
 {
   if (mRealtime) {
     return true;
@@ -1216,17 +1268,17 @@ MediaStreamGraphImpl::PrepareUpdatesToMa
     for (uint32_t i = 0; i < mStreams.Length(); ++i) {
       MediaStream* stream = mStreams[i];
       if (!stream->MainThreadNeedsUpdates()) {
         continue;
       }
       StreamUpdate* update = mStreamUpdates.AppendElement();
       update->mStream = stream;
       update->mNextMainThreadCurrentTime =
-        GraphTimeToStreamTime(stream, IterationEnd());
+        GraphTimeToStreamTime(stream, mProcessedTime);
       update->mNextMainThreadFinished = stream->mNotifiedFinished;
     }
     if (!mPendingUpdateRunnables.IsEmpty()) {
       mUpdateRunnables.AppendElements(Move(mPendingUpdateRunnables));
     }
   }
 
   // Don't send the message to the main thread if it's not going to have
@@ -1311,23 +1363,23 @@ MediaStreamGraphImpl::UpdateGraph(GraphT
     SourceMediaStream* is = mStreams[i]->AsSourceStream();
     if (is) {
       UpdateConsumptionState(is);
       ExtractPendingInput(is, aEndBlockingDecision, &ensureNextIteration);
     }
   }
 
   // The loop is woken up so soon that IterationEnd() barely advances and we
-  // end up having aEndBlockingDecision == CurrentDriver()->StateComputedTime().
+  // end up having aEndBlockingDecision == mStateComputedTime.
   // Since stream blocking is computed in the interval of
-  // [CurrentDriver()->StateComputedTime(), aEndBlockingDecision), it won't be computed at all.
+  // [mStateComputedTime, aEndBlockingDecision), it won't be computed at all.
   // We should ensure next iteration so that pending blocking changes will be
   // computed in next loop.
   if (ensureNextIteration ||
-      aEndBlockingDecision == CurrentDriver()->StateComputedTime()) {
+      aEndBlockingDecision == mStateComputedTime) {
     EnsureNextIteration();
   }
 
   // Figure out which streams are blocked and when.
   RecomputeBlocking(aEndBlockingDecision);
 }
 
 void
@@ -1413,18 +1465,17 @@ MediaStreamGraphImpl::Process(GraphTime 
   }
 
   if (!allBlockedForever) {
     EnsureNextIteration();
   }
 }
 
 bool
-MediaStreamGraphImpl::OneIteration(GraphTime aFrom, GraphTime aTo,
-                                   GraphTime aStateFrom, GraphTime aStateEnd)
+MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
 {
   {
     MonitorAutoLock lock(mMemoryReportMonitor);
     if (mNeedsMemoryReport) {
       mNeedsMemoryReport = false;
 
       for (uint32_t i = 0; i < mStreams.Length(); ++i) {
         AudioNodeStream* stream = mStreams[i]->AsAudioNodeStream();
@@ -1434,22 +1485,24 @@ MediaStreamGraphImpl::OneIteration(Graph
           mAudioStreamSizes.AppendElement(usage);
         }
       }
 
       lock.Notify();
     }
   }
 
-  UpdateCurrentTimeForStreams(aFrom, aTo);
-
+  GraphTime stateFrom = mStateComputedTime;
   GraphTime stateEnd = std::min(aStateEnd, mEndTime);
   UpdateGraph(stateEnd);
 
-  Process(aStateFrom, stateEnd);
+  Process(stateFrom, stateEnd);
+  mProcessedTime = stateEnd;
+
+  UpdateCurrentTimeForStreams(stateFrom, stateEnd);
 
   // Send updates to the main thread and wait for the next control loop
   // iteration.
   {
     MonitorAutoLock lock(mMonitor);
     bool finalUpdate = mForceShutDown ||
       (stateEnd >= mEndTime && AllFinishedStreamsNotified()) ||
       (IsEmpty() && mBackMessageQueue.IsEmpty());
@@ -1584,17 +1637,16 @@ private:
  * Control messages forwarded from main thread to graph manager thread
  */
 class CreateMessage : public ControlMessage {
 public:
   explicit CreateMessage(MediaStream* aStream) : ControlMessage(aStream) {}
   virtual void Run() override
   {
     mStream->GraphImpl()->AddStream(mStream);
-    mStream->Init();
   }
   virtual void RunDuringShutdown() override
   {
     // Make sure to run this message during shutdown too, to make sure
     // that we balance the number of streams registered with the graph
     // as they're destroyed during shutdown.
     Run();
   }
@@ -1893,25 +1945,16 @@ MediaStream::SizeOfExcludingThis(MallocS
 }
 
 size_t
 MediaStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
 }
 
-void
-MediaStream::Init()
-{
-  MediaStreamGraphImpl* graph = GraphImpl();
-  mBlocked.SetAtAndAfter(graph->IterationEnd(), true);
-  mExplicitBlockerCount.SetAtAndAfter(graph->IterationEnd(), true);
-  mExplicitBlockerCount.SetAtAndAfter(graph->CurrentDriver()->StateComputedTime(), false);
-}
-
 MediaStreamGraphImpl*
 MediaStream::GraphImpl()
 {
   return mGraph;
 }
 
 MediaStreamGraph*
 MediaStream::Graph()
@@ -2134,17 +2177,17 @@ MediaStream::ChangeExplicitBlockerCount(
 {
   class Message : public ControlMessage {
   public:
     Message(MediaStream* aStream, int32_t aDelta) :
       ControlMessage(aStream), mDelta(aDelta) {}
     virtual void Run()
     {
       mStream->ChangeExplicitBlockerCountImpl(
-          mStream->GraphImpl()->CurrentDriver()->StateComputedTime(), mDelta);
+          mStream->GraphImpl()->mStateComputedTime, mDelta);
     }
     int32_t mDelta;
   };
 
   // This can happen if this method has been called asynchronously, and the
   // stream has been destroyed since then.
   if (mMainThreadDestroyed) {
     return;
@@ -2157,17 +2200,17 @@ MediaStream::BlockStreamIfNeeded()
 {
   class Message : public ControlMessage {
   public:
     explicit Message(MediaStream* aStream) : ControlMessage(aStream)
     { }
     virtual void Run()
     {
       mStream->BlockStreamIfNeededImpl(
-          mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
+          mStream->GraphImpl()->mStateComputedTime);
     }
   };
 
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(new Message(this));
 }
@@ -2177,17 +2220,17 @@ MediaStream::UnblockStreamIfNeeded()
 {
   class Message : public ControlMessage {
   public:
     explicit Message(MediaStream* aStream) : ControlMessage(aStream)
     { }
     virtual void Run()
     {
       mStream->UnblockStreamIfNeededImpl(
-          mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
+          mStream->GraphImpl()->mStateComputedTime);
     }
   };
 
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(new Message(this));
 }
@@ -2630,17 +2673,17 @@ SourceMediaStream::EndAllTrackAndFinish(
 StreamTime
 SourceMediaStream::GetBufferedTicks(TrackID aID)
 {
   StreamBuffer::Track* track  = mBuffer.FindTrack(aID);
   if (track) {
     MediaSegment* segment = track->GetSegment();
     if (segment) {
       return segment->GetDuration() -
-          GraphTimeToStreamTime(GraphImpl()->CurrentDriver()->StateComputedTime());
+          GraphTimeToStreamTime(GraphImpl()->mStateComputedTime);
     }
   }
   return 0;
 }
 
 void
 SourceMediaStream::RegisterForAudioMixing()
 {
@@ -3254,17 +3297,17 @@ MediaStreamGraphImpl::MoveStreams(AudioC
       from.RemoveElementAt(i);
       to.AppendElement(stream);
     }
 
     // If streams got added during a period where an AudioContext was suspended,
     // set their buffer start time to the appropriate value now:
     if (aAudioContextOperation == AudioContextOperation::Resume &&
         stream->mBufferStartTime == START_TIME_DELAYED) {
-      stream->mBufferStartTime = IterationEnd();
+      stream->mBufferStartTime = mProcessedTime;
     }
 
     stream->remove();
   }
   STREAM_LOG(LogLevel::Debug, ("Moving streams between suspended and running"
       "state: mStreams: %d, mSuspendedStreams: %d\n", mStreams.Length(),
       mSuspendedStreams.Length()));
 #ifdef DEBUG
@@ -3440,17 +3483,17 @@ MediaStreamGraph::StartNonRealtimeProces
 
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   NS_ASSERTION(!graph->mRealtime, "non-realtime only");
 
   if (graph->mNonRealtimeProcessing)
     return;
 
   graph->mEndTime =
-    graph->RoundUpToNextAudioBlock(graph->CurrentDriver()->StateComputedTime() +
+    graph->RoundUpToNextAudioBlock(graph->mStateComputedTime +
                                    aTicksToProcess - 1);
   graph->mNonRealtimeProcessing = true;
   graph->EnsureRunInStableState();
 }
 
 void
 ProcessedMediaStream::AddInput(MediaInputPort* aPort)
 {
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -428,18 +428,16 @@ public:
   friend class MediaInputPort;
   friend class AudioNodeExternalInputStream;
 
   virtual SourceMediaStream* AsSourceStream() { return nullptr; }
   virtual ProcessedMediaStream* AsProcessedStream() { return nullptr; }
   virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
   virtual CameraPreviewMediaStream* AsCameraPreviewStream() { return nullptr; }
 
-  // media graph thread only
-  void Init();
   // These Impl methods perform the core functionality of the control methods
   // above, on the media graph thread.
   /**
    * Stop all stream activity and disconnect it from all inputs and outputs.
    * This must be idempotent.
    */
   virtual void DestroyImpl();
   StreamTime GetBufferEnd() { return mBuffer.GetEnd(); }
@@ -627,18 +625,18 @@ protected:
   // Client-set volume of this stream
   struct AudioOutput {
     explicit AudioOutput(void* aKey) : mKey(aKey), mVolume(1.0f) {}
     void* mKey;
     float mVolume;
   };
   nsTArray<AudioOutput> mAudioOutputs;
   nsTArray<nsRefPtr<VideoFrameContainer> > mVideoOutputs;
-  // We record the last played video frame to avoid redundant setting
-  // of the current video frame.
+  // We record the last played video frame to avoid playing the frame again
+  // with a different frame id.
   VideoFrame mLastPlayedVideoFrame;
   // The number of times this stream has been explicitly blocked by the control
   // API, minus the number of times it has been explicitly unblocked.
   TimeVarying<GraphTime,uint32_t,0> mExplicitBlockerCount;
   nsTArray<nsRefPtr<MediaStreamListener> > mListeners;
   nsTArray<MainThreadMediaStreamListener*> mMainThreadListeners;
   nsRefPtr<nsRunnable> mNotificationMainThreadRunnable;
   nsTArray<TrackID> mDisabledTrackIDs;
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -49,18 +49,18 @@ public:
     MOZ_COUNT_CTOR(ControlMessage);
   }
   // All these run on the graph thread
   virtual ~ControlMessage()
   {
     MOZ_COUNT_DTOR(ControlMessage);
   }
   // Do the action of this message on the MediaStreamGraph thread. Any actions
-  // affecting graph processing should take effect at mStateComputedTime.
-  // All stream data for times < mStateComputedTime has already been
+  // affecting graph processing should take effect at mProcessedTime.
+  // All stream data for times < mProcessedTime has already been
   // computed.
   virtual void Run() = 0;
   // When we're shutting down the application, most messages are ignored but
   // some cleanup messages should still be processed (on the main thread).
   // This must not add new control messages to the graph.
   virtual void RunDuringShutdown() {}
   MediaStream* GetStream() { return mStream; }
 
@@ -168,18 +168,17 @@ public:
 #endif
   }
   /*
    * This does the actual iteration: Message processing, MediaStream ordering,
    * blocking computation and processing.
    */
   void DoIteration();
 
-  bool OneIteration(GraphTime aFrom, GraphTime aTo,
-                    GraphTime aStateFrom, GraphTime aStateEnd);
+  bool OneIteration(GraphTime aStateEnd);
 
   bool Running() const
   {
     mMonitor.AssertCurrentThreadOwns();
     return mLifecycleState == LIFECYCLE_RUNNING;
   }
 
   // Get the message queue, from the current GraphDriver thread.
@@ -443,17 +442,18 @@ public:
     mStreamOrderDirty = true;
   }
 
   // Always stereo for now.
   uint32_t AudioChannelCount() const { return 2; }
 
   double MediaTimeToSeconds(GraphTime aTime) const
   {
-    NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
+    NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
+                 "Bad time");
     return static_cast<double>(aTime)/GraphRate();
   }
 
   GraphTime SecondsToMediaTime(double aS) const
   {
     NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
                  "Bad seconds");
     return GraphRate() * aS;
@@ -557,16 +557,27 @@ public:
   nsTArray<MediaStream*> mSuspendedStreams;
   /**
    * Streams from mFirstCycleBreaker to the end of mStreams produce output
    * before they receive input.  They correspond to DelayNodes that are in
    * cycles.
    */
   uint32_t mFirstCycleBreaker;
   /**
+   * Blocking decisions have been computed up to this time.
+   * Between each iteration, this is the same as mProcessedTime.
+   */
+  GraphTime mStateComputedTime = 0;
+  /**
+   * All stream contents have been computed up to this time.
+   * The next batch of updates from the main thread will be processed
+   * at this time.  This is behind mStateComputedTime during processing.
+   */
+  GraphTime mProcessedTime = 0;
+  /**
    * Date of the last time we updated the main thread with the graph state.
    */
   TimeStamp mLastMainThreadUpdate;
   /**
    * Number of active MediaInputPorts
    */
   int32_t mPortCount;
 
--- a/dom/media/VideoFrameContainer.h
+++ b/dom/media/VideoFrameContainer.h
@@ -51,16 +51,25 @@ public:
     SetCurrentFrames(aIntrinsicSize, nsTArray<ImageContainer::NonOwningImage>());
   }
 
   void ClearCurrentFrame();
   // Time in seconds by which the last painted video frame was late by.
   // E.g. if the last painted frame should have been painted at time t,
   // but was actually painted at t+n, this returns n in seconds. Threadsafe.
   double GetFrameDelay();
+
+  // Returns a new frame ID for SetCurrentFrames().  The client must either
+  // call this on only one thread or provide barriers.  Do not use together
+  // with SetCurrentFrame().
+  ImageContainer::FrameID NewFrameID()
+  {
+    return ++mFrameID;
+  }
+
   // Call on main thread
   enum {
     INVALIDATE_DEFAULT,
     INVALIDATE_FORCE
   };
   void Invalidate() { InvalidateWithFlags(INVALIDATE_DEFAULT); }
   B2G_ACL_EXPORT void InvalidateWithFlags(uint32_t aFlags);
   B2G_ACL_EXPORT ImageContainer* GetImageContainer();
@@ -78,18 +87,18 @@ protected:
   // mMutex protects all the fields below.
   Mutex mMutex;
   // The intrinsic size is the ideal size which we should render the
   // ImageContainer's current Image at.
   // This can differ from the Image's actual size when the media resource
   // specifies that the Image should be stretched to have the correct aspect
   // ratio.
   gfxIntSize mIntrinsicSize;
-  // For SetCurrentFrame callers we maintain our own mFrameID which is auto-
-  // incremented at every SetCurrentFrame.
+  // We maintain our own mFrameID which is auto-incremented at every
+  // SetCurrentFrame() or NewFrameID() call.
   ImageContainer::FrameID mFrameID;
   // True when the intrinsic size has been changed by SetCurrentFrame() since
   // the last call to Invalidate().
   // The next call to Invalidate() will recalculate
   // and update the intrinsic size on the element, request a frame reflow and
   // then reset this flag.
   bool mIntrinsicSizeChanged;
   // True when the Image size has changed since the last time Invalidate() was
--- a/dom/media/VideoSegment.h
+++ b/dom/media/VideoSegment.h
@@ -112,24 +112,16 @@ public:
 
   VideoSegment();
   ~VideoSegment();
 
   void AppendFrame(already_AddRefed<Image>&& aImage,
                    StreamTime aDuration,
                    const IntSize& aIntrinsicSize,
                    bool aForceBlack = false);
-  const VideoFrame* GetFrameAt(StreamTime aOffset, StreamTime* aStart = nullptr)
-  {
-    VideoChunk* c = FindChunkContaining(aOffset, aStart);
-    if (!c) {
-      return nullptr;
-    }
-    return &c->mFrame;
-  }
   const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
   {
     VideoChunk* c = GetLastChunk();
     if (!c) {
       return nullptr;
     }
     if (aStart) {
       *aStart = mDuration - c->mDuration;
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -278,24 +278,25 @@ CreateTestH264Decoder(layers::LayersBack
   if (!decoder) {
     return nullptr;
   }
 
   return decoder.forget();
 }
 
 /* static */ bool
-MP4Decoder::IsVideoAccelerated(layers::LayersBackend aBackend)
+MP4Decoder::IsVideoAccelerated(layers::LayersBackend aBackend, nsACString& aFailureReason)
 {
   VideoInfo config;
   nsRefPtr<MediaDataDecoder> decoder(CreateTestH264Decoder(aBackend, config));
   if (!decoder) {
+    aFailureReason.AssignLiteral("Failed to create H264 decoder");
     return false;
   }
-  bool result = decoder->IsHardwareAccelerated();
+  bool result = decoder->IsHardwareAccelerated(aFailureReason);
   return result;
 }
 
 /* static */ bool
 MP4Decoder::CanCreateH264Decoder()
 {
 #ifdef XP_WIN
   static bool haveCachedResult = false;
--- a/dom/media/fmp4/MP4Decoder.h
+++ b/dom/media/fmp4/MP4Decoder.h
@@ -37,16 +37,16 @@ public:
                                  const nsAString& aCodecs,
                                  bool& aOutContainsAAC,
                                  bool& aOutContainsH264,
                                  bool& aOutContainsMP3);
 
   // Returns true if the MP4 backend is preffed on.
   static bool IsEnabled();
 
-  static bool IsVideoAccelerated(layers::LayersBackend aBackend);
+  static bool IsVideoAccelerated(layers::LayersBackend aBackend, nsACString& aReason);
   static bool CanCreateAACDecoder();
   static bool CanCreateH264Decoder();
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -258,17 +258,17 @@ public:
   // The reader will delete the decoder once Shutdown() returns.
   // The MediaDataDecoderCallback *must* not be called after Shutdown() has
   // returned.
   virtual nsresult Shutdown() = 0;
 
   // Called from the state machine task queue or main thread.
   // Decoder needs to decide whether or not hardware accelearation is supported
   // after creating. It doesn't need to call Init() before calling this function.
-  virtual bool IsHardwareAccelerated() const { return false; }
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
 
   // ConfigurationChanged will be called to inform the video or audio decoder
   // that the format of the next input sample is about to change.
   // If video decoder, aConfig will be a VideoInfo object.
   // If audio decoder, aConfig will be a AudioInfo object.
   virtual nsresult ConfigurationChanged(const TrackInfo& aConfig)
   {
     return NS_OK;
--- a/dom/media/platforms/SharedDecoderManager.cpp
+++ b/dom/media/platforms/SharedDecoderManager.cpp
@@ -294,14 +294,14 @@ SharedDecoderProxy::Drain()
 nsresult
 SharedDecoderProxy::Shutdown()
 {
   mManager->SetIdle(this);
   return NS_OK;
 }
 
 bool
-SharedDecoderProxy::IsHardwareAccelerated() const
+SharedDecoderProxy::IsHardwareAccelerated(nsACString& aFailureReason) const
 {
-  return mManager->mDecoder->IsHardwareAccelerated();
+  return mManager->mDecoder->IsHardwareAccelerated(aFailureReason);
 }
 
 } // namespace mozilla
--- a/dom/media/platforms/SharedDecoderManager.h
+++ b/dom/media/platforms/SharedDecoderManager.h
@@ -74,17 +74,17 @@ public:
                      MediaDataDecoderCallback* aCallback);
   virtual ~SharedDecoderProxy();
 
   virtual nsRefPtr<MediaDataDecoder::InitPromise> Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
-  virtual bool IsHardwareAccelerated() const override;
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
 
   friend class SharedDecoderManager;
 
 private:
   nsRefPtr<SharedDecoderManager> mManager;
   MediaDataDecoderCallback* mCallback;
 };
 
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -24,17 +24,16 @@ extern PRLogModuleInfo* gMediaDecoderLog
 
 VPXDecoder::VPXDecoder(const VideoInfo& aConfig,
                        ImageContainer* aImageContainer,
                        FlushableTaskQueue* aTaskQueue,
                        MediaDataDecoderCallback* aCallback)
   : mImageContainer(aImageContainer)
   , mTaskQueue(aTaskQueue)
   , mCallback(aCallback)
-  , mIter(nullptr)
   , mInfo(aConfig)
 {
   MOZ_COUNT_CTOR(VPXDecoder);
   if (aConfig.mMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
     mCodec = Codec::VP8;
   } else if (aConfig.mMimeType.EqualsLiteral("video/webm; codecs=vp9")) {
     mCodec = Codec::VP9;
   } else {
@@ -69,17 +68,16 @@ VPXDecoder::Init()
   }
   return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
 }
 
 nsresult
 VPXDecoder::Flush()
 {
   mTaskQueue->Flush();
-  mIter = nullptr;
   return NS_OK;
 }
 
 int
 VPXDecoder::DoDecodeFrame(MediaRawData* aSample)
 {
 #if defined(DEBUG)
   vpx_codec_stream_info_t si;
@@ -94,19 +92,20 @@ VPXDecoder::DoDecodeFrame(MediaRawData* 
                "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
 #endif
 
   if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
     LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
     return -1;
   }
 
+  vpx_codec_iter_t  iter = nullptr;
   vpx_image_t      *img;
 
-  if ((img = vpx_codec_get_frame(&mVPX, &mIter))) {
+  while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
     NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");
 
     // Chroma shifts are rounded down as per the decoding examples in the SDK
     VideoData::YCbCrBuffer b;
     b.mPlanes[0].mData = img->planes[0];
     b.mPlanes[0].mStride = img->stride[0];
     b.mPlanes[0].mHeight = img->d_h;
     b.mPlanes[0].mWidth = img->d_w;
@@ -138,19 +137,17 @@ VPXDecoder::DoDecodeFrame(MediaRawData* 
 
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
       return -1;
     }
     mCallback->Output(v);
-    return 1;
   }
-  mIter = nullptr;
   return 0;
 }
 
 void
 VPXDecoder::DecodeFrame(MediaRawData* aSample)
 {
   if (DoDecodeFrame(aSample) == -1) {
     mCallback->Error();
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -49,17 +49,16 @@ private:
   void OutputDelayedFrames ();
 
   nsRefPtr<ImageContainer> mImageContainer;
   RefPtr<FlushableTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
   // VPx decoder state
   vpx_codec_ctx_t mVPX;
-  vpx_codec_iter_t mIter;
 
   const VideoInfo& mInfo;
 
   int mCodec;
 };
 
 } // namespace mozilla
 
--- a/dom/media/platforms/apple/AppleVDADecoder.h
+++ b/dom/media/platforms/apple/AppleVDADecoder.h
@@ -71,17 +71,17 @@ public:
                   MediaDataDecoderCallback* aCallback,
                   layers::ImageContainer* aImageContainer);
   virtual ~AppleVDADecoder();
   virtual nsRefPtr<InitPromise> Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
-  virtual bool IsHardwareAccelerated() const override
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override
   {
     return true;
   }
 
   void DispatchOutputTask(already_AddRefed<nsIRunnable> aTask)
   {
     nsCOMPtr<nsIRunnable> task = aTask;
     if (mIsShutDown || mIsFlushing) {
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -17,17 +17,17 @@ class AppleVTDecoder : public AppleVDADe
 public:
   AppleVTDecoder(const VideoInfo& aConfig,
                  FlushableTaskQueue* aVideoTaskQueue,
                  MediaDataDecoderCallback* aCallback,
                  layers::ImageContainer* aImageContainer);
   virtual ~AppleVTDecoder();
   virtual nsRefPtr<InitPromise> Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
-  virtual bool IsHardwareAccelerated() const override
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override
   {
     return mIsHardwareAccelerated;
   }
 
 protected:
   void ProcessFlush() override;
   void ProcessDrain() override;
   void ProcessShutdown() override;
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -6,20 +6,22 @@
 
 #include "DXVA2Manager.h"
 #include <d3d11.h>
 #include "nsThreadUtils.h"
 #include "ImageContainer.h"
 #include "gfxWindowsPlatform.h"
 #include "D3D9SurfaceImage.h"
 #include "mozilla/layers/D3D11ShareHandleImage.h"
+#include "mozilla/layers/ImageBridgeChild.h"
 #include "mozilla/Preferences.h"
 #include "mfapi.h"
 #include "MFTDecoder.h"
 #include "DriverCrashGuard.h"
+#include "nsPrintfCString.h"
 
 const CLSID CLSID_VideoProcessorMFT =
 {
   0x88753b26,
   0x5b24,
   0x49bd,
   { 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78, 0xc9, 0x82 }
 };
@@ -35,42 +37,150 @@ const GUID MF_XVP_PLAYBACK_MODE =
 DEFINE_GUID(MF_LOW_LATENCY,
   0x9c27891a, 0xed7a, 0x40e1, 0x88, 0xe8, 0xb2, 0x27, 0x27, 0xa0, 0x24, 0xee);
 
 namespace mozilla {
 
 using layers::Image;
 using layers::ImageContainer;
 using layers::D3D9SurfaceImage;
+using layers::D3D9RecycleAllocator;
 using layers::D3D11ShareHandleImage;
 
 class D3D9DXVA2Manager : public DXVA2Manager
 {
 public:
   D3D9DXVA2Manager();
   virtual ~D3D9DXVA2Manager();
 
-  HRESULT Init();
+  HRESULT Init(nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
                       ImageContainer* aContainer,
                       Image** aOutImage) override;
 
+  virtual bool SupportsConfig(IMFMediaType* aType) override;
+
 private:
   nsRefPtr<IDirect3D9Ex> mD3D9;
   nsRefPtr<IDirect3DDevice9Ex> mDevice;
   nsRefPtr<IDirect3DDeviceManager9> mDeviceManager;
+  RefPtr<D3D9RecycleAllocator> mTextureClientAllocator;
+  nsRefPtr<IDirectXVideoDecoderService> mDecoderService;
   UINT32 mResetToken;
 };
 
+void GetDXVA2ExtendedFormatFromMFMediaType(IMFMediaType *pType,
+                                           DXVA2_ExtendedFormat *pFormat)
+{
+  // Get the interlace mode.
+  MFVideoInterlaceMode interlace =
+    (MFVideoInterlaceMode)MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Unknown);
+
+  if (interlace == MFVideoInterlace_MixedInterlaceOrProgressive) {
+    pFormat->SampleFormat = DXVA2_SampleFieldInterleavedEvenFirst;
+  } else {
+    pFormat->SampleFormat = (UINT)interlace;
+  }
+
+  pFormat->VideoChromaSubsampling =
+    MFGetAttributeUINT32(pType, MF_MT_VIDEO_CHROMA_SITING, MFVideoChromaSubsampling_Unknown);
+  pFormat->NominalRange =
+    MFGetAttributeUINT32(pType, MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Unknown);
+  pFormat->VideoTransferMatrix =
+    MFGetAttributeUINT32(pType, MF_MT_YUV_MATRIX, MFVideoTransferMatrix_Unknown);
+  pFormat->VideoLighting =
+    MFGetAttributeUINT32(pType, MF_MT_VIDEO_LIGHTING, MFVideoLighting_Unknown);
+  pFormat->VideoPrimaries =
+    MFGetAttributeUINT32(pType, MF_MT_VIDEO_PRIMARIES, MFVideoPrimaries_Unknown);
+  pFormat->VideoTransferFunction =
+    MFGetAttributeUINT32(pType, MF_MT_TRANSFER_FUNCTION, MFVideoTransFunc_Unknown);
+}
+
+HRESULT ConvertMFTypeToDXVAType(IMFMediaType *pType, DXVA2_VideoDesc *pDesc)
+{
+  ZeroMemory(pDesc, sizeof(*pDesc));
+
+  // The D3D format is the first DWORD of the subtype GUID.
+  GUID subtype = GUID_NULL;
+  HRESULT hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  pDesc->Format = (D3DFORMAT)subtype.Data1;
+
+  UINT32 width = 0;
+  UINT32 height = 0;
+  hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  pDesc->SampleWidth = width;
+  pDesc->SampleHeight = height;
+
+  UINT32 fpsNumerator = 0;
+  UINT32 fpsDenominator = 0;
+  hr = MFGetAttributeRatio(pType, MF_MT_FRAME_RATE, &fpsNumerator, &fpsDenominator);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  pDesc->InputSampleFreq.Numerator = fpsNumerator;
+  pDesc->InputSampleFreq.Denominator = fpsDenominator;
+
+  GetDXVA2ExtendedFormatFromMFMediaType(pType, &pDesc->SampleFormat);
+  pDesc->OutputFrameFreq = pDesc->InputSampleFreq;
+  if ((pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedEvenFirst) ||
+      (pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedOddFirst)) {
+    pDesc->OutputFrameFreq.Numerator *= 2;
+  }
+
+  return S_OK;
+}
+
+static const GUID DXVA2_ModeH264_E = {
+  0x1b81be68, 0xa0c7, 0x11d3, { 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5 }
+};
+
+// This tests if a DXVA video decoder can be created for the given media type/resolution.
+// It uses the same decoder device (DXVA2_ModeH264_E - DXVA2_ModeH264_VLD_NoFGT) as the H264
+// decoder MFT provided by windows (CLSID_CMSH264DecoderMFT) uses, so we can use it to determine
+// if the MFT will use software fallback or not.
+bool
+D3D9DXVA2Manager::SupportsConfig(IMFMediaType* aType)
+{
+  DXVA2_VideoDesc desc;
+  HRESULT hr = ConvertMFTypeToDXVAType(aType, &desc);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+  UINT configCount;
+  DXVA2_ConfigPictureDecode* configs = nullptr;
+  hr = mDecoderService->GetDecoderConfigurations(DXVA2_ModeH264_E, &desc, nullptr, &configCount, &configs);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+  nsRefPtr<IDirect3DSurface9> surface;
+  hr = mDecoderService->CreateSurface(desc.SampleWidth, desc.SampleHeight, 0, (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2'),
+  D3DPOOL_DEFAULT, 0, DXVA2_VideoDecoderRenderTarget,
+  surface.StartAssignment(), NULL);
+  if (!SUCCEEDED(hr)) {
+    CoTaskMemFree(configs);
+    return false;
+  }
+
+  for (UINT i = 0; i < configCount; i++) {
+    nsRefPtr<IDirectXVideoDecoder> decoder;
+    IDirect3DSurface9* surfaces = surface;
+    hr = mDecoderService->CreateVideoDecoder(DXVA2_ModeH264_E, &desc, &configs[i], &surfaces, 1, decoder.StartAssignment());
+    if (SUCCEEDED(hr) && decoder) {
+      CoTaskMemFree(configs);
+      return true;
+    }
+  }
+  CoTaskMemFree(configs);
+  return false;
+}
+
 D3D9DXVA2Manager::D3D9DXVA2Manager()
   : mResetToken(0)
 {
   MOZ_COUNT_CTOR(D3D9DXVA2Manager);
   MOZ_ASSERT(NS_IsMainThread());
 }
 
 D3D9DXVA2Manager::~D3D9DXVA2Manager()
@@ -82,45 +192,50 @@ D3D9DXVA2Manager::~D3D9DXVA2Manager()
 IUnknown*
 D3D9DXVA2Manager::GetDXVADeviceManager()
 {
   MutexAutoLock lock(mLock);
   return mDeviceManager;
 }
 
 HRESULT
-D3D9DXVA2Manager::Init()
+D3D9DXVA2Manager::Init(nsACString& aFailureReason)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   gfx::D3D9VideoCrashGuard crashGuard;
   if (crashGuard.Crashed()) {
     NS_WARNING("DXVA2D3D9 crash detected");
+    aFailureReason.AssignLiteral("DXVA2D3D9 crashes detected in the past");
     return E_FAIL;
   }
 
   // Create D3D9Ex.
   HMODULE d3d9lib = LoadLibraryW(L"d3d9.dll");
   NS_ENSURE_TRUE(d3d9lib, E_FAIL);
   decltype(Direct3DCreate9Ex)* d3d9Create =
     (decltype(Direct3DCreate9Ex)*) GetProcAddress(d3d9lib, "Direct3DCreate9Ex");
   nsRefPtr<IDirect3D9Ex> d3d9Ex;
   HRESULT hr = d3d9Create(D3D_SDK_VERSION, getter_AddRefs(d3d9Ex));
   if (!d3d9Ex) {
     NS_WARNING("Direct3DCreate9 failed");
+    aFailureReason.AssignLiteral("Direct3DCreate9 failed");
     return E_FAIL;
   }
 
   // Ensure we can do the YCbCr->RGB conversion in StretchRect.
   // Fail if we can't.
   hr = d3d9Ex->CheckDeviceFormatConversion(D3DADAPTER_DEFAULT,
                                            D3DDEVTYPE_HAL,
                                            (D3DFORMAT)MAKEFOURCC('N','V','1','2'),
                                            D3DFMT_X8R8G8B8);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("CheckDeviceFormatConversion failed with error %X", hr);
+    return hr;
+  }
 
   // Create D3D9DeviceEx.
   D3DPRESENT_PARAMETERS params = {0};
   params.BackBufferWidth = 1;
   params.BackBufferHeight = 1;
   params.BackBufferFormat = D3DFMT_UNKNOWN;
   params.BackBufferCount = 1;
   params.SwapEffect = D3DSWAPEFFECT_DISCARD;
@@ -133,42 +248,87 @@ D3D9DXVA2Manager::Init()
                               D3DDEVTYPE_HAL,
                               ::GetShellWindow(),
                               D3DCREATE_FPU_PRESERVE |
                               D3DCREATE_MULTITHREADED |
                               D3DCREATE_MIXED_VERTEXPROCESSING,
                               &params,
                               nullptr,
                               getter_AddRefs(device));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("CreateDeviceEx failed with error %X", hr);
+    return hr;
+  }
 
   // Ensure we can create queries to synchronize operations between devices.
   // Without this, when we make a copy of the frame in order to share it with
   // another device, we can't be sure that the copy has finished before the
   // other device starts using it.
   nsRefPtr<IDirect3DQuery9> query;
 
   hr = device->CreateQuery(D3DQUERYTYPE_EVENT, getter_AddRefs(query));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("CreateQuery failed with error %X", hr);
+    return hr;
+  }
 
   // Create and initialize IDirect3DDeviceManager9.
   UINT resetToken = 0;
   nsRefPtr<IDirect3DDeviceManager9> deviceManager;
 
   hr = wmf::DXVA2CreateDirect3DDeviceManager9(&resetToken,
                                               getter_AddRefs(deviceManager));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("DXVA2CreateDirect3DDeviceManager9 failed with error %X", hr);
+    return hr;
+  }
   hr = deviceManager->ResetDevice(device, resetToken);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::ResetDevice failed with error %X", hr);
+    return hr;
+  }
+
+  HANDLE deviceHandle;
+  nsRefPtr<IDirectXVideoDecoderService> decoderService;
+  hr = deviceManager->OpenDeviceHandle(&deviceHandle);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
+  hr = deviceManager->GetVideoService(deviceHandle, IID_PPV_ARGS(decoderService.StartAssignment()));
+  deviceManager->CloseDeviceHandle(deviceHandle);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  UINT deviceCount;
+  GUID* decoderDevices = nullptr;
+  hr = decoderService->GetDecoderDeviceGuids(&deviceCount, &decoderDevices);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  bool found = false;
+  for (UINT i = 0; i < deviceCount; i++) {
+    if (decoderDevices[i] == DXVA2_ModeH264_E) {
+      found = true;
+      break;
+    }
+  }
+  CoTaskMemFree(decoderDevices);
+
+  if (!found) {
+    return E_FAIL;
+  }
+
+  mDecoderService = decoderService;
+
   mResetToken = resetToken;
   mD3D9 = d3d9Ex;
   mDevice = device;
   mDeviceManager = deviceManager;
 
+  mTextureClientAllocator = new D3D9RecycleAllocator(layers::ImageBridgeChild::GetSingleton(),
+                                                     mDevice);
+  mTextureClientAllocator->SetMaxPoolSize(5);
+
   return S_OK;
 }
 
 HRESULT
 D3D9DXVA2Manager::CopyToImage(IMFSample* aSample,
                               const nsIntRect& aRegion,
                               ImageContainer* aImageContainer,
                               Image** aOutImage)
@@ -185,59 +345,60 @@ D3D9DXVA2Manager::CopyToImage(IMFSample*
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   nsRefPtr<Image> image = aImageContainer->CreateImage(ImageFormat::D3D9_RGB32_TEXTURE);
   NS_ENSURE_TRUE(image, E_FAIL);
   NS_ASSERTION(image->GetFormat() == ImageFormat::D3D9_RGB32_TEXTURE,
                "Wrong format?");
 
   D3D9SurfaceImage* videoImage = static_cast<D3D9SurfaceImage*>(image.get());
-  hr = videoImage->SetData(D3D9SurfaceImage::Data(surface, aRegion));
+  hr = videoImage->SetData(D3D9SurfaceImage::Data(surface, aRegion, mTextureClientAllocator));
 
   image.forget(aOutImage);
 
   return S_OK;
 }
 
 // Count of the number of DXVAManager's we've created. This is also the
 // number of videos we're decoding with DXVA. Use on main thread only.
 static uint32_t sDXVAVideosCount = 0;
 
 /* static */
 DXVA2Manager*
-DXVA2Manager::CreateD3D9DXVA()
+DXVA2Manager::CreateD3D9DXVA(nsACString& aFailureReason)
 {
   MOZ_ASSERT(NS_IsMainThread());
   HRESULT hr;
 
   // DXVA processing takes up a lot of GPU resources, so limit the number of
   // videos we use DXVA with at any one time.
   const uint32_t dxvaLimit =
     Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
   if (sDXVAVideosCount == dxvaLimit) {
+    aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
   nsAutoPtr<D3D9DXVA2Manager> d3d9Manager(new D3D9DXVA2Manager());
-  hr = d3d9Manager->Init();
+  hr = d3d9Manager->Init(aFailureReason);
   if (SUCCEEDED(hr)) {
     return d3d9Manager.forget();
   }
 
   // No hardware accelerated video decoding. :(
   return nullptr;
 }
 
 class D3D11DXVA2Manager : public DXVA2Manager
 {
 public:
   D3D11DXVA2Manager();
   virtual ~D3D11DXVA2Manager();
 
-  HRESULT Init();
+  HRESULT Init(nsACString& aFailureReason);
 
   IUnknown* GetDXVADeviceManager() override;
 
   // Copies a region (aRegion) of the video frame stored in aVideoSample
   // into an image which is returned by aOutImage.
   HRESULT CopyToImage(IMFSample* aVideoSample,
                       const nsIntRect& aRegion,
                       ImageContainer* aContainer,
@@ -276,38 +437,56 @@ D3D11DXVA2Manager::~D3D11DXVA2Manager()
 IUnknown*
 D3D11DXVA2Manager::GetDXVADeviceManager()
 {
   MutexAutoLock lock(mLock);
   return mDXGIDeviceManager;
 }
 
 HRESULT
-D3D11DXVA2Manager::Init()
+D3D11DXVA2Manager::Init(nsACString& aFailureReason)
 {
   HRESULT hr;
 
   mDevice = gfxWindowsPlatform::GetPlatform()->CreateD3D11DecoderDevice();
-  NS_ENSURE_TRUE(mDevice, E_FAIL);
+  if (!mDevice) {
+    aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder");
+    return E_FAIL;
+  }
 
   mDevice->GetImmediateContext(byRef(mContext));
-  NS_ENSURE_TRUE(mContext, E_FAIL);
+  if (!mContext) {
+    aFailureReason.AssignLiteral("Failed to get immediate context for d3d11 device");
+    return E_FAIL;
+  }
 
   hr = wmf::MFCreateDXGIDeviceManager(&mDeviceManagerToken, byRef(mDXGIDeviceManager));
-  NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("MFCreateDXGIDeviceManager failed with code %X", hr);
+    return hr;
+  }
 
   hr = mDXGIDeviceManager->ResetDevice(mDevice, mDeviceManagerToken);
-  NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("IMFDXGIDeviceManager::ResetDevice failed with code %X", hr);
+    return hr;
+  }
 
   mTransform = new MFTDecoder();
   hr = mTransform->Create(CLSID_VideoProcessorMFT);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("MFTDecoder::Create(CLSID_VideoProcessorMFT) failed with code %X", hr);
+    return hr;
+  }
 
   hr = mTransform->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, ULONG_PTR(mDXGIDeviceManager.get()));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+  if (!SUCCEEDED(hr)) {
+    aFailureReason = nsPrintfCString("MFTDecoder::SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER) failed with code %X", hr);
+    return hr;
+  }
 
   return S_OK;
 }
 
 HRESULT
 D3D11DXVA2Manager::CreateOutputSample(RefPtr<IMFSample>& aSample, RefPtr<ID3D11Texture2D>& aTexture)
 {
   RefPtr<IMFSample> sample;
@@ -453,28 +632,29 @@ D3D11DXVA2Manager::ConfigureForSize(uint
   hr = mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   return S_OK;
 }
 
 /* static */
 DXVA2Manager*
-DXVA2Manager::CreateD3D11DXVA()
+DXVA2Manager::CreateD3D11DXVA(nsACString& aFailureReason)
 {
   // DXVA processing takes up a lot of GPU resources, so limit the number of
   // videos we use DXVA with at any one time.
   const uint32_t dxvaLimit =
     Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
   if (sDXVAVideosCount == dxvaLimit) {
+    aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
   nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
-  HRESULT hr = manager->Init();
+  HRESULT hr = manager->Init(aFailureReason);
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
   return manager.forget();
 }
 
 DXVA2Manager::DXVA2Manager()
   : mLock("DXVA2Manager")
 {
--- a/dom/media/platforms/wmf/DXVA2Manager.h
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -18,18 +18,18 @@ class Image;
 class ImageContainer;
 }
 
 class DXVA2Manager {
 public:
 
   // Creates and initializes a DXVA2Manager. We can use DXVA2 via either
   // D3D9Ex or D3D11.
-  static DXVA2Manager* CreateD3D9DXVA();
-  static DXVA2Manager* CreateD3D11DXVA();
+  static DXVA2Manager* CreateD3D9DXVA(nsACString& aFailureReason);
+  static DXVA2Manager* CreateD3D11DXVA(nsACString& aFailureReason);
 
   // Returns a pointer to the D3D device manager responsible for managing the
   // device we're using for hardware accelerated video decoding. If we're using
   // D3D9Ex, this is an IDirect3DDeviceManager9. For D3D11 this is an
   // IMFDXGIDeviceManager. It is safe to call this on any thread.
   virtual IUnknown* GetDXVADeviceManager() = 0;
 
   // Creates an Image for the video frame stored in aVideoSample.
@@ -39,16 +39,18 @@ public:
                               layers::Image** aOutImage) = 0;
 
   virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) { return S_OK; }
 
   virtual bool IsD3D11() { return false; }
 
   virtual ~DXVA2Manager();
 
+  virtual bool SupportsConfig(IMFMediaType* aType) { return true; }
+
 protected:
   Mutex mLock;
   DXVA2Manager();
 };
 
 } // namespace mozilla
 
 #endif // DXVA2Manager_h_
--- a/dom/media/platforms/wmf/MFTDecoder.h
+++ b/dom/media/platforms/wmf/MFTDecoder.h
@@ -54,16 +54,21 @@ public:
   // Returns:
   //  - MF_E_NOTACCEPTING if the decoder can't accept input. The data
   //    must be resubmitted after Output() stops producing output.
   HRESULT Input(const uint8_t* aData,
                 uint32_t aDataSize,
                 int64_t aTimestampUsecs);
   HRESULT Input(IMFSample* aSample);
 
+  HRESULT CreateInputSample(const uint8_t* aData,
+                            uint32_t aDataSize,
+                            int64_t aTimestampUsecs,
+                            RefPtr<IMFSample>* aOutSample);
+
   // Retrieves output from the MFT. Call this once Input() returns
   // MF_E_NOTACCEPTING. Some MFTs with hardware acceleration (the H.264
   // decoder MFT in particular) can't handle it if clients hold onto
   // references to the output IMFSample, so don't do that.
   //
   // Returns:
   //  - MF_E_TRANSFORM_STREAM_CHANGE if the underlying stream output
   //    type changed. Retrieve the output media type and reconfig client,
@@ -75,24 +80,20 @@ public:
 
   // Sends a flush message to the MFT. This causes it to discard all
   // input data. Use before seeking.
   HRESULT Flush();
 
   // Sends a message to the MFT.
   HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData);
 
-private:
 
   HRESULT SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData);
+private:
 
-  HRESULT CreateInputSample(const uint8_t* aData,
-                            uint32_t aDataSize,
-                            int64_t aTimestampUsecs,
-                            RefPtr<IMFSample>* aOutSample);
 
   HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);
 
   MFT_INPUT_STREAM_INFO mInputStreamInfo;
   MFT_OUTPUT_STREAM_INFO mOutputStreamInfo;
 
   RefPtr<IMFTransform> mDecoder;
 
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -97,17 +97,17 @@ WMFDecoderModule::CreateVideoDecoder(con
                                      layers::ImageContainer* aImageContainer,
                                      FlushableTaskQueue* aVideoTaskQueue,
                                      MediaDataDecoderCallback* aCallback)
 {
   nsAutoPtr<WMFVideoMFTManager> manager(
     new WMFVideoMFTManager(aConfig,
                            aLayersBackend,
                            aImageContainer,
-                           sDXVAEnabled && ShouldUseDXVA(aConfig)));
+                           sDXVAEnabled));
 
   nsRefPtr<MFTDecoder> mft = manager->Init();
 
   if (!mft) {
     return nullptr;
   }
 
   nsRefPtr<MediaDataDecoder> decoder =
@@ -129,42 +129,21 @@ WMFDecoderModule::CreateAudioDecoder(con
   }
 
   nsRefPtr<MediaDataDecoder> decoder =
     new WMFMediaDataDecoder(manager.forget(), mft, aAudioTaskQueue, aCallback);
   return decoder.forget();
 }
 
 bool
-WMFDecoderModule::ShouldUseDXVA(const VideoInfo& aConfig) const
-{
-  static bool isAMD = false;
-  static bool initialized = false;
-  if (!initialized) {
-    nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
-    nsAutoString vendor;
-    gfxInfo->GetAdapterVendorID(vendor);
-    isAMD = vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorAMD), nsCaseInsensitiveStringComparator()) ||
-            vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorATI), nsCaseInsensitiveStringComparator());
-    initialized = true;
-  }
-  if (!isAMD) {
-    return true;
-  }
-  // Don't use DXVA for 4k videos or above, since it seems to perform poorly.
-  return aConfig.mDisplay.width <= 1920 && aConfig.mDisplay.height <= 1200;
-}
-
-bool
 WMFDecoderModule::SupportsSharedDecoders(const VideoInfo& aConfig) const
 {
   // If DXVA is enabled, but we're not going to use it for this specific config, then
   // we can't use the shared decoder.
-  return !AgnosticMimeType(aConfig.mMimeType) &&
-    (!sDXVAEnabled || ShouldUseDXVA(aConfig));
+  return !AgnosticMimeType(aConfig.mMimeType);
 }
 
 bool
 WMFDecoderModule::SupportsMimeType(const nsACString& aMimeType)
 {
   return aMimeType.EqualsLiteral("video/mp4") ||
          aMimeType.EqualsLiteral("video/avc") ||
          aMimeType.EqualsLiteral("audio/mp4a-latm") ||
--- a/dom/media/platforms/wmf/WMFDecoderModule.h
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -47,15 +47,14 @@ public:
   static bool HasH264();
 
   // Called on main thread.
   static void Init();
 
   // Called from any thread, must call init first
   static int GetNumDecoderThreads();
 private:
-  bool ShouldUseDXVA(const VideoInfo& aConfig) const;
   bool mWMFInitialized;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -229,15 +229,15 @@ WMFMediaDataDecoder::Drain()
 
   nsCOMPtr<nsIRunnable> runnable =
     NS_NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessDrain);
   mTaskQueue->Dispatch(runnable.forget());
   return NS_OK;
 }
 
 bool
-WMFMediaDataDecoder::IsHardwareAccelerated() const {
+WMFMediaDataDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const {
   MOZ_ASSERT(!mIsShutDown);
 
-  return mMFTManager && mMFTManager->IsHardwareAccelerated();
+  return mMFTManager && mMFTManager->IsHardwareAccelerated(aFailureReason);
 }
 
 } // namespace mozilla
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -38,17 +38,17 @@ public:
   // than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
   // MP4Reader.
   virtual HRESULT Output(int64_t aStreamOffset,
                          nsRefPtr<MediaData>& aOutput) = 0;
 
   // Destroys all resources.
   virtual void Shutdown() = 0;
 
-  virtual bool IsHardwareAccelerated() const { return false; }
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
 
   virtual TrackInfo::TrackType GetType() = 0;
 
 };
 
 // Decodes audio and video using Windows Media Foundation. Samples are decoded
 // using the MFTDecoder created by the MFTManager. This class implements
 // the higher-level logic that drives mapping the MFT to the async
@@ -67,17 +67,17 @@ public:
   virtual nsresult Input(MediaRawData* aSample);
 
   virtual nsresult Flush() override;
 
   virtual nsresult Drain() override;
 
   virtual nsresult Shutdown() override;
 
-  virtual bool IsHardwareAccelerated() const override;
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
 
 private:
 
   // Called on the task queue. Inserts the sample into the decoder, and
   // extracts output if available.
   void ProcessDecode(MediaRawData* aSample);
 
   // Called on the task queue. Extracts output if available, and delivers
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -16,16 +16,17 @@
 #include "mozilla/layers/LayersTypes.h"
 #include "MediaInfo.h"
 #include "mozilla/Logging.h"
 #include "gfx2DGlue.h"
 #include "gfxWindowsPlatform.h"
 #include "IMFYCbCrImage.h"
 #include "mozilla/WindowsVersion.h"
 #include "mozilla/Preferences.h"
+#include "nsPrintfCString.h"
 
 PRLogModuleInfo* GetDemuxerLog();
 #define LOG(...) MOZ_LOG(GetDemuxerLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
 
 using mozilla::layers::Image;
 using mozilla::layers::IMFYCbCrImage;
 using mozilla::layers::LayerManager;
 using mozilla::layers::LayersBackend;
@@ -123,52 +124,58 @@ WMFVideoMFTManager::GetMediaSubtypeGUID(
     case VP8: return MFVideoFormat_VP80;
     case VP9: return MFVideoFormat_VP90;
     default: return GUID_NULL;
   };
 }
 
 class CreateDXVAManagerEvent : public nsRunnable {
 public:
-  CreateDXVAManagerEvent(LayersBackend aBackend)
+  CreateDXVAManagerEvent(LayersBackend aBackend, nsCString& aFailureReason)
     : mBackend(aBackend)
+    , mFailureReason(aFailureReason)
   {}
 
   NS_IMETHOD Run() {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
     if (mBackend == LayersBackend::LAYERS_D3D11 &&
         Preferences::GetBool("media.windows-media-foundation.allow-d3d11-dxva", false) &&
         IsWin8OrLater()) {
-      mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA();
+      mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA(mFailureReason);
     } else {
-      mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA();
+      mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA(mFailureReason);
     }
     return NS_OK;
   }
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
   LayersBackend mBackend;
+  nsACString& mFailureReason;
 };
 
 bool
 WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
 {
   MOZ_ASSERT(!mDXVA2Manager);
 
   // If we use DXVA but aren't running with a D3D layer manager then the
   // readback of decoded video frames from GPU to CPU memory grinds painting
   // to a halt, and makes playback performance *worse*.
-  if (!mDXVAEnabled ||
-      (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
-       mLayersBackend != LayersBackend::LAYERS_D3D11)) {
+  if (!mDXVAEnabled) {
+    mDXVAFailureReason.AssignLiteral("Hardware video decoding disabled or blacklisted");
+    return false;
+  }
+  if (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
+      mLayersBackend != LayersBackend::LAYERS_D3D11) {
+    mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
     return false;
   }
 
   // The DXVA manager must be created on the main thread.
   nsRefPtr<CreateDXVAManagerEvent> event = 
-    new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9 : mLayersBackend);
+    new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9 : mLayersBackend, mDXVAFailureReason);
 
   if (NS_IsMainThread()) {
     event->Run();
   } else {
     NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
   }
   mDXVA2Manager = event->mDXVA2Manager;
 
@@ -179,17 +186,20 @@ already_AddRefed<MFTDecoder>
 WMFVideoMFTManager::Init()
 {
   RefPtr<MFTDecoder> decoder = InitInternal(/* aForceD3D9 = */ false);
 
   // If initialization failed with d3d11 DXVA then try falling back
   // to d3d9.
   if (!decoder && mDXVA2Manager && mDXVA2Manager->IsD3D11()) {
     mDXVA2Manager = nullptr;
+    nsCString d3d11Failure = mDXVAFailureReason;
     decoder = InitInternal(true);
+    mDXVAFailureReason.Append(NS_LITERAL_CSTRING("; "));
+    mDXVAFailureReason.Append(d3d11Failure);
   }
 
   return decoder.forget();
 }
 
 already_AddRefed<MFTDecoder>
 WMFVideoMFTManager::InitInternal(bool aForceD3D9)
 {
@@ -199,102 +209,174 @@ WMFVideoMFTManager::InitInternal(bool aF
   RefPtr<MFTDecoder> decoder(new MFTDecoder());
 
   HRESULT hr = decoder->Create(GetMFTGUID());
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
   RefPtr<IMFAttributes> attr(decoder->GetAttributes());
   UINT32 aware = 0;
   if (attr) {
-      attr->GetUINT32(MF_SA_D3D_AWARE, &aware);
-      attr->SetUINT32(CODECAPI_AVDecNumWorkerThreads,
-                      WMFDecoderModule::GetNumDecoderThreads());
-      hr = attr->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
-      if (SUCCEEDED(hr)) {
-        LOG("Enabling Low Latency Mode");
-      } else {
-        LOG("Couldn't enable Low Latency Mode");
-      }
+    attr->GetUINT32(MF_SA_D3D_AWARE, &aware);
+    attr->SetUINT32(CODECAPI_AVDecNumWorkerThreads,
+      WMFDecoderModule::GetNumDecoderThreads());
+    hr = attr->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
+    if (SUCCEEDED(hr)) {
+      LOG("Enabling Low Latency Mode");
+    }
+    else {
+      LOG("Couldn't enable Low Latency Mode");
+    }
   }
 
   if (useDxva) {
     if (aware) {
       // TODO: Test if I need this anywhere... Maybe on Vista?
       //hr = attr->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
       //NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
       MOZ_ASSERT(mDXVA2Manager);
       ULONG_PTR manager = ULONG_PTR(mDXVA2Manager->GetDXVADeviceManager());
       hr = decoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, manager);
       if (SUCCEEDED(hr)) {
         mUseHwAccel = true;
+      } else {
+        mDXVA2Manager = nullptr;
+        mDXVAFailureReason = nsPrintfCString("MFT_MESSAGE_SET_D3D_MANAGER failed with code %X", hr);
       }
     }
+    else {
+      mDXVAFailureReason.AssignLiteral("Decoder returned false for MF_SA_D3D_AWARE");
+    }
   }
 
-  // Setup the input/output media types.
-  RefPtr<IMFMediaType> inputType;
-  hr = wmf::MFCreateMediaType(byRef(inputType));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
+  mDecoder = decoder;
+  hr = SetDecoderMediaTypes();
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
-  RefPtr<IMFMediaType> outputType;
-  hr = wmf::MFCreateMediaType(byRef(outputType));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  GUID outputSubType = mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12;
-  hr = outputType->SetGUID(MF_MT_SUBTYPE, outputSubType);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  hr = decoder->SetMediaTypes(inputType, outputType);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
-  mDecoder = decoder;
   LOG("Video Decoder initialized, Using DXVA: %s", (mUseHwAccel ? "Yes" : "No"));
 
   // Just in case ConfigureVideoFrameGeometry() does not set these
   mVideoInfo = VideoInfo();
   mVideoStride = 0;
   mVideoWidth = 0;
   mVideoHeight = 0;
   mPictureRegion.SetEmpty();
 
   return decoder.forget();
 }
 
 HRESULT
+WMFVideoMFTManager::SetDecoderMediaTypes()
+{
+  // Setup the input/output media types.
+  RefPtr<IMFMediaType> inputType;
+  HRESULT hr = wmf::MFCreateMediaType(byRef(inputType));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  RefPtr<IMFMediaType> outputType;
+  hr = wmf::MFCreateMediaType(byRef(outputType));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  GUID outputSubType = mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12;
+  hr = outputType->SetGUID(MF_MT_SUBTYPE, outputSubType);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  return mDecoder->SetMediaTypes(inputType, outputType);
+}
+
+HRESULT
 WMFVideoMFTManager::Input(MediaRawData* aSample)
 {
   if (!mDecoder) {
     // This can happen during shutdown.
     return E_FAIL;
   }
+
+  HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
+                                           uint32_t(aSample->Size()),
+                                           aSample->mTime,
+                                           &mLastInput);
+  NS_ENSURE_TRUE(SUCCEEDED(hr) && mLastInput != nullptr, hr);
+
   // Forward sample data to the decoder.
-  return mDecoder->Input(aSample->Data(),
-                         uint32_t(aSample->Size()),
-                         aSample->mTime);
+  return mDecoder->Input(mLastInput);
+}
+
+// The MFTransform we use for decoding h264 video will silently fall
+// back to software decoding (even if we've negotiated DXVA) if the GPU
+// doesn't support decoding the given resolution. It will then upload
+// the software decoded frames into d3d textures to preserve behaviour.
+//
+// Unfortunately this seems to cause corruption (see bug 1193547) and is
+// slow because the upload is done into a non-shareable texture and requires
+// us to copy it.
+//
+// This code tests if the given resolution can be supported directly on the GPU,
+// and makes sure we only ask the MFT for DXVA if it can be supported properly.
+bool
+WMFVideoMFTManager::MaybeToggleDXVA(IMFMediaType* aType)
+{
+  // SupportsConfig only checks for valid h264 decoders currently.
+  if (!mDXVA2Manager || mStreamType != H264) {
+    return false;
+  }
+
+  if (mDXVA2Manager->SupportsConfig(aType)) {
+    if (!mUseHwAccel) {
+      // DXVA disabled, but supported for this resolution
+      ULONG_PTR manager = ULONG_PTR(mDXVA2Manager->GetDXVADeviceManager());
+      HRESULT hr = mDecoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, manager);
+      if (SUCCEEDED(hr)) {
+        mUseHwAccel = true;
+        return true;
+      }
+    }
+  } else if (mUseHwAccel) {
+    // DXVA enabled, and not supported for this resolution
+    HRESULT hr = mDecoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, 0);
+    MOZ_ASSERT(SUCCEEDED(hr), "Attempting to fall back to software failed?");
+    mUseHwAccel = false;
+    return true;
+  }
+
+  return false;
 }
 
 HRESULT
 WMFVideoMFTManager::ConfigureVideoFrameGeometry()
 {
   RefPtr<IMFMediaType> mediaType;
   HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
+  // If we enabled/disabled DXVA in response to a resolution
+  // change then we need to renegotiate our media types,
+  // and resubmit our previous frame (since the MFT appears
+  // to lose it otherwise).
+  if (MaybeToggleDXVA(mediaType)) {
+    hr = SetDecoderMediaTypes();
+    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+    HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
+    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+    mDecoder->Input(mLastInput);
+  }
+
   // Verify that the video subtype is what we expect it to be.
   // When using hardware acceleration/DXVA2 the video format should
   // be NV12, which is DXVA2's preferred format. For software decoding
   // we use YV12, as that's easier for us to stick into our rendering
   // pipeline than NV12. NV12 has interleaved UV samples, whereas YV12
   // is a planar format.
   GUID videoFormat;
   hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
@@ -550,14 +632,15 @@ WMFVideoMFTManager::Output(int64_t aStre
 void
 WMFVideoMFTManager::Shutdown()
 {
   mDecoder = nullptr;
   DeleteOnMainThread(mDXVA2Manager);
 }
 
 bool
-WMFVideoMFTManager::IsHardwareAccelerated() const
+WMFVideoMFTManager::IsHardwareAccelerated(nsACString& aFailureReason) const
 {
+  aFailureReason = mDXVAFailureReason;
   return mDecoder && mUseHwAccel;
 }
 
 } // namespace mozilla
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -29,17 +29,17 @@ public:
 
   virtual HRESULT Input(MediaRawData* aSample) override;
 
   virtual HRESULT Output(int64_t aStreamOffset,
                          nsRefPtr<MediaData>& aOutput) override;
 
   virtual void Shutdown() override;
 
-  virtual bool IsHardwareAccelerated() const override;
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
 
   virtual TrackInfo::TrackType GetType() override {
     return TrackInfo::kVideoTrack;
   }
 
 private:
 
   bool InitializeDXVA(bool aForceD3D9);
@@ -51,31 +51,39 @@ private:
   HRESULT CreateBasicVideoFrame(IMFSample* aSample,
                                 int64_t aStreamOffset,
                                 VideoData** aOutVideoData);
 
   HRESULT CreateD3DVideoFrame(IMFSample* aSample,
                               int64_t aStreamOffset,
                               VideoData** aOutVideoData);
 
+  HRESULT SetDecoderMediaTypes();
+
+  bool MaybeToggleDXVA(IMFMediaType* aType);
+
   // Video frame geometry.
   VideoInfo mVideoInfo;
   uint32_t mVideoStride;
   uint32_t mVideoWidth;
   uint32_t mVideoHeight;
   nsIntRect mPictureRegion;
 
   RefPtr<MFTDecoder> mDecoder;
   RefPtr<layers::ImageContainer> mImageContainer;
   nsAutoPtr<DXVA2Manager> mDXVA2Manager;
 
+  RefPtr<IMFSample> mLastInput;
+
   const bool mDXVAEnabled;
   const layers::LayersBackend mLayersBackend;
   bool mUseHwAccel;
 
+  nsCString mDXVAFailureReason;
+
   enum StreamType {
     Unknown,
     H264,
     VP8,
     VP9
   };
 
   StreamType mStreamType;
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -24,51 +24,51 @@ H264Converter::H264Converter(PlatformDec
   : mPDM(aPDM)
   , mCurrentConfig(aConfig)
   , mLayersBackend(aLayersBackend)
   , mImageContainer(aImageContainer)
   , mVideoTaskQueue(aVideoTaskQueue)
   , mCallback(aCallback)
   , mDecoder(nullptr)
   , mNeedAVCC(aPDM->DecoderNeedsConversion(aConfig) == PlatformDecoderModule::kNeedAVCC)
-  , mDecoderInitializing(false)
   , mLastError(NS_OK)
 {
   CreateDecoder();
 }
 
 H264Converter::~H264Converter()
 {
 }
 
 nsRefPtr<MediaDataDecoder::InitPromise>
 H264Converter::Init()
 {
   if (mDecoder) {
     return mDecoder->Init();
   }
 
-  return MediaDataDecoder::InitPromise::CreateAndReject(
-           MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__);
+  // We haven't been able to initialize a decoder due to a missing SPS/PPS.
+  return MediaDataDecoder::InitPromise::CreateAndResolve(
+           TrackType::kVideoTrack, __func__);
 }
 
 nsresult
 H264Converter::Input(MediaRawData* aSample)
 {
   if (!mNeedAVCC) {
     if (!mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample)) {
       return NS_ERROR_FAILURE;
     }
   } else {
     if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
       return NS_ERROR_FAILURE;
     }
   }
 
-  if (mDecoderInitializing) {
+  if (mInitPromiseRequest.Exists()) {
     mMediaRawSamples.AppendElement(aSample);
     return NS_OK;
   }
 
   nsresult rv;
   if (!mDecoder) {
     // It is not possible to create an AVCC H264 decoder without SPS.
     // As such, creation will fail if the extra_data just extracted doesn't
@@ -116,22 +116,22 @@ H264Converter::Shutdown()
     mInitPromiseRequest.DisconnectIfExists();
     mDecoder = nullptr;
     return rv;
   }
   return NS_OK;
 }
 
 bool
-H264Converter::IsHardwareAccelerated() const
+H264Converter::IsHardwareAccelerated(nsACString& aFailureReason) const
 {
   if (mDecoder) {
-    return mDecoder->IsHardwareAccelerated();
+    return mDecoder->IsHardwareAccelerated(aFailureReason);
   }
-  return MediaDataDecoder::IsHardwareAccelerated();
+  return MediaDataDecoder::IsHardwareAccelerated(aFailureReason);
 }
 
 nsresult
 H264Converter::CreateDecoder()
 {
   if (mNeedAVCC && !mp4_demuxer::AnnexB::HasSPS(mCurrentConfig.mExtraData)) {
     // nothing found yet, will try again later
     return NS_ERROR_NOT_INITIALIZED;
@@ -158,17 +158,19 @@ H264Converter::CreateDecoderAndInit(Medi
   if (!mp4_demuxer::AnnexB::HasSPS(extra_data)) {
     return NS_ERROR_NOT_INITIALIZED;
   }
   UpdateConfigFromExtraData(extra_data);
 
   nsresult rv = CreateDecoder();
 
   if (NS_SUCCEEDED(rv)) {
-    mDecoderInitializing = true;
+    // Queue the incoming sample.
+    mMediaRawSamples.AppendElement(aSample);
+
     nsRefPtr<H264Converter> self = this;
 
     // The mVideoTaskQueue is flushable which can't be used in MediaPromise. So
     // we get the current AbstractThread instead of it. The MOZ_ASSERT above
     // ensures we are running in AbstractThread so we won't get a nullptr.
     mInitPromiseRequest.Begin(mDecoder->Init()
       ->Then(AbstractThread::GetCurrent(), __func__, this,
              &H264Converter::OnDecoderInitDone,
@@ -182,17 +184,16 @@ H264Converter::OnDecoderInitDone(const T
 {
   mInitPromiseRequest.Complete();
   for (uint32_t i = 0 ; i < mMediaRawSamples.Length(); i++) {
     if (NS_FAILED(mDecoder->Input(mMediaRawSamples[i]))) {
       mCallback->Error();
     }
   }
   mMediaRawSamples.Clear();
-  mDecoderInitializing = false;
 }
 
 void
 H264Converter::OnDecoderInitFailed(MediaDataDecoder::DecoderFailureReason aReason)
 {
   mInitPromiseRequest.Complete();
   mCallback->Error();
 }
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -29,17 +29,17 @@ public:
                 MediaDataDecoderCallback* aCallback);
   virtual ~H264Converter();
 
   virtual nsRefPtr<InitPromise> Init() override;
   virtual nsresult Input(MediaRawData* aSample) override;
   virtual nsresult Flush() override;
   virtual nsresult Drain() override;
   virtual nsresult Shutdown() override;
-  virtual bool IsHardwareAccelerated() const override;
+  virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
 
   // Return true if mimetype is H.264.
   static bool IsH264(const TrackInfo& aConfig);
 
 private:
   // Will create the required MediaDataDecoder if need AVCC and we have a SPS NAL.
   // Returns NS_ERROR_FAILURE if error is permanent and can't be recovered and
   // will set mError accordingly.
@@ -56,15 +56,14 @@ private:
   layers::LayersBackend mLayersBackend;
   nsRefPtr<layers::ImageContainer> mImageContainer;
   nsRefPtr<FlushableTaskQueue> mVideoTaskQueue;
   nsTArray<nsRefPtr<MediaRawData>> mMediaRawSamples;
   MediaDataDecoderCallback* mCallback;
   nsRefPtr<MediaDataDecoder> mDecoder;
   MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
   bool mNeedAVCC;
-  bool mDecoderInitializing;
   nsresult mLastError;
 };
 
 } // namespace mozilla
 
 #endif // mozilla_H264Converter_h
--- a/dom/media/webaudio/test/chrome.ini
+++ b/dom/media/webaudio/test/chrome.ini
@@ -1,6 +1,7 @@
 [DEFAULT]
 skip-if = buildapp == 'b2g'
 
 [test_AudioNodeDevtoolsAPI.html]
 [test_bug1027864.html]
 [test_AudioParamDevtoolsAPI.html]
+[test_ScriptProcessorCollected1.html]
new file mode 100644
--- /dev/null
+++ b/dom/media/webaudio/test/test_ScriptProcessorCollected1.html
@@ -0,0 +1,81 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test ScriptProcessorNode in cycle with no listener is collected</title>
+  <script type="text/javascript" src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script class="testbody" type="text/javascript">
+
+Components.utils.import('resource://gre/modules/Services.jsm');
+SimpleTest.waitForExplicitFinish();
+
+var observer = function(subject, topic, data) {
+  var id = parseInt(data);
+  var index = ids.indexOf(id);
+  if (index != -1) {
+    ok(true, "Collected AudioNode id " + id + " at index " + index);
+    ids.splice(index, 1);
+  }
+}
+
+Services.obs.addObserver(observer, "webaudio-node-demise", false);
+
+SimpleTest.registerCleanupFunction(function() {
+  if (observer) {
+    Services.obs.removeObserver(observer, "webaudio-node-demise");
+  }
+});
+
+var ac = new AudioContext();
+
+var testProcessor = ac.createScriptProcessor(256, 1, 0);
+var delay = ac.createDelay();
+testProcessor.connect(delay);
+delay.connect(testProcessor);
+
+var referenceProcessor = ac.createScriptProcessor(256, 1, 0);
+var gain = ac.createGain();
+gain.connect(referenceProcessor);
+
+var processCount = 0;
+testProcessor.onaudioprocess = function(event) {
+  ++processCount;
+  switch (processCount) {
+  case 1:
+    // Switch to listening to referenceProcessor;
+    referenceProcessor.onaudioprocess = event.target.onaudioprocess;
+    referenceProcessor = null;
+    event.target.onaudioprocess = null;
+  case 2:
+    // There are no references to testProcessor and so GC can begin.
+    SpecialPowers.forceGC();
+    break;
+  case 3:
+    // Another GC should not be required after testProcessor would have
+    // received another audioprocess event.
+    SpecialPowers.forceCC();
+    // Expect that webaudio-demise has been queued.
+    // Queue another event to check.
+    SimpleTest.executeSoon(function() {
+      Services.obs.removeObserver(observer, "webaudio-node-demise");
+      observer = null;
+      event.target.onaudioprocess = null;
+      ok(ids.length == 0, "All expected nodes should be collected");
+      SimpleTest.finish();
+    });
+    break;
+  }
+};
+
+// Nodes with these ids should be collected.
+var ids = [ testProcessor.id, delay.id, gain.id ];
+testProcessor = null;
+delay = null;
+gain = null;
+
+</script>
+</pre>
+</body>
+</html>
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -13,57 +13,67 @@
 #include "gfx2DGlue.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/SharedThreadPool.h"
 #include "MediaDataDemuxer.h"
 #include "nsAutoRef.h"
 #include "NesteggPacketHolder.h"
 
 #include <algorithm>
+#include <stdint.h>
 
 #define VPX_DONT_DEFINE_STDINT_TYPES
 #include "vpx/vp8dx.h"
 #include "vpx/vpx_decoder.h"
 
 #define WEBM_DEBUG(arg, ...) MOZ_LOG(gMediaDecoderLog, mozilla::LogLevel::Debug, ("WebMDemuxer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
 
 namespace mozilla {
 
 using namespace gfx;
 
 extern PRLogModuleInfo* gMediaDecoderLog;
 extern PRLogModuleInfo* gNesteggLog;
 
 // Functions for reading and seeking using WebMDemuxer required for
-// nestegg_io. The 'user data' passed to these functions is the demuxer
+// nestegg_io. The 'user data' passed to these functions is the
+// demuxer's MediaResourceIndex
 static int webmdemux_read(void* aBuffer, size_t aLength, void* aUserData)
 {
   MOZ_ASSERT(aUserData);
-  WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
+  MediaResourceIndex* resource =
+    reinterpret_cast<MediaResourceIndex*>(aUserData);
+  int64_t length = resource->GetLength();
+  MOZ_ASSERT(aLength < UINT32_MAX);
+  uint32_t count = aLength;
+  if (length >= 0 && count + resource->Tell() > length) {
+    count = uint32_t(length - resource->Tell());
+  }
+
   uint32_t bytes = 0;
-  bool eof = false;
-  char* p = static_cast<char*>(aBuffer);
-  nsresult rv = demuxer->Read(p, aLength, &bytes);
-  eof = bytes < aLength;
+  nsresult rv = resource->Read(static_cast<char*>(aBuffer), count, &bytes);
+  bool eof = !bytes;
   return NS_FAILED(rv) ? -1 : eof ? 0 : 1;
 }
 
 static int webmdemux_seek(int64_t aOffset, int aWhence, void* aUserData)
 {
   MOZ_ASSERT(aUserData);
-  WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
-  nsresult rv = demuxer->Seek(aWhence, aOffset);
+  MediaResourceIndex* resource =
+    reinterpret_cast<MediaResourceIndex*>(aUserData);
+  nsresult rv = resource->Seek(aWhence, aOffset);
   return NS_SUCCEEDED(rv) ? 0 : -1;
 }
 
 static int64_t webmdemux_tell(void* aUserData)
 {
   MOZ_ASSERT(aUserData);
-  WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
-  return demuxer->Tell();
+  MediaResourceIndex* resource =
+    reinterpret_cast<MediaResourceIndex*>(aUserData);
+  return resource->Tell();
 }
 
 static void webmdemux_log(nestegg* aContext,
                           unsigned int aSeverity,
                           char const* aFormat, ...)
 {
   if (!MOZ_LOG_TEST(gNesteggLog, LogLevel::Debug)) {
     return;
@@ -104,17 +114,16 @@ static void webmdemux_log(nestegg* aCont
 }
 
 
 WebMDemuxer::WebMDemuxer(MediaResource* aResource)
   : mResource(aResource)
   , mBufferedState(nullptr)
   , mInitData(nullptr)
   , mContext(nullptr)
-  , mOffset(0)
   , mVideoTrack(0)
   , mAudioTrack(0)
   , mSeekPreroll(0)
   , mLastVideoFrameTime(0)
   , mAudioCodec(-1)
   , mVideoCodec(-1)
   , mHasVideo(false)
   , mHasAudio(false)
@@ -157,17 +166,17 @@ WebMDemuxer::InitBufferedState()
   }
   EnsureUpToDateIndex();
   return NS_OK;
 }
 
 already_AddRefed<MediaDataDemuxer>
 WebMDemuxer::Clone() const
 {
-  nsRefPtr<WebMDemuxer> demuxer = new WebMDemuxer(mResource);
+  nsRefPtr<WebMDemuxer> demuxer = new WebMDemuxer(mResource.GetResource());
   demuxer->mInitData = mInitData;
   if (demuxer->InitBufferedState() != NS_OK ||
       demuxer->ReadMetadata() != NS_OK) {
     NS_WARNING("Couldn't recreate WebMDemuxer");
     return nullptr;
   }
   return demuxer.forget();
 }
@@ -238,20 +247,20 @@ WebMDemuxer::Cleanup()
 
 nsresult
 WebMDemuxer::ReadMetadata()
 {
   nestegg_io io;
   io.read = webmdemux_read;
   io.seek = webmdemux_seek;
   io.tell = webmdemux_tell;
-  io.userdata = this;
+  io.userdata = &mResource;
   int64_t maxOffset = mBufferedState->GetInitEndOffset();
   if (maxOffset == -1) {
-    maxOffset = mResource->GetLength();
+    maxOffset = mResource.GetLength();
   }
   int r = nestegg_init(&mContext, io, &webmdemux_log, maxOffset);
   if (r == -1) {
     return NS_ERROR_FAILURE;
   }
 
   unsigned int ntracks = 0;
   r = nestegg_track_count(mContext, &ntracks);
@@ -389,84 +398,40 @@ WebMDemuxer::ReadMetadata()
       if (!r) {
         mInfo.mAudio.mDuration = media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
       }
     }
   }
   return NS_OK;
 }
 
-nsresult
-WebMDemuxer::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
-{
-  int64_t length = mResource->GetLength();
-  if (length >= 0 && aCount + mOffset > length) {
-    WEBM_DEBUG("requested to large amount, trying to get %ld bytes at %ld (length: %ld)", aCount, mOffset, length);
-    aCount = length - mOffset;
-    WEBM_DEBUG("will only return %ld", aCount);
-  }
-  nsRefPtr<MediaByteBuffer> bytes = mResource->MediaReadAt(mOffset, aCount);
-  if (!bytes) {
-    return NS_ERROR_FAILURE;
-  }
-  mOffset += bytes->Length();
-  *aBytes = bytes->Length();
-  memcpy(aBuffer, bytes->Elements(), bytes->Length());
-  return NS_OK;
-}
-
-nsresult
-WebMDemuxer::Seek(int32_t aWhence, int64_t aOffset)
-{
-  if (aWhence == SEEK_CUR) {
-    aOffset += mOffset;
-  } else if (aWhence == SEEK_END) {
-    int64_t length = mResource->GetLength();
-    if (length == -1 || length - aOffset < 0) {
-      return NS_ERROR_FAILURE;
-    }
-    aOffset = mResource->GetLength() - aOffset;
-  }
-  if (aOffset > mResource->GetLength()) {
-    return NS_ERROR_FAILURE;
-  }
-  mOffset = aOffset;
-  return NS_OK;
-}
-
-int64_t
-WebMDemuxer::Tell()
-{
-  return mOffset;
-}
-
 bool
 WebMDemuxer::IsSeekable() const
 {
   return mContext && nestegg_has_cues(mContext);
 }
 
 void
 WebMDemuxer::EnsureUpToDateIndex()
 {
   if (!mNeedReIndex) {
     return;
   }
   if (mInitData && mBufferedState->GetInitEndOffset() == -1) {
     mBufferedState->NotifyDataArrived(mInitData->Elements(), mInitData->Length(), 0);
   }
-  AutoPinned<MediaResource> resource(mResource);
+  AutoPinned<MediaResource> resource(mResource.GetResource());
   nsTArray<MediaByteRange> byteRanges;
   nsresult rv = resource->GetCachedRanges(byteRanges);
   if (NS_FAILED(rv) || !byteRanges.Length()) {
     return;
   }
-  mBufferedState->UpdateIndex(byteRanges, mResource);
+  mBufferedState->UpdateIndex(byteRanges, resource);
   if (!mInitData && mBufferedState->GetInitEndOffset() != -1) {
-    mInitData = mResource->MediaReadAt(0, mBufferedState->GetInitEndOffset());
+    mInitData = mResource.MediaReadAt(0, mBufferedState->GetInitEndOffset());
   }
   mNeedReIndex = false;
 }
 
 void
 WebMDemuxer::NotifyDataArrived(uint32_t aLength, int64_t aOffset)
 {
   WEBM_DEBUG("length: %ld offset: %ld", aLength, aOffset);
@@ -637,17 +602,17 @@ WebMDemuxer::DemuxPacket()
   }
 
   unsigned int track = 0;
   r = nestegg_packet_track(packet, &track);
   if (r == -1) {
     return nullptr;
   }
 
-  int64_t offset = Tell();
+  int64_t offset = mResource.Tell();
   nsRefPtr<NesteggPacketHolder> holder = new NesteggPacketHolder();
   if (!holder->Init(packet, offset, track, false)) {
     return nullptr;
   }
 
   return holder;
 }
 
@@ -726,17 +691,17 @@ WebMDemuxer::SeekInternal(const media::T
   }
   return NS_OK;
 }
 
 media::TimeIntervals
 WebMDemuxer::GetBuffered()
 {
   EnsureUpToDateIndex();
-  AutoPinned<MediaResource> resource(mResource);
+  AutoPinned<MediaResource> resource(mResource.GetResource());
 
   media::TimeIntervals buffered;
 
   nsTArray<MediaByteRange> ranges;
   nsresult rv = resource->GetCachedRanges(ranges);
   if (NS_FAILED(rv)) {
     return media::TimeIntervals();
   }
--- a/dom/media/webm/WebMDemuxer.h
+++ b/dom/media/webm/WebMDemuxer.h
@@ -80,20 +80,16 @@ public:
   nsresult Reset();
 
   // Pushes a packet to the front of the audio packet queue.
   virtual void PushAudioPacket(NesteggPacketHolder* aItem);
 
   // Pushes a packet to the front of the video packet queue.
   virtual void PushVideoPacket(NesteggPacketHolder* aItem);
 
-  nsresult Read(char* aBuffer, uint32_t aCount, uint32_t * aBytes);
-  nsresult Seek(int32_t aWhence, int64_t aOffset);
-  int64_t Tell();
-
 private:
   friend class WebMTrackDemuxer;
 
   ~WebMDemuxer();
   void Cleanup();
   nsresult InitBufferedState();
   nsresult ReadMetadata();
   void NotifyDataArrived(uint32_t aLength, int64_t aOffset) override;
@@ -108,30 +104,29 @@ private:
   // the particular track have been read. Pass TrackInfo::kVideoTrack or
   // TrackInfo::kVideoTrack to indicate the type of the packet we want to read.
   nsRefPtr<NesteggPacketHolder> NextPacket(TrackInfo::TrackType aType);
 
   // Internal method that demuxes the next packet from the stream. The caller
   // is responsible for making sure it doesn't get lost.
   nsRefPtr<NesteggPacketHolder> DemuxPacket();
 
-  nsRefPtr<MediaResource> mResource;
+  MediaResourceIndex mResource;
   MediaInfo mInfo;
   nsTArray<nsRefPtr<WebMTrackDemuxer>> mDemuxers;
 
   // Parser state and computed offset-time mappings.  Shared by multiple
   // readers when decoder has been cloned.  Main thread only.
   nsRefPtr<WebMBufferedState> mBufferedState;
   nsRefPtr<MediaByteBuffer> mInitData;
 
   // libnestegg context for webm container.
   // Access on reader's thread for main demuxer,
   // or main thread for cloned demuxer
   nestegg* mContext;
-  int64_t mOffset;
 
   // Queue of video and audio packets that have been read but not decoded.
   WebMPacketQueue mVideoPackets;
   WebMPacketQueue mAudioPackets;
 
   // Index of video and audio track to play
   uint32_t mVideoTrack;
   uint32_t mAudioTrack;
--- a/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -745,21 +745,21 @@ MediaEngineGonkVideoSource::RotateImage(
     dstHeight = aHeight;
   }
 
   uint32_t half_width = dstWidth / 2;
 
   layers::GrallocImage* videoImage = static_cast<layers::GrallocImage*>(image.get());
   MOZ_ASSERT(mTextureClientAllocator);
   RefPtr<layers::TextureClient> textureClient
-    = mTextureClientAllocator->CreateOrRecycleForDrawing(gfx::SurfaceFormat::YUV,
-                                                         gfx::IntSize(dstWidth, dstHeight),
-                                                         layers::BackendSelector::Content,
-                                                         layers::TextureFlags::DEFAULT,
-                                                         layers::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
+    = mTextureClientAllocator->CreateOrRecycle(gfx::SurfaceFormat::YUV,
+                                               gfx::IntSize(dstWidth, dstHeight),
+                                               layers::BackendSelector::Content,
+                                               layers::TextureFlags::DEFAULT,
+                                               layers::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
   if (textureClient) {
     RefPtr<layers::GrallocTextureClientOGL> grallocTextureClient =
       static_cast<layers::GrallocTextureClientOGL*>(textureClient.get());
 
     android::sp<android::GraphicBuffer> destBuffer = grallocTextureClient->GetGraphicBuffer();
 
     void* destMem = nullptr;
     destBuffer->lock(android::GraphicBuffer::USAGE_SW_WRITE_OFTEN, &destMem);
--- a/dom/push/moz.build
+++ b/dom/push/moz.build
@@ -33,13 +33,15 @@ XPCSHELL_TESTS_MANIFESTS += [
 EXPORTS.mozilla.dom += [
     'PushManager.h',
 ]
 
 UNIFIED_SOURCES += [
     'PushManager.cpp',
 ]
 
+FAIL_ON_WARNINGS = True
+
 LOCAL_INCLUDES += [
     '../workers',
 ]
 
 FINAL_LIBRARY = 'xul'
--- a/dom/push/test/xpcshell/test_clearAll_successful.js
+++ b/dom/push/test/xpcshell/test_clearAll_successful.js
@@ -20,17 +20,16 @@ add_task(function* test_unregister_succe
     channelID,
     pushEndpoint: 'https://example.org/update/unregister-success',
     scope: 'https://example.com/page/unregister-success',
     version: 1,
     originAttributes: '',
     quota: Infinity,
   });
 
-  let unregisterDefer = Promise.defer();
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
--- a/dom/push/test/xpcshell/test_notification_ack.js
+++ b/dom/push/test/xpcshell/test_notification_ack.js
@@ -49,17 +49,18 @@ add_task(function* test_notification_ack
 
   let notifyPromise = Promise.all([
     promiseObserverNotification('push-notification'),
     promiseObserverNotification('push-notification'),
     promiseObserverNotification('push-notification')
   ]);
 
   let acks = 0;
-  let ackDefer = Promise.defer();
+  let ackDone;
+  let ackPromise = new Promise(resolve => ackDone = resolve);
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           equal(request.uaid, userAgentID,
@@ -110,24 +111,24 @@ add_task(function* test_notification_ack
             }], updates, 'Wrong updates for acknowledgement 2');
             break;
 
           case 3:
             deepEqual([{
               channelID: '5477bfda-22db-45d4-9614-fee369630260',
               version: 6
             }], updates, 'Wrong updates for acknowledgement 3');
-            ackDefer.resolve();
+            ackDone();
             break;
 
           default:
             ok(false, 'Unexpected acknowledgement ' + acks);
           }
         }
       });
     }
   });
 
   yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for notifications');
-  yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for multiple acknowledgements');
 });
--- a/dom/push/test/xpcshell/test_notification_duplicate.js
+++ b/dom/push/test/xpcshell/test_notification_duplicate.js
@@ -36,18 +36,18 @@ add_task(function* test_notification_dup
   }];
   for (let record of records) {
     yield db.put(record);
   }
 
   let notifyPromise = promiseObserverNotification('push-notification');
 
   let acks = 0;
-  let ackDefer = Promise.defer();
-  let ackDone = after(2, ackDefer.resolve);
+  let ackDone;
+  let ackPromise = new Promise(resolve => ackDone = after(2, resolve));
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -68,17 +68,17 @@ add_task(function* test_notification_dup
         },
         onACK: ackDone
       });
     }
   });
 
   yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for notifications');
-  yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for stale acknowledgement');
 
   let staleRecord = yield db.getByKeyID(
     '8d2d9400-3597-4c5a-8a38-c546b0043bcc');
   strictEqual(staleRecord.version, 2, 'Wrong stale record version');
 
   let updatedRecord = yield db.getByKeyID(
     '27d1e393-03ef-4c72-a5e6-9e890dfccad0');
--- a/dom/push/test/xpcshell/test_notification_error.js
+++ b/dom/push/test/xpcshell/test_notification_error.js
@@ -53,18 +53,18 @@ add_task(function* test_notification_err
       (subject, data) => data == 'https://example.com/a'
     ),
     promiseObserverNotification(
       'push-notification',
       (subject, data) => data == 'https://example.com/c'
     )
   ]);
 
-  let ackDefer = Promise.defer();
-  let ackDone = after(records.length, ackDefer.resolve);
+  let ackDone;
+  let ackPromise = new Promise(resolve => ackDone = after(records.length, resolve));
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db: makeStub(db, {
       getByKeyID(prev, channelID) {
         if (channelID == '3c3930ba-44de-40dc-a7ca-8a133ec1a866') {
           return Promise.reject('splines not reticulated');
         }
@@ -107,17 +107,17 @@ add_task(function* test_notification_err
     'Wrong endpoint for notification A');
   equal(aPush.version, 2, 'Wrong version for notification A');
 
   let cPush = c.subject.QueryInterface(Ci.nsIPushObserverNotification);
   equal(cPush.pushEndpoint, 'https://example.org/update/success-2',
     'Wrong endpoint for notification C');
   equal(cPush.version, 4, 'Wrong version for notification C');
 
-  yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for acknowledgements');
 
   let aRecord = yield db.getByIdentifiers({scope: 'https://example.com/a',
                                            originAttributes: originAttributes });
   equal(aRecord.channelID, 'f04f1e46-9139-4826-b2d1-9411b0821283',
     'Wrong channel ID for record A');
   strictEqual(aRecord.version, 2,
     'Should return the new version for record A');
--- a/dom/push/test/xpcshell/test_notification_incomplete.js
+++ b/dom/push/test/xpcshell/test_notification_incomplete.js
@@ -52,18 +52,18 @@ add_task(function* test_notification_inc
   for (let record of records) {
     yield db.put(record);
   }
 
   Services.obs.addObserver(function observe(subject, topic, data) {
     ok(false, 'Should not deliver malformed updates');
   }, 'push-notification', false);
 
-  let notificationDefer = Promise.defer();
-  let notificationDone = after(2, notificationDefer.resolve);
+  let notificationDone;
+  let notificationPromise = new Promise(resolve => notificationDone = after(2, resolve));
   let prevHandler = PushServiceWebSocket._handleNotificationReply;
   PushServiceWebSocket._handleNotificationReply = function _handleNotificationReply() {
     notificationDone();
     return prevHandler.apply(this, arguments);
   };
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
@@ -102,17 +102,17 @@ add_task(function* test_notification_inc
         },
         onACK() {
           ok(false, 'Should not acknowledge malformed updates');
         }
       });
     }
   });
 
-  yield waitForPromise(notificationDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(notificationPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for incomplete notifications');
 
   let storeRecords = yield db.getAllKeyIDs();
   storeRecords.sort(({pushEndpoint: a}, {pushEndpoint: b}) =>
     compareAscending(a, b));
   recordsAreEqual(records, storeRecords);
 });
 
--- a/dom/push/test/xpcshell/test_notification_version_string.js
+++ b/dom/push/test/xpcshell/test_notification_version_string.js
@@ -23,17 +23,18 @@ add_task(function* test_notification_ver
     scope: 'https://example.com/page/1',
     originAttributes: '',
     version: 2,
     quota: Infinity,
   });
 
   let notifyPromise = promiseObserverNotification('push-notification');
 
-  let ackDefer = Promise.defer();
+  let ackDone;
+  let ackPromise = new Promise(resolve => ackDone = resolve);
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -44,32 +45,32 @@ add_task(function* test_notification_ver
           this.serverSendMsg(JSON.stringify({
             messageType: 'notification',
             updates: [{
               channelID: '6ff97d56-d0c0-43bc-8f5b-61b855e1d93b',
               version: '4'
             }]
           }));
         },
-        onACK: ackDefer.resolve
+        onACK: ackDone
       });
     }
   });
 
   let {subject: notification, data: scope} = yield waitForPromise(
     notifyPromise,
     DEFAULT_TIMEOUT,
     'Timed out waiting for string notification'
   );
   let message = notification.QueryInterface(Ci.nsIPushObserverNotification);
   equal(scope, 'https://example.com/page/1', 'Wrong scope');
   equal(message.pushEndpoint, 'https://example.org/updates/1',
     'Wrong push endpoint');
   strictEqual(message.version, 4, 'Wrong version');
 
-  yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for string acknowledgement');
 
   let storeRecord = yield db.getByKeyID(
     '6ff97d56-d0c0-43bc-8f5b-61b855e1d93b');
   strictEqual(storeRecord.version, 4, 'Wrong record version');
   equal(storeRecord.quota, Infinity, 'Wrong quota');
 });
--- a/dom/push/test/xpcshell/test_quota_exceeded.js
+++ b/dom/push/test/xpcshell/test_quota_exceeded.js
@@ -80,17 +80,19 @@ add_task(function* test_expiration_origi
   // and 1 on the `deals` channel. They're from the same origin, but
   // different scopes, so each can send 5 notifications before we remove
   // their subscription.
   let updates = 0;
   let notifyPromise = promiseObserverNotification('push-notification', (subject, data) => {
     updates++;
     return updates == 6;
   });
-  let unregisterDefer = Promise.defer();
+
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
 
   PushService.init({
     serverURI: 'wss://push.example.org/',
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
@@ -122,26 +124,26 @@ add_task(function* test_expiration_origi
             updates: [{
               channelID: '46cc6f6a-c106-4ffa-bb7c-55c60bd50c41',
               version: 1,
             }],
           }));
         },
         onUnregister(request) {
           equal(request.channelID, 'eb33fc90-c883-4267-b5cb-613969e8e349', 'Unregistered wrong channel ID');
-          unregisterDefer.resolve();
+          unregisterDone();
         },
         // We expect to receive acks, but don't care about their
         // contents.
         onACK(request) {},
       });
     },
   });
 
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for unregister request');
 
   yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for notifications');
 
   let expiredRecord = yield db.getByKeyID('eb33fc90-c883-4267-b5cb-613969e8e349');
   strictEqual(expiredRecord.quota, 0, 'Expired record not updated');
 });
--- a/dom/push/test/xpcshell/test_quota_observer.js
+++ b/dom/push/test/xpcshell/test_quota_observer.js
@@ -47,17 +47,18 @@ add_task(function* test_expiration_histo
     uri: 'https://example.com/infrequent',
     title: 'Infrequently-visited page',
     visits: [{
       visitDate: (Date.now() - 14 * 24 * 60 * 60 * 1000) * 1000,
       transitionType: Ci.nsINavHistoryService.TRANSITION_LINK,
     }],
   });
 
-  let unregisterDefer = Promise.defer();
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
 
   PushService.init({
     serverURI: 'wss://push.example.org/',
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
@@ -74,24 +75,24 @@ add_task(function* test_expiration_histo
             updates: [{
               channelID: '379c0668-8323-44d2-a315-4ee83f1a9ee9',
               version: 2,
             }],
           }));
         },
         onUnregister(request) {
           equal(request.channelID, '379c0668-8323-44d2-a315-4ee83f1a9ee9', 'Dropped wrong channel ID');
-          unregisterDefer.resolve();
+          unregisterDone();
         },
         onACK(request) {},
       });
     }
   });
 
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for unregister request');
 
   let expiredRecord = yield db.getByKeyID('379c0668-8323-44d2-a315-4ee83f1a9ee9');
   strictEqual(expiredRecord.quota, 0, 'Expired record not updated');
 
   let notifiedScopes = [];
   let subChangePromise = promiseObserverNotification('push-subscription-change', (subject, data) => {
     notifiedScopes.push(data);
--- a/dom/push/test/xpcshell/test_register_flush.js
+++ b/dom/push/test/xpcshell/test_register_flush.js
@@ -32,18 +32,18 @@ add_task(function* test_register_flush()
     originAttributes: '',
     version: 2,
     quota: Infinity,
   };
   yield db.put(record);
 
   let notifyPromise = promiseObserverNotification('push-notification');
 
-  let ackDefer = Promise.defer();
-  let ackDone = after(2, ackDefer.resolve);
+  let ackDone;
+  let ackPromise = new Promise(resolve => ackDone = after(2, resolve));
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -82,17 +82,17 @@ add_task(function* test_register_flush()
     'Wrong push endpoint in record');
   equal(newRecord.scope, 'https://example.com/page/2',
     'Wrong scope in record');
 
   let {data: scope} = yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for notification');
   equal(scope, 'https://example.com/page/1', 'Wrong notification scope');
 
-  yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
      'Timed out waiting for acknowledgements');
 
   let prevRecord = yield db.getByKeyID(
     '9bcc7efb-86c7-4457-93ea-e24e6eb59b74');
   equal(prevRecord.pushEndpoint, 'https://example.org/update/1',
     'Wrong existing push endpoint');
   strictEqual(prevRecord.version, 3,
     'Should record version updates sent before register responses');
--- a/dom/push/test/xpcshell/test_register_invalid_json.js
+++ b/dom/push/test/xpcshell/test_register_invalid_json.js
@@ -16,18 +16,18 @@ function run_test() {
   });
   disableServiceWorkerEvents(
     'https://example.net/page/invalid-json'
   );
   run_next_test();
 }
 
 add_task(function* test_register_invalid_json() {
-  let helloDefer = Promise.defer();
-  let helloDone = after(2, helloDefer.resolve);
+  let helloDone;
+  let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
   let registers = 0;
 
   PushServiceWebSocket._generateID = () => channelID;
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
@@ -52,12 +52,12 @@ add_task(function* test_register_invalid
     PushNotificationService.register('https://example.net/page/invalid-json',
       ChromeUtils.originAttributesToSuffix({ appId: Ci.nsIScriptSecurityManager.NO_APP_ID, inBrowser: false })),
     function(error) {
       return error == 'TimeoutError';
     },
     'Wrong error for invalid JSON response'
   );
 
-  yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
     'Reconnect after invalid JSON response timed out');
   equal(registers, 1, 'Wrong register count');
 });
--- a/dom/push/test/xpcshell/test_register_no_id.js
+++ b/dom/push/test/xpcshell/test_register_no_id.js
@@ -18,18 +18,18 @@ function run_test() {
   disableServiceWorkerEvents(
     'https://example.com/incomplete'
   );
   run_next_test();
 }
 
 add_task(function* test_register_no_id() {
   let registers = 0;
-  let helloDefer = Promise.defer();
-  let helloDone = after(2, helloDefer.resolve);
+  let helloDone;
+  let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
 
   PushServiceWebSocket._generateID = () => channelID;
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
@@ -56,12 +56,12 @@ add_task(function* test_register_no_id()
     PushNotificationService.register('https://example.com/incomplete',
       ChromeUtils.originAttributesToSuffix({ appId: Ci.nsIScriptSecurityManager.NO_APP_ID, inBrowser: false })),
     function(error) {
       return error == 'TimeoutError';
     },
     'Wrong error for incomplete register response'
   );
 
-  yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
     'Reconnect after incomplete register response timed out');
   equal(registers, 1, 'Wrong register count');
 });
--- a/dom/push/test/xpcshell/test_register_request_queue.js
+++ b/dom/push/test/xpcshell/test_register_request_queue.js
@@ -16,25 +16,26 @@ function run_test() {
   );
   run_next_test();
 }
 
 add_task(function* test_register_request_queue() {
   let db = PushServiceWebSocket.newPushDB();
   do_register_cleanup(() => {return db.drop().then(_ => db.close());});
 
-  let helloDefer = Promise.defer();
-  let onHello = after(2, function onHello(request) {
+  let onHello;
+  let helloPromise = new Promise(resolve => onHello = after(2, function onHello(request) {
     this.serverSendMsg(JSON.stringify({
       messageType: 'hello',
       status: 200,
       uaid: '54b08a9e-59c6-4ed7-bb54-f4fd60d6f606'
     }));
-    helloDefer.resolve();
-  });
+    resolve();
+  }));
+
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello,
         onRegister() {
@@ -57,11 +58,11 @@ add_task(function* test_register_request
     rejects(firstRegister, function(error) {
       return error == 'TimeoutError';
     }, 'Should time out the first request'),
     rejects(secondRegister, function(error) {
       return error == 'TimeoutError';
     }, 'Should time out the second request')
   ]), DEFAULT_TIMEOUT, 'Queued requests did not time out');
 
-  yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for reconnect');
 });
--- a/dom/push/test/xpcshell/test_register_rollback.js
+++ b/dom/push/test/xpcshell/test_register_rollback.js
@@ -22,17 +22,18 @@ function run_test() {
 }
 
 add_task(function* test_register_rollback() {
   let db = PushServiceWebSocket.newPushDB();
   do_register_cleanup(() => {return db.drop().then(_ => db.close());});
 
   let handshakes = 0;
   let registers = 0;
-  let unregisterDefer = Promise.defer();
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
   PushServiceWebSocket._generateID = () => channelID;
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db: makeStub(db, {
       put(prev, record) {
         return Promise.reject('universe has imploded');
       }
@@ -61,30 +62,30 @@ add_task(function* test_register_rollbac
         },
         onUnregister(request) {
           equal(request.channelID, channelID, 'Unregister: wrong channel ID');
           this.serverSendMsg(JSON.stringify({
             messageType: 'unregister',
             status: 200,
             channelID
           }));
-          unregisterDefer.resolve();
+          unregisterDone();
         }
       });
     }
   });
 
   // Should return a rejected promise if storage fails.
   yield rejects(
     PushNotificationService.register('https://example.com/storage-error',
       ChromeUtils.originAttributesToSuffix({ appId: Ci.nsIScriptSecurityManager.NO_APP_ID, inBrowser: false })),
     function(error) {
       return error == 'universe has imploded';
     },
     'Wrong error for unregister database failure'
   );
 
   // Should send an out-of-band unregister request.
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Unregister request timed out');
   equal(handshakes, 1, 'Wrong handshake count');
   equal(registers, 1, 'Wrong register count');
 });
--- a/dom/push/test/xpcshell/test_register_timeout.js
+++ b/dom/push/test/xpcshell/test_register_timeout.js
@@ -17,17 +17,18 @@ function run_test() {
   disableServiceWorkerEvents(
     'https://example.net/page/timeout'
   );
   run_next_test();
 }
 
 add_task(function* test_register_timeout() {
   let handshakes = 0;
-  let timeoutDefer = Promise.defer();
+  let timeoutDone;
+  let timeoutPromise = new Promise(resolve => timeoutDone = resolve);
   let registers = 0;
 
   let db = PushServiceWebSocket.newPushDB();
   do_register_cleanup(() => {return db.drop().then(_ => db.close());});
 
   PushServiceWebSocket._generateID = () => channelID;
   PushService.init({
     serverURI: "wss://push.example.org/",
@@ -69,17 +70,17 @@ add_task(function* test_register_timeout
             // Should ignore replies for timed-out requests.
             this.serverSendMsg(JSON.stringify({
               messageType: 'register',
               status: 200,
               channelID: channelID,
               uaid: userAgentID,
               pushEndpoint: 'https://example.com/update/timeout',
             }));
-            timeoutDefer.resolve();
+            timeoutDone();
           }, 2000);
           registers++;
         }
       });
     }
   });
 
   yield rejects(
@@ -90,14 +91,14 @@ add_task(function* test_register_timeout
     },
     'Wrong error for request timeout'
   );
 
   let record = yield db.getByKeyID(channelID);
   ok(!record, 'Should not store records for timed-out responses');
 
   yield waitForPromise(
-    timeoutDefer.promise,
+    timeoutPromise,
     DEFAULT_TIMEOUT,
     'Reconnect timed out'
   );
   equal(registers, 1, 'Should not handle timed-out register requests');
 });
--- a/dom/push/test/xpcshell/test_register_wrong_id.js
+++ b/dom/push/test/xpcshell/test_register_wrong_id.js
@@ -20,18 +20,18 @@ function run_test() {
     'https://example.com/mismatched'
   );
   run_next_test();
 }
 
 add_task(function* test_register_wrong_id() {
   // Should reconnect after the register request times out.
   let registers = 0;
-  let helloDefer = Promise.defer();
-  let helloDone = after(2, helloDefer.resolve);
+  let helloDone;
+  let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
 
   PushServiceWebSocket._generateID = () => clientChannelID;
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
@@ -62,12 +62,12 @@ add_task(function* test_register_wrong_i
     PushNotificationService.register('https://example.com/mismatched',
       ChromeUtils.originAttributesToSuffix({ appId: Ci.nsIScriptSecurityManager.NO_APP_ID, inBrowser: false })),
     function(error) {
       return error == 'TimeoutError';
     },
     'Wrong error for mismatched register reply'
   );
 
-  yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
     'Reconnect after mismatched register reply timed out');
   equal(registers, 1, 'Wrong register count');
 });
--- a/dom/push/test/xpcshell/test_register_wrong_type.js
+++ b/dom/push/test/xpcshell/test_register_wrong_type.js
@@ -16,18 +16,18 @@ function run_test() {
   disableServiceWorkerEvents(
     'https://example.com/mistyped'
   );
   run_next_test();
 }
 
 add_task(function* test_register_wrong_type() {
   let registers = 0;
-  let helloDefer = Promise.defer();
-  let helloDone = after(2, helloDefer.resolve);
+  let helloDone;
+  let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
 
   PushService._generateID = () => '1234';
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
@@ -58,12 +58,12 @@ add_task(function* test_register_wrong_t
     PushNotificationService.register('https://example.com/mistyped',
       ChromeUtils.originAttributesToSuffix({ appId: Ci.nsIScriptSecurityManager.NO_APP_ID, inBrowser: false })),
     function(error) {
       return error == 'TimeoutError';
     },
     'Wrong error for non-string channel ID'
   );
 
-  yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
     'Reconnect after sending non-string channel ID timed out');
   equal(registers, 1, 'Wrong register count');
 });
--- a/dom/push/test/xpcshell/test_registration_success.js
+++ b/dom/push/test/xpcshell/test_registration_success.js
@@ -37,17 +37,18 @@ add_task(function* test_registration_suc
     originAttributes: ChromeUtils.originAttributesToSuffix({ appId: 42, inBrowser: true }),
     version: 15,
     quota: Infinity,
   }];
   for (let record of records) {
     yield db.put(record);
   }
 
-  let handshakeDefer = Promise.defer();
+  let handshakeDone;
+  let handshakePromise = new Promise(resolve => handshakeDone = resolve);
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           equal(request.uaid, userAgentID, 'Wrong device ID in handshake');
           deepEqual(request.channelIDs.sort(), [
@@ -55,24 +56,24 @@ add_task(function* test_registration_suc
             'bf001fe0-2684-42f2-bc4d-a3e14b11dd5b',
             'f6edfbcd-79d6-49b8-9766-48b9dcfeff0f',
           ], 'Wrong channel list in handshake');
           this.serverSendMsg(JSON.stringify({
             messageType: 'hello',
             status: 200,
             uaid: userAgentID
           }));
-          handshakeDefer.resolve();
+          handshakeDone();
         }
       });
     }
   });
 
   yield waitForPromise(
-    handshakeDefer.promise,
+    handshakePromise,
     DEFAULT_TIMEOUT,
     'Timed out waiting for handshake'
   );
 
   let registration = yield PushNotificationService.registration(
     'https://example.net/a', '');
   equal(
     registration.pushEndpoint,
--- a/dom/push/test/xpcshell/test_unregister_error.js
+++ b/dom/push/test/xpcshell/test_unregister_error.js
@@ -20,17 +20,18 @@ add_task(function* test_unregister_error
     channelID: channelID,
     pushEndpoint: 'https://example.org/update/failure',
     scope: 'https://example.net/page/failure',
     originAttributes: '',
     version: 1,
     quota: Infinity,
   });
 
-  let unregisterDefer = Promise.defer();
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -44,24 +45,24 @@ add_task(function* test_unregister_error
           // any failures are swallowed.
           equal(request.channelID, channelID, 'Unregister: wrong channel ID');
           this.serverSendMsg(JSON.stringify({
             messageType: 'unregister',
             status: 500,
             error: 'omg, everything is exploding',
             channelID
           }));
-          unregisterDefer.resolve();
+          unregisterDone();
         }
       });
     }
   });
 
   yield PushNotificationService.unregister(
     'https://example.net/page/failure', '');
 
   let result = yield db.getByKeyID(channelID);
   ok(!result, 'Deleted push record exists');
 
   // Make sure we send a request to the server.
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for unregister');
 });
--- a/dom/push/test/xpcshell/test_unregister_invalid_json.js
+++ b/dom/push/test/xpcshell/test_unregister_invalid_json.js
@@ -34,18 +34,18 @@ add_task(function* test_unregister_inval
     originAttributes: '',
     version: 1,
     quota: Infinity,
   }];
   for (let record of records) {
     yield db.put(record);
   }
 
-  let unregisterDefer = Promise.defer();
-  let unregisterDone = after(2, unregisterDefer.resolve);
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = after(2, resolve));
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -72,11 +72,11 @@ add_task(function* test_unregister_inval
 
   yield PushNotificationService.unregister(
     'https://example.net/page/1', '');
   record = yield db.getByKeyID(
     '057caa8f-9b99-47ff-891c-adad18ce603e');
   ok(!record,
     'Failed to delete unregistered record after receiving invalid JSON');
 
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for unregister');
 });
--- a/dom/push/test/xpcshell/test_unregister_success.js
+++ b/dom/push/test/xpcshell/test_unregister_success.js
@@ -20,17 +20,18 @@ add_task(function* test_unregister_succe
     channelID,
     pushEndpoint: 'https://example.org/update/unregister-success',
     scope: 'https://example.com/page/unregister-success',
     originAttributes: '',
     version: 1,
     quota: Infinity,
   });
 
-  let unregisterDefer = Promise.defer();
+  let unregisterDone;
+  let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
   PushService.init({
     serverURI: "wss://push.example.org/",
     networkInfo: new MockDesktopNetworkInfo(),
     db,
     makeWebSocket(uri) {
       return new MockWebSocket(uri, {
         onHello(request) {
           this.serverSendMsg(JSON.stringify({
@@ -41,22 +42,22 @@ add_task(function* test_unregister_succe
         },
         onUnregister(request) {
           equal(request.channelID, channelID, 'Should include the channel ID');
           this.serverSendMsg(JSON.stringify({
             messageType: 'unregister',
             status: 200,
             channelID
           }));
-          unregisterDefer.resolve();
+          unregisterDone();
         }
       });
     }
   });
 
   yield PushNotificationService.unregister(
     'https://example.com/page/unregister-success', '');
   let record = yield db.getByKeyID(channelID);
   ok(!record, 'Unregister did not remove record');
 
-  yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
+  yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
     'Timed out waiting for unregister');
 });
--- a/gfx/gl/GLContext.cpp
+++ b/gfx/gl/GLContext.cpp
@@ -25,16 +25,17 @@
 #include "prenv.h"
 #include "prlink.h"
 #include "ScopedGLHelpers.h"
 #include "SharedSurfaceGL.h"
 #include "GfxTexturesReporter.h"
 #include "TextureGarbageBin.h"
 #include "gfx2DGlue.h"
 #include "gfxPrefs.h"
+#include "DriverCrashGuard.h"
 #include "mozilla/IntegerPrintfMacros.h"
 
 #include "OGLShaderProgram.h" // for ShaderProgramType
 
 #include "mozilla/DebugOnly.h"
 
 #ifdef XP_MACOSX
 #include <CoreServices/CoreServices.h>
@@ -363,16 +364,21 @@ GLContext::InitWithPrefix(const char *pr
 {
     ScopedGfxFeatureReporter reporter("GL Context");
 
     if (mInitialized) {
         reporter.SetSuccessful();
         return true;
     }
 
+    GLContextCrashGuard crashGuard;
+    if (crashGuard.Crashed()) {
+        return false;
+    }
+
     mWorkAroundDriverBugs = gfxPrefs::WorkAroundDriverBugs();
 
     SymLoadStruct symbols[] = {
         { (PRFuncPtr*) &mSymbols.fActiveTexture, { "ActiveTexture", "ActiveTextureARB", nullptr } },
         { (PRFuncPtr*) &mSymbols.fAttachShader, { "AttachShader", "AttachShaderARB", nullptr } },
         { (PRFuncPtr*) &mSymbols.fBindAttribLocation, { "BindAttribLocation", "BindAttribLocationARB", nullptr } },
         { (PRFuncPtr*) &mSymbols.fBindBuffer, { "BindBuffer", "BindBufferARB", nullptr } },
         { (PRFuncPtr*) &mSymbols.fBindTexture, { "BindTexture", "BindTextureARB", nullptr } },
--- a/gfx/layers/D3D9SurfaceImage.cpp
+++ b/gfx/layers/D3D9SurfaceImage.cpp
@@ -3,87 +3,33 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "D3D9SurfaceImage.h"
 #include "gfx2DGlue.h"
 #include "mozilla/layers/TextureD3D9.h"
 #include "mozilla/layers/CompositableClient.h"
 #include "mozilla/layers/CompositableForwarder.h"
+#include "mozilla/layers/ImageBridgeChild.h"
 #include "mozilla/gfx/Types.h"
 
 namespace mozilla {
 namespace layers {
 
 
 D3D9SurfaceImage::D3D9SurfaceImage()
   : Image(nullptr, ImageFormat::D3D9_RGB32_TEXTURE)
   , mSize(0, 0)
   , mValid(false)
 {}
 
 D3D9SurfaceImage::~D3D9SurfaceImage()
 {
-  if (mTexture) {
-    gfxWindowsPlatform::sD3D9SurfaceImageUsed -= mSize.width * mSize.height * 4;
-  }
 }
 
-static const GUID sD3D9TextureUsage =
-{ 0x631e1338, 0xdc22, 0x497f, { 0xa1, 0xa8, 0xb4, 0xfe, 0x3a, 0xf4, 0x13, 0x4d } };
-
-/* This class get's it's lifetime tied to a D3D texture
- * and increments memory usage on construction and decrements
- * on destruction */
-class TextureMemoryMeasurer9 : public IUnknown
-{
-public:
-  TextureMemoryMeasurer9(size_t aMemoryUsed)
-  {
-    mMemoryUsed = aMemoryUsed;
-    gfxWindowsPlatform::sD3D9MemoryUsed += mMemoryUsed;
-    mRefCnt = 0;
-  }
-  ~TextureMemoryMeasurer9()
-  {
-    gfxWindowsPlatform::sD3D9MemoryUsed -= mMemoryUsed;
-  }
-  STDMETHODIMP_(ULONG) AddRef() {
-    mRefCnt++;
-    return mRefCnt;
-  }
-  STDMETHODIMP QueryInterface(REFIID riid,
-                              void **ppvObject)
-  {
-    IUnknown *punk = nullptr;
-    if (riid == IID_IUnknown) {
-      punk = this;
-    }
-    *ppvObject = punk;
-    if (punk) {
-      punk->AddRef();
-      return S_OK;
-    } else {
-      return E_NOINTERFACE;
-    }
-  }
-
-  STDMETHODIMP_(ULONG) Release() {
-    int refCnt = --mRefCnt;
-    if (refCnt == 0) {
-      delete this;
-    }
-    return refCnt;
-  }
-private:
-  int mRefCnt;
-  int mMemoryUsed;
-};
-
-
 HRESULT
 D3D9SurfaceImage::SetData(const Data& aData)
 {
   NS_ENSURE_TRUE(aData.mSurface, E_POINTER);
   HRESULT hr;
   RefPtr<IDirect3DSurface9> surface = aData.mSurface;
 
   RefPtr<IDirect3DDevice9> device;
@@ -103,57 +49,44 @@ D3D9SurfaceImage::SetData(const Data& aD
                                          desc.Format,
                                          D3DFMT_X8R8G8B8);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   // DXVA surfaces aren't created sharable, so we need to copy the surface
   // to a sharable texture to that it's accessible to the layer manager's
   // device.
   const gfx::IntRect& region = aData.mRegion;
-  RefPtr<IDirect3DTexture9> texture;
-  HANDLE shareHandle = nullptr;
-  hr = device->CreateTexture(region.width,
-                             region.height,
-                             1,
-                             D3DUSAGE_RENDERTARGET,
-                             D3DFMT_X8R8G8B8,
-                             D3DPOOL_DEFAULT,
-                             byRef(texture),
-                             &shareHandle);
-  NS_ENSURE_TRUE(SUCCEEDED(hr) && shareHandle, hr);
-
-  // Track the lifetime of this memory
-  texture->SetPrivateData(sD3D9TextureUsage, new TextureMemoryMeasurer9(region.width * region.height * 4), sizeof(IUnknown *), D3DSPD_IUNKNOWN);
-
-  gfxWindowsPlatform::sD3D9SurfaceImageUsed += region.width * region.height * 4;
+  RefPtr<SharedTextureClientD3D9> textureClient =
+    aData.mAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::B8G8R8X8,
+                                            region.Size());
+  if (!textureClient) {
+    return E_FAIL;
+  }
 
   // Copy the image onto the texture, preforming YUV -> RGB conversion if necessary.
-  RefPtr<IDirect3DSurface9> textureSurface;
-  hr = texture->GetSurfaceLevel(0, byRef(textureSurface));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
-  // Stash the surface description for later use.
-  textureSurface->GetDesc(&mDesc);
+  RefPtr<IDirect3DSurface9> textureSurface = textureClient->GetD3D9Surface();
+  if (!textureSurface) {
+    return E_FAIL;
+  }
 
   RECT src = { region.x, region.y, region.x+region.width, region.y+region.height };
   hr = device->StretchRect(surface, &src, textureSurface, nullptr, D3DTEXF_NONE);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   // Flush the draw command now, so that by the time we come to draw this
   // image, we're less likely to need to wait for the draw operation to
   // complete.
   RefPtr<IDirect3DQuery9> query;
   hr = device->CreateQuery(D3DQUERYTYPE_EVENT, byRef(query));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   hr = query->Issue(D3DISSUE_END);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-  mTexture = texture;
-  mShareHandle = shareHandle;
-  mSize = gfx::IntSize(region.width, region.height);
+  mTextureClient = textureClient;
+  mSize = region.Size();
   mQuery = query;
 
   return S_OK;
 }
 
 bool
 D3D9SurfaceImage::IsValid()
 {
@@ -183,67 +116,63 @@ D3D9SurfaceImage::EnsureSynchronized()
     break;
   }
   mQuery = nullptr;
 }
 
 const D3DSURFACE_DESC&
 D3D9SurfaceImage::GetDesc() const
 {
-  return mDesc;
+  return mTextureClient->GetDesc();
 }
 
 gfx::IntSize
 D3D9SurfaceImage::GetSize()
 {
   return mSize;
 }
 
 TextureClient*
 D3D9SurfaceImage::GetTextureClient(CompositableClient* aClient)
 {
+  MOZ_ASSERT(mTextureClient);
+  MOZ_ASSERT(mTextureClient->GetAllocator() == aClient->GetForwarder());
   EnsureSynchronized();
-  if (!mTextureClient) {
-    mTextureClient = SharedTextureClientD3D9::Create(aClient->GetForwarder(),
-                                                     gfx::SurfaceFormat::B8G8R8X8,
-                                                     TextureFlags::DEFAULT,
-                                                     mTexture,
-                                                     mShareHandle,
-                                                     mDesc);
-  }
   return mTextureClient;
 }
 
 already_AddRefed<gfx::SourceSurface>
 D3D9SurfaceImage::GetAsSourceSurface()
 {
-  NS_ENSURE_TRUE(mTexture, nullptr);
+  NS_ENSURE_TRUE(mTextureClient, nullptr);
 
   HRESULT hr;
   RefPtr<gfx::DataSourceSurface> surface = gfx::Factory::CreateDataSourceSurface(mSize, gfx::SurfaceFormat::B8G8R8X8);
   if (NS_WARN_IF(!surface)) {
     return nullptr;
   }
 
   // Ensure that the texture is ready to be used.
   EnsureSynchronized();
 
   // Readback the texture from GPU memory into system memory, so that
   // we can copy it into the Cairo image. This is expensive.
-  RefPtr<IDirect3DSurface9> textureSurface;
-  hr = mTexture->GetSurfaceLevel(0, byRef(textureSurface));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  RefPtr<IDirect3DSurface9> textureSurface = mTextureClient->GetD3D9Surface();
+  if (!textureSurface) {
+    return nullptr;
+  }
 
-  RefPtr<IDirect3DDevice9> device;
-  hr = mTexture->GetDevice(byRef(device));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  RefPtr<IDirect3DDevice9> device = mTextureClient->GetD3D9Device();
+  if (!device) {
+    return nullptr;
+  }
 
   RefPtr<IDirect3DSurface9> systemMemorySurface;
-  hr = device->CreateOffscreenPlainSurface(mDesc.Width,
-                                           mDesc.Height,
+  hr = device->CreateOffscreenPlainSurface(mSize.width,
+                                           mSize.height,
                                            D3DFMT_X8R8G8B8,
                                            D3DPOOL_SYSTEMMEM,
                                            byRef(systemMemorySurface),
                                            0);
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
   hr = device->GetRenderTargetData(textureSurface, systemMemorySurface);
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
@@ -267,10 +196,41 @@ D3D9SurfaceImage::GetAsSourceSurface()
   }
 
   systemMemorySurface->UnlockRect();
   surface->Unmap();
 
   return surface.forget();
 }
 
+already_AddRefed<TextureClient>
+D3D9RecycleAllocator::Allocate(gfx::SurfaceFormat aFormat,
+                               gfx::IntSize aSize,
+                               BackendSelector aSelector,
+                               TextureFlags aTextureFlags,
+                               TextureAllocationFlags aAllocFlags)
+{
+  return SharedTextureClientD3D9::Create(mSurfaceAllocator,
+                                         aFormat,
+                                         aTextureFlags,
+                                         mDevice,
+                                         aSize);
+}
+
+already_AddRefed<SharedTextureClientD3D9>
+D3D9RecycleAllocator::CreateOrRecycleClient(gfx::SurfaceFormat aFormat,
+                                            const gfx::IntSize& aSize)
+{
+  RefPtr<TextureClient> textureClient =
+    CreateOrRecycle(aFormat,
+                    aSize,
+                    BackendSelector::Content,
+                    layers::TextureFlags::DEFAULT);
+  if (!textureClient) {
+    return nullptr;
+  }
+
+  RefPtr<SharedTextureClientD3D9> textureD3D9 = static_cast<SharedTextureClientD3D9*>(textureClient.get());
+  return textureD3D9.forget();
+}
+
 } // namespace layers
 } // namespace mozilla
--- a/gfx/layers/D3D9SurfaceImage.h
+++ b/gfx/layers/D3D9SurfaceImage.h
@@ -5,32 +5,66 @@
 
 #ifndef GFX_D3DSURFACEIMAGE_H
 #define GFX_D3DSURFACEIMAGE_H
 
 #include "mozilla/RefPtr.h"
 #include "ImageContainer.h"
 #include "nsAutoPtr.h"
 #include "d3d9.h"
+#include "mozilla/layers/TextureClientRecycleAllocator.h"
 
 namespace mozilla {
 namespace layers {
 
+class SharedTextureClientD3D9;
+
+class D3D9RecycleAllocator : public TextureClientRecycleAllocator
+{
+public:
+  explicit D3D9RecycleAllocator(ISurfaceAllocator* aAllocator,
+                                IDirect3DDevice9* aDevice)
+    : TextureClientRecycleAllocator(aAllocator)
+    , mDevice(aDevice)
+  {}
+
+  already_AddRefed<SharedTextureClientD3D9>
+  CreateOrRecycleClient(gfx::SurfaceFormat aFormat,
+                        const gfx::IntSize& aSize);
+
+protected:
+  virtual already_AddRefed<TextureClient>
+  Allocate(gfx::SurfaceFormat aFormat,
+           gfx::IntSize aSize,
+           BackendSelector aSelector,
+           TextureFlags aTextureFlags,
+           TextureAllocationFlags aAllocFlags) override;
+
+  RefPtr<IDirect3DDevice9> mDevice;
+};
+
 // Image class that wraps a IDirect3DSurface9. This class copies the image
 // passed into SetData(), so that it can be accessed from other D3D devices.
 // This class also manages the synchronization of the copy, to ensure the
 // resource is ready to use.
 class D3D9SurfaceImage : public Image {
 public:
 
   struct Data {
-    Data(IDirect3DSurface9* aSurface, const gfx::IntRect& aRegion)
-      : mSurface(aSurface), mRegion(aRegion) {}
+    Data(IDirect3DSurface9* aSurface,
+         const gfx::IntRect& aRegion,
+         D3D9RecycleAllocator* aAllocator)
+      : mSurface(aSurface)
+      , mRegion(aRegion)
+      , mAllocator(aAllocator)
+    {}
+
     RefPtr<IDirect3DSurface9> mSurface;
     gfx::IntRect mRegion;
+    RefPtr<D3D9RecycleAllocator> mAllocator;
   };
 
   D3D9SurfaceImage();
   virtual ~D3D9SurfaceImage();
 
   // Copies the surface into a sharable texture's surface, and initializes
   // the image.
   HRESULT SetData(const Data& aData);
@@ -48,20 +82,17 @@ public:
 
 private:
 
   // Blocks the calling thread until the copy operation started in SetData()
   // is complete, whereupon the texture is safe to use.
   void EnsureSynchronized();
 
   gfx::IntSize mSize;
-  RefPtr<IDirect3DTexture9> mTexture;
   RefPtr<IDirect3DQuery9> mQuery;
-  RefPtr<TextureClient> mTextureClient;
-  HANDLE mShareHandle;
-  D3DSURFACE_DESC mDesc;
+  RefPtr<SharedTextureClientD3D9> mTextureClient;
   bool mValid;
 };
 
 } // namepace layers
 } // namespace mozilla
 
 #endif // GFX_D3DSURFACEIMAGE_H
--- a/gfx/layers/ImageContainer.cpp
+++ b/gfx/layers/ImageContainer.cpp
@@ -616,20 +616,20 @@ CairoImage::GetTextureClient(Compositabl
 
 // XXX only gonk ensure when TextureClient is recycled,
 // TextureHost is not used by CompositableHost.
 #ifdef MOZ_WIDGET_GONK
   RefPtr<TextureClientRecycleAllocator> recycler =
     aClient->GetTextureClientRecycler();
   if (recycler) {
     textureClient =
-      recycler->CreateOrRecycleForDrawing(surface->GetFormat(),
-                                          surface->GetSize(),
-                                          BackendSelector::Content,
-                                          aClient->GetTextureFlags());
+      recycler->CreateOrRecycle(surface->GetFormat(),
+                                surface->GetSize(),
+                                BackendSelector::Content,
+                                aClient->GetTextureFlags());
   }
 #endif
 
 #endif
   if (!textureClient) {
     // gfx::BackendType::NONE means default to content backend
     textureClient = aClient->CreateTextureClientForDrawing(surface->GetFormat(),
                                                            surface->GetSize(),
--- a/gfx/layers/ImageContainer.h
+++ b/gfx/layers/ImageContainer.h
@@ -283,18 +283,18 @@ class ImageContainer final : public Supp
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageContainer)
 public:
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(ImageContainer)
 
   enum Mode { SYNCHRONOUS = 0x0, ASYNCHRONOUS = 0x01, ASYNCHRONOUS_OVERLAY = 0x02 };
 
   explicit ImageContainer(ImageContainer::Mode flag = SYNCHRONOUS);
 
-  typedef int32_t FrameID;
-  typedef int32_t ProducerID;
+  typedef uint32_t FrameID;
+  typedef uint32_t ProducerID;
 
 
   /**
    * Create an Image in one of the given formats.
    * Picks the "best" format from the list and creates an Image of that
    * format.
    * Returns null if this backend does not support any of the formats.
    * Can be called on any thread. This method takes mReentrantMonitor
--- a/gfx/layers/client/ImageClient.cpp
+++ b/gfx/layers/client/ImageClient.cpp
@@ -76,28 +76,33 @@ ImageClient::RemoveTexture(TextureClient
 {
   RemoveTextureWithWaiter(aTexture);
 }
 
 void
 ImageClient::RemoveTextureWithWaiter(TextureClient* aTexture,
                                      AsyncTransactionWaiter* aAsyncTransactionWaiter)
 {
-#ifdef MOZ_WIDGET_GONK
-  if (aAsyncTransactionWaiter ||
-      GetForwarder()->IsImageBridgeChild()) {
+  if ((aAsyncTransactionWaiter ||
+      GetForwarder()->IsImageBridgeChild())
+#ifndef MOZ_WIDGET_GONK
+      // If the texture client is taking part in recycling then we should make sure
+      // the host has finished with it before dropping the ref and triggering
+      // the recycle callback.
+      && aTexture->GetRecycleAllocator()
+#endif
+     ) {
     RefPtr<AsyncTransactionTracker> request =
       new RemoveTextureFromCompositableTracker(aAsyncTransactionWaiter);
     // Hold TextureClient until the transaction complete to postpone
     // the TextureClient recycle/delete.
     request->SetTextureClient(aTexture);
     GetForwarder()->RemoveTextureFromCompositableAsync(request, this, aTexture);
     return;
   }
-#endif
 
   GetForwarder()->RemoveTextureFromCompositable(this, aTexture);
 }
 
 ImageClientSingle::ImageClientSingle(CompositableForwarder* aFwd,
                                      TextureFlags aFlags,
                                      CompositableType aType)
   : ImageClient(aFwd, aFlags, aType)
--- a/gfx/layers/client/TextureClient.cpp
+++ b/gfx/layers/client/TextureClient.cpp
@@ -244,16 +244,34 @@ TextureClient::WaitForCompositorRecycle(
 void
 TextureClient::SetAddedToCompositableClient()
 {
   if (!mAddedToCompositableClient) {
     mAddedToCompositableClient = true;
   }
 }
 
+/* static */ void
+TextureClient::TextureClientRecycleCallback(TextureClient* aClient, void* aClosure)
+{
+  MOZ_ASSERT(aClient->GetRecycleAllocator());
+  aClient->GetRecycleAllocator()->RecycleTextureClient(aClient);
+}
+
+void
+TextureClient::SetRecycleAllocator(TextureClientRecycleAllocator* aAllocator)
+{
+  mRecycleAllocator = aAllocator;
+  if (aAllocator) {
+    SetRecycleCallback(TextureClientRecycleCallback, nullptr);
+  } else {
+    ClearRecycleCallback();
+  }
+}
+
 bool
 TextureClient::InitIPDLActor(CompositableForwarder* aForwarder)
 {
   MOZ_ASSERT(aForwarder && aForwarder->GetMessageLoop() == mAllocator->GetMessageLoop());
   if (mActor && mActor->GetForwarder() == aForwarder) {
     return true;
   }
   MOZ_ASSERT(!mActor, "Cannot use a texture on several IPC channels.");
--- a/gfx/layers/client/TextureClient.h
+++ b/gfx/layers/client/TextureClient.h
@@ -45,16 +45,17 @@ class CompositableForwarder;
 class ISurfaceAllocator;
 class CompositableClient;
 struct PlanarYCbCrData;
 class Image;
 class PTextureChild;
 class TextureChild;
 class BufferTextureClient;
 class TextureClient;
+class TextureClientRecycleAllocator;
 #ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL
 class TextureClientPool;
 #endif
 class KeepAlive;
 
 /**
  * TextureClient is the abstraction that allows us to share data between the
  * content and the compositor side.
@@ -486,17 +487,27 @@ public:
    }
 
    virtual void SyncWithObject(SyncObject* aSyncObject) { }
 
    void MarkShared() {
      mShared = true;
    }
 
+  ISurfaceAllocator* GetAllocator()
+  {
+    return mAllocator;
+  }
+
+   TextureClientRecycleAllocator* GetRecycleAllocator() { return mRecycleAllocator; }
+   void SetRecycleAllocator(TextureClientRecycleAllocator* aAllocator);
+
 private:
+  static void TextureClientRecycleCallback(TextureClient* aClient, void* aClosure);
+
   /**
    * Called once, just before the destructor.
    *
    * Here goes the shut-down code that uses virtual methods.
    * Must only be called by Release().
    */
   B2G_ACL_EXPORT void Finalize();
 
@@ -514,23 +525,19 @@ protected:
    * Some texture implementations rely on the fact that the descriptor will be
    * deserialized.
    * Calling ToSurfaceDescriptor again after it has already returned true,
    * or never constructing a TextureHost with aDescriptor may result in a memory
    * leak (see TextureClientD3D9 for example).
    */
   virtual bool ToSurfaceDescriptor(SurfaceDescriptor& aDescriptor) = 0;
 
-  ISurfaceAllocator* GetAllocator()
-  {
-    return mAllocator;
-  }
-
   RefPtr<TextureChild> mActor;
   RefPtr<ISurfaceAllocator> mAllocator;
+  RefPtr<TextureClientRecycleAllocator> mRecycleAllocator;
   TextureFlags mFlags;
   FenceHandle mReleaseFenceHandle;
   FenceHandle mAcquireFenceHandle;
   gl::GfxTextureWasteTracker mWasteTracker;
   bool mShared;
   bool mValid;
   bool mAddedToCompositableClient;
 
--- a/gfx/layers/client/TextureClientRecycleAllocator.cpp
+++ b/gfx/layers/client/TextureClientRecycleAllocator.cpp
@@ -1,272 +1,147 @@
 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include <map>
-#include <stack>
-
 #include "gfxPlatform.h"
-#include "mozilla/layers/GrallocTextureClient.h"
 #include "mozilla/layers/ISurfaceAllocator.h"
-#include "mozilla/Mutex.h"
-
 #include "TextureClientRecycleAllocator.h"
 
 namespace mozilla {
 namespace layers {
 
-class TextureClientRecycleAllocatorImp : public ISurfaceAllocator
+// Used to keep TextureClient's reference count stable as not to disrupt recycling.
+class TextureClientHolder
 {
-  ~TextureClientRecycleAllocatorImp();
-
+  ~TextureClientHolder() {}
 public:
-  explicit TextureClientRecycleAllocatorImp(ISurfaceAllocator* aAllocator);
-
-  void SetMaxPoolSize(uint32_t aMax)
-  {
-    if (aMax > 0) {
-      mMaxPooledSize = aMax;
-    }
-  }
-
-  // Creates and allocates a TextureClient.
-  already_AddRefed<TextureClient>
-  CreateOrRecycleForDrawing(gfx::SurfaceFormat aFormat,
-                            gfx::IntSize aSize,
-                            BackendSelector aSelector,
-                            TextureFlags aTextureFlags,
-                            TextureAllocationFlags flags);
-
-  void Destroy();
-
-  void RecycleCallbackImp(TextureClient* aClient);
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientHolder)
 
-  static void RecycleCallback(TextureClient* aClient, void* aClosure);
+  explicit TextureClientHolder(TextureClient* aClient)
+    : mTextureClient(aClient)
+  {}
 
-  // ISurfaceAllocator
-  virtual LayersBackend GetCompositorBackendType() const override
-  {
-    return mSurfaceAllocator->GetCompositorBackendType();
-  }
-
-  virtual bool AllocShmem(size_t aSize,
-                          mozilla::ipc::SharedMemory::SharedMemoryType aType,
-                          mozilla::ipc::Shmem* aShmem) override
+  TextureClient* GetTextureClient()
   {
-    return mSurfaceAllocator->AllocShmem(aSize, aType, aShmem);
-  }
-
-  virtual bool AllocUnsafeShmem(size_t aSize,
-                                mozilla::ipc::SharedMemory::SharedMemoryType aType,
-                                mozilla::ipc::Shmem* aShmem) override
-  {
-    return mSurfaceAllocator->AllocUnsafeShmem(aSize, aType, aShmem);
-  }
-
-  virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) override
-  {
-    mSurfaceAllocator->DeallocShmem(aShmem);
+    return mTextureClient;
   }
 
-  virtual bool IsSameProcess() const override
-  {
-    return mSurfaceAllocator->IsSameProcess();
-  }
-
-  virtual MessageLoop * GetMessageLoop() const override
-  {
-    return mSurfaceAllocator->GetMessageLoop();
-  }
-
+  void ClearTextureClient() { mTextureClient = nullptr; }
 protected:
-  // ISurfaceAllocator
-  virtual bool IsOnCompositorSide() const override
-  {
-    return false;
-  }
-
-private:
-  static const uint32_t kMaxPooledSized = 2;
-
-  // Used to keep TextureClient's reference count stable as not to disrupt recycling.
-  class TextureClientHolder
-  {
-    ~TextureClientHolder() {}
-  public:
-    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientHolder)
-
-    explicit TextureClientHolder(TextureClient* aClient)
-      : mTextureClient(aClient)
-    {}
-
-    TextureClient* GetTextureClient()
-    {
-      return mTextureClient;
-    }
-    void ClearTextureClient() { mTextureClient = nullptr; }
-  protected:
-    RefPtr<TextureClient> mTextureClient;
-  };
-
-  bool mDestroyed;
-  uint32_t mMaxPooledSize;
-  RefPtr<ISurfaceAllocator> mSurfaceAllocator;
-  std::map<TextureClient*, RefPtr<TextureClientHolder> > mInUseClients;
-
-  // On b2g gonk, std::queue might be a better choice.
-  // On ICS, fence wait happens implicitly before drawing.
-  // Since JB, fence wait happens explicitly when fetching a client from the pool.
-  // stack is good from Graphics cache usage point of view.
-  std::stack<RefPtr<TextureClientHolder> > mPooledClients;
-  Mutex mLock;
+  RefPtr<TextureClient> mTextureClient;
 };
 
-TextureClientRecycleAllocatorImp::TextureClientRecycleAllocatorImp(ISurfaceAllocator *aAllocator)
-  : mDestroyed(false)
+TextureClientRecycleAllocator::TextureClientRecycleAllocator(ISurfaceAllocator *aAllocator)
+  : mSurfaceAllocator(aAllocator)
   , mMaxPooledSize(kMaxPooledSized)
-  , mSurfaceAllocator(aAllocator)
   , mLock("TextureClientRecycleAllocatorImp.mLock")
 {
 }
 
-TextureClientRecycleAllocatorImp::~TextureClientRecycleAllocatorImp()
+TextureClientRecycleAllocator::~TextureClientRecycleAllocator()
 {
-  MOZ_ASSERT(mDestroyed);
-  MOZ_ASSERT(mPooledClients.empty());
+  MutexAutoLock lock(mLock);
+  while (!mPooledClients.empty()) {
+    mPooledClients.pop();
+  }
   MOZ_ASSERT(mInUseClients.empty());
 }
 
+void
+TextureClientRecycleAllocator::SetMaxPoolSize(uint32_t aMax)
+{
+  mMaxPooledSize = aMax;
+}
+
 already_AddRefed<TextureClient>
-TextureClientRecycleAllocatorImp::CreateOrRecycleForDrawing(
-                                             gfx::SurfaceFormat aFormat,
-                                             gfx::IntSize aSize,
-                                             BackendSelector aSelector,
-                                             TextureFlags aTextureFlags,
-                                             TextureAllocationFlags aAllocFlags)
+TextureClientRecycleAllocator::CreateOrRecycle(gfx::SurfaceFormat aFormat,
+                                               gfx::IntSize aSize,
+                                               BackendSelector aSelector,
+                                               TextureFlags aTextureFlags,
+                                               TextureAllocationFlags aAllocFlags)
 {
   // TextureAllocationFlags is actually used only by ContentClient.
   // This class does not handle ConteClient's TextureClient allocation.
   MOZ_ASSERT(aAllocFlags == TextureAllocationFlags::ALLOC_DEFAULT ||
              aAllocFlags == TextureAllocationFlags::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
   MOZ_ASSERT(!(aTextureFlags & TextureFlags::RECYCLE));
   aTextureFlags = aTextureFlags | TextureFlags::RECYCLE; // Set recycle flag
 
   RefPtr<TextureClientHolder> textureHolder;
 
   {
     MutexAutoLock lock(mLock);
-    if (mDestroyed) {
-      return nullptr;
-    } else if (!mPooledClients.empty()) {
+    if (!mPooledClients.empty()) {
       textureHolder = mPooledClients.top();
       mPooledClients.pop();
       // If a pooled TextureClient is not compatible, release it.
       if (textureHolder->GetTextureClient()->GetFormat() != aFormat ||
-          textureHolder->GetTextureClient()->GetSize() != aSize)
-      {
+          textureHolder->GetTextureClient()->GetSize() != aSize) {
         TextureClientReleaseTask* task = new TextureClientReleaseTask(textureHolder->GetTextureClient());
         textureHolder->ClearTextureClient();
         textureHolder = nullptr;
         // Release TextureClient.
         mSurfaceAllocator->GetMessageLoop()->PostTask(FROM_HERE, task);
       } else {
         textureHolder->GetTextureClient()->RecycleTexture(aTextureFlags);
       }
     }
   }
 
   if (!textureHolder) {
     // Allocate new TextureClient
-    RefPtr<TextureClient> texture;
-    texture = TextureClient::CreateForDrawing(this, aFormat, aSize, aSelector,
-                                              aTextureFlags, aAllocFlags);
+    RefPtr<TextureClient> texture = Allocate(aFormat, aSize, aSelector, aTextureFlags, aAllocFlags);
     if (!texture) {
       return nullptr;
     }
     textureHolder = new TextureClientHolder(texture);
   }
 
   {
     MutexAutoLock lock(mLock);
     MOZ_ASSERT(mInUseClients.find(textureHolder->GetTextureClient()) == mInUseClients.end());
     // Register TextureClient
     mInUseClients[textureHolder->GetTextureClient()] = textureHolder;
   }
-  textureHolder->GetTextureClient()->SetRecycleCallback(TextureClientRecycleAllocatorImp::RecycleCallback, this);
   RefPtr<TextureClient> client(textureHolder->GetTextureClient());
+
+  // Make sure the texture holds a reference to us, and ask it to call RecycleTextureClient when its
+  // ref count drops to 1.
+  client->SetRecycleAllocator(this);
   return client.forget();
 }
 
+already_AddRefed<TextureClient>
+TextureClientRecycleAllocator::Allocate(gfx::SurfaceFormat aFormat,
+                                        gfx::IntSize aSize,
+                                        BackendSelector aSelector,
+                                        TextureFlags aTextureFlags,
+                                        TextureAllocationFlags aAllocFlags)
+{
+  return TextureClient::CreateForDrawing(mSurfaceAllocator, aFormat, aSize, aSelector,
+                                         aTextureFlags, aAllocFlags);
+}
+
 void
-TextureClientRecycleAllocatorImp::Destroy()
+TextureClientRecycleAllocator::RecycleTextureClient(TextureClient* aClient)
 {
-  MutexAutoLock lock(mLock);
-  if (mDestroyed) {
-    return;
-  }
-  mDestroyed = true;
-  while (!mPooledClients.empty()) {
-    mPooledClients.pop();
-  }
-}
+  // Clearing the recycle allocator drops a reference, so make sure we stay alive
+  // for the duration of this function.
+  RefPtr<TextureClientRecycleAllocator> kungFuDeathGrip(this);
+  aClient->SetRecycleAllocator(nullptr);
 
-void
-TextureClientRecycleAllocatorImp::RecycleCallbackImp(TextureClient* aClient)
-{
   RefPtr<TextureClientHolder> textureHolder;
-  aClient->ClearRecycleCallback();
   {
     MutexAutoLock lock(mLock);
     if (mInUseClients.find(aClient) != mInUseClients.end()) {
       textureHolder = mInUseClients[aClient]; // Keep reference count of TextureClientHolder within lock.
-      if (!mDestroyed && mPooledClients.size() < mMaxPooledSize) {
+      if (mPooledClients.size() < mMaxPooledSize) {
         mPooledClients.push(textureHolder);
       }
       mInUseClients.erase(aClient);
     }
   }
 }
 
-/* static */ void
-TextureClientRecycleAllocatorImp::RecycleCallback(TextureClient* aClient, void* aClosure)
-{
-  MOZ_ASSERT(aClient && !aClient->IsDead());
-  TextureClientRecycleAllocatorImp* recycleAllocator = static_cast<TextureClientRecycleAllocatorImp*>(aClosure);
-  recycleAllocator->RecycleCallbackImp(aClient);
-}
-
-TextureClientRecycleAllocator::TextureClientRecycleAllocator(ISurfaceAllocator *aAllocator)
-{
-  mAllocator = new TextureClientRecycleAllocatorImp(aAllocator);
-}
-
-TextureClientRecycleAllocator::~TextureClientRecycleAllocator()
-{
-  mAllocator->Destroy();
-  mAllocator = nullptr;
-}
-
-void
-TextureClientRecycleAllocator::SetMaxPoolSize(uint32_t aMax)
-{
-  mAllocator->SetMaxPoolSize(aMax);
-}
-
-already_AddRefed<TextureClient>
-TextureClientRecycleAllocator::CreateOrRecycleForDrawing(
-                                            gfx::SurfaceFormat aFormat,
-                                            gfx::IntSize aSize,
-                                            BackendSelector aSelector,
-                                            TextureFlags aTextureFlags,
-                                            TextureAllocationFlags aAllocFlags)
-{
-  return mAllocator->CreateOrRecycleForDrawing(aFormat,
-                                               aSize,
-                                               aSelector,
-                                               aTextureFlags,
-                                               aAllocFlags);
-}
-
 } // namespace layers
 } // namespace mozilla
--- a/gfx/layers/client/TextureClientRecycleAllocator.h
+++ b/gfx/layers/client/TextureClientRecycleAllocator.h
@@ -1,52 +1,82 @@
 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_GFX_TEXTURECLIENT_RECYCLE_ALLOCATOR_H
 #define MOZILLA_GFX_TEXTURECLIENT_RECYCLE_ALLOCATOR_H
 
+#include <map>
+#include <stack>
 #include "mozilla/gfx/Types.h"
 #include "mozilla/RefPtr.h"
 #include "TextureClient.h"
+#include "mozilla/Mutex.h"
 
 namespace mozilla {
 namespace layers {
 
 class ISurfaceAllocator;
-class TextureClientRecycleAllocatorImp;
+class TextureClientHolder;
 
 
 /**
  * TextureClientRecycleAllocator provides TextureClients allocation and
  * recycling capabilities. It expects allocations of same sizes and
  * attributres. If a recycled TextureClient is different from
  * requested one, the recycled one is dropped and new TextureClient is allocated.
+ *
+ * By default this uses TextureClient::CreateForDrawing to allocate new texture
+ * clients.
  */
 class TextureClientRecycleAllocator
 {
-  ~TextureClientRecycleAllocator();
+protected:
+  virtual ~TextureClientRecycleAllocator();
 
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientRecycleAllocator)
 
   explicit TextureClientRecycleAllocator(ISurfaceAllocator* aAllocator);
 
   void SetMaxPoolSize(uint32_t aMax);
 
   // Creates and allocates a TextureClient.
   already_AddRefed<TextureClient>
-  CreateOrRecycleForDrawing(gfx::SurfaceFormat aFormat,
-                            gfx::IntSize aSize,
-                            BackendSelector aSelector,
-                            TextureFlags aTextureFlags,
-                            TextureAllocationFlags flags = ALLOC_DEFAULT);
+  CreateOrRecycle(gfx::SurfaceFormat aFormat,
+                  gfx::IntSize aSize,
+                  BackendSelector aSelector,
+                  TextureFlags aTextureFlags,
+                  TextureAllocationFlags flags = ALLOC_DEFAULT);
+
+protected:
+  virtual already_AddRefed<TextureClient>
+  Allocate(gfx::SurfaceFormat aFormat,
+           gfx::IntSize aSize,
+           BackendSelector aSelector,
+           TextureFlags aTextureFlags,
+           TextureAllocationFlags aAllocFlags);
+
+  RefPtr<ISurfaceAllocator> mSurfaceAllocator;
 
 private:
-  RefPtr<TextureClientRecycleAllocatorImp> mAllocator;
+  friend class TextureClient;
+  void RecycleTextureClient(TextureClient* aClient);
+
+  static const uint32_t kMaxPooledSized = 2;
+  uint32_t mMaxPooledSize;
+
+  std::map<TextureClient*, RefPtr<TextureClientHolder> > mInUseClients;
+
+  // On b2g gonk, std::queue might be a better choice.
+  // On ICS, fence wait happens implicitly before drawing.
+  // Since JB, fence wait happens explicitly when fetching a client from the pool.
+  // stack is good from Graphics cache usage point of view.
+  std::stack<RefPtr<TextureClientHolder> > mPooledClients;
+  Mutex mLock;
 };
 
 } // namespace layers
 } // namespace mozilla
 
 #endif /* MOZILLA_GFX_TEXTURECLIENT_RECYCLE_ALLOCATOR_H */
--- a/gfx/layers/d3d9/TextureD3D9.cpp
+++ b/gfx/layers/d3d9/TextureD3D9.cpp
@@ -670,32 +670,55 @@ SharedTextureClientD3D9::~SharedTextureC
   MOZ_COUNT_DTOR(SharedTextureClientD3D9);
 }
 
 // static
 already_AddRefed<SharedTextureClientD3D9>
 SharedTextureClientD3D9::Create(ISurfaceAllocator* aAllocator,
                                 gfx::SurfaceFormat aFormat,
                                 TextureFlags aFlags,
-                                IDirect3DTexture9* aTexture,
-                                HANDLE aSharedHandle,
-                                D3DSURFACE_DESC aDesc)
+                                IDirect3DDevice9* aDevice,
+                                const gfx::IntSize& aSize)
 {
-  RefPtr<SharedTextureClientD3D9> texture =
+  MOZ_ASSERT(aFormat == gfx::SurfaceFormat::B8G8R8X8);
+
+  RefPtr<IDirect3DTexture9> texture;
+  HANDLE shareHandle = nullptr;
+  HRESULT hr = aDevice->CreateTexture(aSize.width,
+                                      aSize.height,
+                                      1,
+                                      D3DUSAGE_RENDERTARGET,
+                                      D3DFMT_X8R8G8B8,
+                                      D3DPOOL_DEFAULT,
+                                      byRef(texture),
+                                      &shareHandle);
+  NS_ENSURE_TRUE(SUCCEEDED(hr) && shareHandle, nullptr);
+
+  RefPtr<SharedTextureClientD3D9> client =
     new SharedTextureClientD3D9(aAllocator,
                                 aFormat,
                                 aFlags);
-  MOZ_ASSERT(!texture->mTexture);
-  texture->mTexture = aTexture;
-  texture->mHandle = aSharedHandle;
-  texture->mDesc = aDesc;
-  if (texture->mTexture) {
-    gfxWindowsPlatform::sD3D9SharedTextureUsed += texture->mDesc.Width * texture->mDesc.Height * 4;
-  }
-  return texture.forget();
+
+  client->mDevice = aDevice;
+  client->mTexture = texture;
+  client->mHandle = shareHandle;
+  texture->GetLevelDesc(0, &client->mDesc);
+
+  gfxWindowsPlatform::sD3D9SharedTextureUsed += aSize.width * aSize.height * 4;
+  return client.forget();
+}
+
+already_AddRefed<IDirect3DSurface9>
+SharedTextureClientD3D9::GetD3D9Surface() const
+{
+  RefPtr<IDirect3DSurface9> textureSurface;
+  HRESULT hr = mTexture->GetSurfaceLevel(0, byRef(textureSurface));
+  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+  return textureSurface.forget();
 }
 
 bool
 SharedTextureClientD3D9::Lock(OpenMode)
 {
   MOZ_ASSERT(!mIsLocked);
   if (!IsValid()) {
     return false;
--- a/gfx/layers/d3d9/TextureD3D9.h
+++ b/gfx/layers/d3d9/TextureD3D9.h
@@ -239,24 +239,25 @@ public:
 
   virtual ~SharedTextureClientD3D9();
 
   // Creates a TextureClient and init width.
   static already_AddRefed<SharedTextureClientD3D9>
   Create(ISurfaceAllocator* aAllocator,
          gfx::SurfaceFormat aFormat,
          TextureFlags aFlags,
-         IDirect3DTexture9* aTexture,
-         HANDLE aSharedHandle,
-         D3DSURFACE_DESC aDesc);
+         IDirect3DDevice9* aDevice,
+         const gfx::IntSize& aSize);
 
   // TextureClient
 
   virtual bool IsAllocated() const override { return !!mTexture; }
 
+  virtual gfx::SurfaceFormat GetFormat() const override { return mFormat; }
+
   virtual bool Lock(OpenMode aOpenMode) override;
 
   virtual void Unlock() override;
 
   virtual bool IsLocked() const override { return mIsLocked; }
 
   virtual bool ToSurfaceDescriptor(SurfaceDescriptor& aOutDescriptor) override;
 
@@ -268,17 +269,28 @@ public:
   virtual bool HasInternalBuffer() const override { return true; }
 
   // This TextureClient should not be used in a context where we use CreateSimilar
   // (ex. component alpha) because the underlying texture data is always created by
   // an external producer.
   virtual already_AddRefed<TextureClient>
   CreateSimilar(TextureFlags, TextureAllocationFlags) const override { return nullptr; }
 
+  IDirect3DDevice9* GetD3D9Device() { return mDevice; }
+  IDirect3DTexture9* GetD3D9Texture() { return mTexture; }
+  HANDLE GetShareHandle() const { return mHandle; }
+  already_AddRefed<IDirect3DSurface9> GetD3D9Surface() const;
+
+  const D3DSURFACE_DESC& GetDesc() const
+  {
+    return mDesc;
+  }
+
 private:
+  RefPtr<IDirect3DDevice9> mDevice;
   RefPtr<IDirect3DTexture9> mTexture;
   gfx::SurfaceFormat mFormat;
   HANDLE mHandle;
   D3DSURFACE_DESC mDesc;
   bool mIsLocked;
 };
 
 class TextureHostD3D9 : public TextureHost
--- a/gfx/layers/ipc/LayersMessages.ipdlh
+++ b/gfx/layers/ipc/LayersMessages.ipdlh
@@ -387,18 +387,18 @@ union MaybeFence {
   null_t;
 };
 
 struct TimedTexture {
   PTexture texture;
   MaybeFence fence;
   TimeStamp timeStamp;
   IntRect picture;
-  int32_t frameID;
-  int32_t producerID;
+  uint32_t frameID;
+  uint32_t producerID;
 };
 
 /**
  * Tells the compositor-side which textures to use (for example, as front buffer
  * if there are several textures for double buffering).
  * This provides a list of textures with timestamps, ordered by timestamp.
  * The newest texture whose timestamp is <= the current time is rendered
  * (where null is considered less than every other timestamp). If there is no
@@ -482,18 +482,18 @@ struct OpContentBufferSwap {
 /**
  * An ImageCompositeNotification is sent the first time a particular
  * image is composited by an ImageHost.
  */
 struct ImageCompositeNotification {
   PImageContainer imageContainer;
   TimeStamp imageTimeStamp;
   TimeStamp firstCompositeTimeStamp;
-  int32_t frameID;
-  int32_t producerID;
+  uint32_t frameID;
+  uint32_t producerID;
 };
 
 // Unit of a "changeset reply".  This is a weird abstraction, probably
 // only to be used for buffer swapping.
 union EditReply {
   OpContentBufferSwap;
 };
 
--- a/gfx/src/DriverCrashGuard.cpp
+++ b/gfx/src/DriverCrashGuard.cpp
@@ -20,16 +20,17 @@
 
 namespace mozilla {
 namespace gfx {
 
 static const size_t NUM_CRASH_GUARD_TYPES = size_t(CrashGuardType::NUM_TYPES);
 static const char* sCrashGuardNames[NUM_CRASH_GUARD_TYPES] = {
   "d3d11layers",
   "d3d9video",
+  "glcontext",
 };
 
 DriverCrashGuard::DriverCrashGuard(CrashGuardType aType, dom::ContentParent* aContentParent)
  : mType(aType)
  , mMode(aContentParent ? Mode::Proxy : Mode::Normal)
  , mInitialized(false)
  , mGuardActivated(false)
  , mCrashDetected(false)
@@ -49,16 +50,26 @@ DriverCrashGuard::InitializeIfNeeded()
 
   mInitialized = true;
   Initialize();
 }
 
 void
 DriverCrashGuard::Initialize()
 {
+  // Using DriverCrashGuard off the main thread currently does not work. Under
+  // e10s it could conceivably work by dispatching the IPC calls via the main
+  // thread. In the parent process this would be harder. For now, we simply
+  // exit early instead.
+  if (!NS_IsMainThread()) {
+    return;
+  }
+
+  mGfxInfo = services::GetGfxInfo();
+
   if (XRE_IsContentProcess()) {
     // Ask the parent whether or not activating the guard is okay. The parent
     // won't bother if it detected a crash.
     dom::ContentChild* cc = dom::ContentChild::GetSingleton();
     cc->SendBeginDriverCrashGuard(uint32_t(mType), &mCrashDetected);
     if (mCrashDetected) {
       LogFeatureDisabled();
       return;
@@ -246,17 +257,16 @@ DriverCrashGuard::CheckOrRefreshEnvironm
          sBaseInfoChanged ||
          GetStatus() == DriverInitStatus::Unknown;
 }
 
 bool
 DriverCrashGuard::UpdateBaseEnvironment()
 {
   bool changed = false;
-  mGfxInfo = services::GetGfxInfo();
   if (mGfxInfo) {
     nsString value;
 
     // Driver properties.
     mGfxInfo->GetAdapterDriverVersion(value);
     changed |= CheckAndUpdatePref("driverVersion", value);
     mGfxInfo->GetAdapterDeviceID(value);
     changed |= CheckAndUpdatePref("deviceID", value);
@@ -264,18 +274,21 @@ DriverCrashGuard::UpdateBaseEnvironment(
 
   // Firefox properties.
   changed |= CheckAndUpdatePref("appVersion", NS_LITERAL_STRING(MOZ_APP_VERSION));
 
   return changed;
 }
 
 bool
-DriverCrashGuard::FeatureEnabled(int aFeature)
+DriverCrashGuard::FeatureEnabled(int aFeature, bool aDefault)
 {
+  if (!mGfxInfo) {
+    return aDefault;
+  }
   int32_t status;
   if (!NS_SUCCEEDED(mGfxInfo->GetFeatureStatus(aFeature, &status))) {
     return false;
   }
   return status == nsIGfxInfo::FEATURE_STATUS_OK;
 }
 
 bool
@@ -368,30 +381,28 @@ D3D11LayersCrashGuard::UpdateEnvironment
   static bool changed = false;
 
   if (checked) {
     return changed;
   }
 
   checked = true;
 
-  if (mGfxInfo) {
-    // Feature status.
+  // Feature status.
 #if defined(XP_WIN)
-    bool d2dEnabled = gfxPrefs::Direct2DForceEnabled() ||
-                      (!gfxPrefs::Direct2DDisabled() && FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT2D));
-    changed |= CheckAndUpdateBoolPref("feature-d2d", d2dEnabled);
+  bool d2dEnabled = gfxPrefs::Direct2DForceEnabled() ||
+                    (!gfxPrefs::Direct2DDisabled() && FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT2D));
+  changed |= CheckAndUpdateBoolPref("feature-d2d", d2dEnabled);
 
-    bool d3d11Enabled = !gfxPrefs::LayersPreferD3D9();
-    if (!FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_LAYERS)) {
-      d3d11Enabled = false;
-    }
-    changed |= CheckAndUpdateBoolPref("feature-d3d11", d3d11Enabled);
+  bool d3d11Enabled = !gfxPrefs::LayersPreferD3D9();
+  if (!FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_LAYERS)) {
+    d3d11Enabled = false;
+  }
+  changed |= CheckAndUpdateBoolPref("feature-d3d11", d3d11Enabled);
 #endif
-  }
 
   if (!changed) {
     return false;
   }
 
   RecordTelemetry(TelemetryState::EnvironmentChanged);
   return true;
 }
@@ -448,10 +459,55 @@ D3D9VideoCrashGuard::LogCrashRecovery()
 }
 
 void
 D3D9VideoCrashGuard::LogFeatureDisabled()
 {
   gfxCriticalError(CriticalLog::DefaultOptions(false)) << "DXVA2D3D9 video decoding is disabled due to a previous crash.";
 }
 
+GLContextCrashGuard::GLContextCrashGuard(dom::ContentParent* aContentParent)
+ : DriverCrashGuard(CrashGuardType::GLContext, aContentParent)
+{
+}
+
+bool
+GLContextCrashGuard::UpdateEnvironment()
+{
+  static bool checked = false;
+  static bool changed = false;
+
+  if (checked) {
+    return changed;
+  }
+
+  checked = true;
+
+#if defined(XP_WIN)
+  changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-force-d3d11",
+                                    gfxPrefs::WebGLANGLEForceD3D11());
+  changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-try-d3d11",
+                                    gfxPrefs::WebGLANGLETryD3D11());
+  changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-force-warp",
+                                    gfxPrefs::WebGLANGLEForceWARP());
+  changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle",
+                                    FeatureEnabled(nsIGfxInfo::FEATURE_WEBGL_ANGLE, false));
+  changed |= CheckAndUpdateBoolPref("gfx.driver-init.direct3d11-angle",
+                                    FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_ANGLE, false));
+#endif
+
+  return changed;
+}
+
+void
+GLContextCrashGuard::LogCrashRecovery()
+{
+  gfxCriticalError(CriticalLog::DefaultOptions(false)) << "GLContext just crashed and is now disabled.";
+}
+
+void
+GLContextCrashGuard::LogFeatureDisabled()
+{
+  gfxCriticalError(CriticalLog::DefaultOptions(false)) << "GLContext is disabled due to a previous crash.";
+}
+
 } // namespace gfx
 } // namespace mozilla
--- a/gfx/src/DriverCrashGuard.h
+++ b/gfx/src/DriverCrashGuard.h
@@ -34,16 +34,17 @@ enum class DriverInitStatus
   // We crashed during driver initialization, and have restarted.
   Crashed
 };
 
 enum class CrashGuardType : uint32_t
 {
   D3D11Layers,
   D3D9Video,
+  GLContext,
   NUM_TYPES
 };
 
 // DriverCrashGuard is used to detect crashes at graphics driver callsites.
 // 
 // If the graphics environment is unrecognized or has changed since the last
 // session, the crash guard will activate and will detect any crashes within
 // the scope of the guard object.
@@ -79,17 +80,17 @@ public:
 
 protected:
   virtual void Initialize();
   virtual bool UpdateEnvironment() = 0;
   virtual void LogCrashRecovery() = 0;
   virtual void LogFeatureDisabled() = 0;
 
   // Helper functions.
-  bool FeatureEnabled(int aFeature);
+  bool FeatureEnabled(int aFeature, bool aDefault=true);
   bool CheckAndUpdatePref(const char* aPrefName, const nsAString& aCurrentValue);
   bool CheckAndUpdateBoolPref(const char* aPrefName, bool aCurrentValue);
   std::string GetFullPrefName(const char* aPref);
 
 private:
   // Either process.
   void InitializeIfNeeded();
   bool CheckOrRefreshEnvironment();
@@ -137,13 +138,24 @@ class D3D9VideoCrashGuard final : public
   explicit D3D9VideoCrashGuard(dom::ContentParent* aContentParent = nullptr);
 
  protected:
   bool UpdateEnvironment() override;
   void LogCrashRecovery() override;
   void LogFeatureDisabled() override;
 };
 
+class GLContextCrashGuard final : public DriverCrashGuard
+{
+ public:
+  explicit GLContextCrashGuard(dom::ContentParent* aContentParent = nullptr);
+
+ protected:
+  bool UpdateEnvironment() override;
+  void LogCrashRecovery() override;
+  void LogFeatureDisabled() override;
+};
+
 } // namespace gfx
 } // namespace mozilla
 
 #endif // gfx_src_DriverCrashGuard_h__
 
--- a/gfx/thebes/gfxPlatformGtk.cpp
+++ b/gfx/thebes/gfxPlatformGtk.cpp
@@ -60,16 +60,18 @@ static cairo_user_data_key_t cairo_gdk_d
 #ifdef MOZ_X11
     bool gfxPlatformGtk::sUseXRender = true;
 #endif
 
 bool gfxPlatformGtk::sUseFcFontList = false;
 
 gfxPlatformGtk::gfxPlatformGtk()
 {
+    gtk_init(nullptr, nullptr);
+
     sUseFcFontList = mozilla::Preferences::GetBool("gfx.font_rendering.fontconfig.fontlist.enabled");
     if (!sUseFcFontList && !sFontconfigUtils) {
         sFontconfigUtils = gfxFontconfigUtils::GetFontconfigUtils();
     }
 
 #ifdef MOZ_X11
     sUseXRender = (GDK_IS_X11_DISPLAY(gdk_display_get_default())) ? 
                     mozilla::Preferences::GetBool("gfx.xrender.enabled") : false;
--- a/gfx/thebes/gfxWindowsPlatform.cpp
+++ b/gfx/thebes/gfxWindowsPlatform.cpp
@@ -328,36 +328,16 @@ public:
     return MOZ_COLLECT_REPORT("d3d9-shared-textures", KIND_OTHER, UNITS_BYTES,
                               gfxWindowsPlatform::sD3D9MemoryUsed,
                               "Memory used for D3D9 shared textures");
   }
 };
 
 NS_IMPL_ISUPPORTS(D3D9TextureReporter, nsIMemoryReporter)
 
-Atomic<size_t> gfxWindowsPlatform::sD3D9SurfaceImageUsed;
-
-class D3D9SurfaceImageReporter final : public nsIMemoryReporter
-{
-  ~D3D9SurfaceImageReporter() {}
-
-public:
-  NS_DECL_ISUPPORTS
-
-  NS_IMETHOD CollectReports(nsIHandleReportCallback *aHandleReport,
-                            nsISupports* aData, bool aAnonymize) override
-  {
-    return MOZ_COLLECT_REPORT("d3d9-surface-image", KIND_OTHER, UNITS_BYTES,
-                              gfxWindowsPlatform::sD3D9SurfaceImageUsed,
-                              "Memory used for D3D9 surface images");
-  }
-};
-
-NS_IMPL_ISUPPORTS(D3D9SurfaceImageReporter, nsIMemoryReporter)
-
 Atomic<size_t> gfxWindowsPlatform::sD3D9SharedTextureUsed;
 
 class D3D9SharedTextureReporter final : public nsIMemoryReporter
 {
   ~D3D9SharedTextureReporter() {}
 
 public:
   NS_DECL_ISUPPORTS
@@ -415,17 +395,16 @@ gfxWindowsPlatform::gfxWindowsPlatform()
 
     UpdateDeviceInitData();
     InitializeDevices();
     UpdateRenderMode();
 
     RegisterStrongMemoryReporter(new GPUAdapterReporter());
     RegisterStrongMemoryReporter(new D3D11TextureReporter());
     RegisterStrongMemoryReporter(new D3D9TextureReporter());
-    RegisterStrongMemoryReporter(new D3D9SurfaceImageReporter());
     RegisterStrongMemoryReporter(new D3D9SharedTextureReporter());
 }
 
 gfxWindowsPlatform::~gfxWindowsPlatform()
 {
     mDeviceManager = nullptr;
     mD3D10Device = nullptr;
     mD3D11Device = nullptr;
--- a/gfx/thebes/gfxWindowsPlatform.h
+++ b/gfx/thebes/gfxWindowsPlatform.h
@@ -279,17 +279,16 @@ public:
     mozilla::gfx::FeatureStatus GetD2D1Status() const;
     unsigned GetD3D11Version();
 
     void TestDeviceReset(DeviceResetReason aReason) override;
 
     virtual already_AddRefed<mozilla::gfx::VsyncSource> CreateHardwareVsyncSource() override;
     static mozilla::Atomic<size_t> sD3D11MemoryUsed;
     static mozilla::Atomic<size_t> sD3D9MemoryUsed;
-    static mozilla::Atomic<size_t> sD3D9SurfaceImageUsed;
     static mozilla::Atomic<size_t> sD3D9SharedTextureUsed;
 
     void GetDeviceInitData(mozilla::gfx::DeviceInitData* aOut) override;
 
 protected:
     bool AccelerateLayersByDefault() override {
       return true;
     }
--- a/image/DecodePool.cpp
+++ b/image/DecodePool.cpp
@@ -42,45 +42,47 @@ class NotifyProgressWorker : public nsRu
 public:
   /**
    * Called by the DecodePool when it's done some significant portion of
    * decoding, so that progress can be recorded and notifications can be sent.
    */
   static void Dispatch(RasterImage* aImage,
                        Progress aProgress,
                        const nsIntRect& aInvalidRect,
-                       uint32_t aFlags)
+                       SurfaceFlags aSurfaceFlags)
   {
     MOZ_ASSERT(aImage);
 
     nsCOMPtr<nsIRunnable> worker =
-      new NotifyProgressWorker(aImage, aProgress, aInvalidRect, aFlags);
+      new NotifyProgressWorker(aImage, aProgress, aInvalidRect, aSurfaceFlags);
     NS_DispatchToMainThread(worker);
   }
 
   NS_IMETHOD Run() override
   {
     MOZ_ASSERT(NS_IsMainThread());
-    mImage->NotifyProgress(mProgress, mInvalidRect, mFlags);
+    mImage->NotifyProgress(mProgress, mInvalidRect, mSurfaceFlags);
     return NS_OK;
   }
 
 private:
-  NotifyProgressWorker(RasterImage* aImage, Progress aProgress,
-                       const nsIntRect& aInvalidRect, uint32_t aFlags)
+  NotifyProgressWorker(RasterImage* aImage,
+                       Progress aProgress,
+                       const nsIntRect& aInvalidRect,
+                       SurfaceFlags aSurfaceFlags)
     : mImage(aImage)
     , mProgress(aProgress)
     , mInvalidRect(aInvalidRect)
-    , mFlags(aFlags)
+    , mSurfaceFlags(aSurfaceFlags)
   { }
 
   nsRefPtr<RasterImage> mImage;
   const Progress mProgress;
   const nsIntRect mInvalidRect;
-  const uint32_t mFlags;
+  const SurfaceFlags mSurfaceFlags;
 };
 
 class NotifyDecodeCompleteWorker : public nsRunnable
 {
 public:
   /**
    * Called by the DecodePool when decoding is complete, so that final cleanup
    * can be performed.
@@ -465,36 +467,36 @@ DecodePool::Decode(Decoder* aDecoder)
 }
 
 void
 DecodePool::NotifyProgress(Decoder* aDecoder)
 {
   MOZ_ASSERT(aDecoder);
 
   if (!NS_IsMainThread() ||
-      (aDecoder->GetFlags() & imgIContainer::FLAG_ASYNC_NOTIFY)) {
+      (aDecoder->GetDecoderFlags() & DecoderFlags::ASYNC_NOTIFY)) {
     NotifyProgressWorker::Dispatch(aDecoder->GetImage(),
                                    aDecoder->TakeProgress(),
                                    aDecoder->TakeInvalidRect(),
-                                   aDecoder->GetDecodeFlags());
+                                   aDecoder->GetSurfaceFlags());
     return;
   }
 
   aDecoder->GetImage()->NotifyProgress(aDecoder->TakeProgress(),
                                        aDecoder->TakeInvalidRect(),
-                                       aDecoder->GetDecodeFlags());
+                                       aDecoder->GetSurfaceFlags());
 }
 
 void
 DecodePool::NotifyDecodeComplete(Decoder* aDecoder)
 {
   MOZ_ASSERT(aDecoder);
 
   if (!NS_IsMainThread() ||
-      (aDecoder->GetFlags() & imgIContainer::FLAG_ASYNC_NOTIFY)) {
+      (aDecoder->GetDecoderFlags() & DecoderFlags::ASYNC_NOTIFY)) {
     NotifyDecodeCompleteWorker::Dispatch(aDecoder);
     return;
   }
 
   aDecoder->GetImage()->FinalizeDecoder(aDecoder);
 }
 
 } // namespace image
--- a/image/Decoder.cpp
+++ b/image/Decoder.cpp
@@ -26,23 +26,21 @@ Decoder::Decoder(RasterImage* aImage)
   , mImageDataLength(0)
   , mColormap(nullptr)
   , mColormapSize(0)
   , mImage(aImage)
   , mProgress(NoProgress)
   , mFrameCount(0)
   , mFailCode(NS_OK)
   , mChunkCount(0)
-  , mFlags(0)
+  , mDecoderFlags(DefaultDecoderFlags())
+  , mSurfaceFlags(DefaultSurfaceFlags())
   , mBytesDecoded(0)
   , mInitialized(false)
   , mMetadataDecode(false)
-  , mSendPartialInvalidations(false)
-  , mImageIsTransient(false)
-  , mFirstFrameDecode(false)
   , mInFrame(false)
   , mDataDone(false)
   , mDecodeDone(false)
   , mDataError(false)
   , mDecodeAborted(false)
   , mShouldReportError(false)
 { }
 
@@ -230,32 +228,34 @@ Decoder::CompleteDecode()
   }
 
   if (mDecodeDone && !IsMetadataDecode()) {
     MOZ_ASSERT(HasError() || mCurrentFrame, "Should have an error or a frame");
 
     // If this image wasn't animated and isn't a transient image, mark its frame
     // as optimizable. We don't support optimizing animated images and
     // optimizing transient images isn't worth it.
-    if (!HasAnimation() && !mImageIsTransient && mCurrentFrame) {
+    if (!HasAnimation() &&
+        !(mDecoderFlags & DecoderFlags::IMAGE_IS_TRANSIENT) &&
+        mCurrentFrame) {
       mCurrentFrame->SetOptimizable();
     }
   }
 }
 
 nsresult
 Decoder::AllocateFrame(uint32_t aFrameNum,
                        const nsIntSize& aTargetSize,
                        const nsIntRect& aFrameRect,
                        gfx::SurfaceFormat aFormat,
                        uint8_t aPaletteDepth)
 {
   mCurrentFrame = AllocateFrameInternal(aFrameNum, aTargetSize, aFrameRect,
-                                        GetDecodeFlags(), aFormat,
-                                        aPaletteDepth, mCurrentFrame.get());
+                                        aFormat, aPaletteDepth,
+                                        mCurrentFrame.get());
 
   if (mCurrentFrame) {
     // Gather the raw pointers the decoders will use.
     mCurrentFrame->GetImageData(&mImageData, &mImageDataLength);
     mCurrentFrame->GetPaletteData(&mColormap, &mColormapSize);
 
     if (aFrameNum + 1 == mFrameCount) {
       // If we're past the first frame, PostIsAnimated() should've been called.
@@ -271,17 +271,16 @@ Decoder::AllocateFrame(uint32_t aFrameNu
 
   return mCurrentFrame ? NS_OK : NS_ERROR_FAILURE;
 }
 
 RawAccessFrameRef
 Decoder::AllocateFrameInternal(uint32_t aFrameNum,
                                const nsIntSize& aTargetSize,
                                const nsIntRect& aFrameRect,
-                               uint32_t aDecodeFlags,
                                SurfaceFormat aFormat,
                                uint8_t aPaletteDepth,
                                imgFrame* aPreviousFrame)
 {
   if (mDataError || NS_FAILED(mFailCode)) {
     return RawAccessFrameRef();
   }
 
@@ -299,35 +298,34 @@ Decoder::AllocateFrameInternal(uint32_t 
   const uint32_t bytesPerPixel = aPaletteDepth == 0 ? 4 : 1;
   if (ShouldUseSurfaceCache() &&
       !SurfaceCache::CanHold(aFrameRect.Size(), bytesPerPixel)) {
     NS_WARNING("Trying to add frame that's too large for the SurfaceCache");
     return RawAccessFrameRef();
   }
 
   nsRefPtr<imgFrame> frame = new imgFrame();
-  bool nonPremult =
-    aDecodeFlags & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA;
+  bool nonPremult = bool(mSurfaceFlags & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
   if (NS_FAILED(frame->InitForDecoder(aTargetSize, aFrameRect, aFormat,
                                       aPaletteDepth, nonPremult))) {
     NS_WARNING("imgFrame::Init should succeed");
     return RawAccessFrameRef();
   }
 
   RawAccessFrameRef ref = frame->RawAccessRef();
   if (!ref) {
     frame->Abort();
     return RawAccessFrameRef();
   }
 
   if (ShouldUseSurfaceCache()) {
     InsertOutcome outcome =
       SurfaceCache::Insert(frame, ImageKey(mImage.get()),
                            RasterSurfaceKey(aTargetSize,
-                                            aDecodeFlags,
+                                            mSurfaceFlags,
                                             aFrameNum),
                            Lifetime::Persistent);
     if (outcome == InsertOutcome::FAILURE) {
       // We couldn't insert the surface, almost certainly due to low memory. We
       // treat this as a permanent error to help the system recover; otherwise,
       // we might just end up attempting to decode this image again immediately.
       ref->Abort();
       return RawAccessFrameRef();
@@ -432,34 +430,34 @@ Decoder::PostFrameStop(Opacity aFrameOpa
   mInFrame = false;
 
   mCurrentFrame->Finish(aFrameOpacity, aDisposalMethod, aTimeout, aBlendMethod);
 
   mProgress |= FLAG_FRAME_COMPLETE;
 
   // If we're not sending partial invalidations, then we send an invalidation
   // here when the first frame is complete.
-  if (!mSendPartialInvalidations && !HasAnimation()) {
+  if (!ShouldSendPartialInvalidations() && !HasAnimation()) {
     mInvalidRect.UnionRect(mInvalidRect,
                            gfx::IntRect(gfx::IntPoint(0, 0), GetSize()));
   }
 }
 
 void
 Decoder::PostInvalidation(const nsIntRect& aRect,
                           const Maybe<nsIntRect>& aRectAtTargetSize
                             /* = Nothing() */)
 {
   // We should be mid-frame
   MOZ_ASSERT(mInFrame, "Can't invalidate when not mid-frame!");
   MOZ_ASSERT(mCurrentFrame, "Can't invalidate when not mid-frame!");
 
   // Record this invalidation, unless we're not sending partial invalidations
   // or we're past the first frame.
-  if (mSendPartialInvalidations && !HasAnimation()) {
+  if (ShouldSendPartialInvalidations() && !HasAnimation()) {
     mInvalidRect.UnionRect(mInvalidRect, aRect);
     mCurrentFrame->ImageUpdated(aRectAtTargetSize.valueOr(aRect));
   }
 }
 
 void
 Decoder::PostDecodeDone(int32_t aLoopCount /* = 0 */)
 {
--- a/image/Decoder.h
+++ b/image/Decoder.h
@@ -5,19 +5,21 @@
 
 #ifndef mozilla_image_Decoder_h
 #define mozilla_image_Decoder_h
 
 #include "FrameAnimator.h"
 #include "RasterImage.h"
 #include "mozilla/RefPtr.h"
 #include "DecodePool.h"
+#include "DecoderFlags.h"
 #include "ImageMetadata.h"
 #include "Orientation.h"
 #include "SourceBuffer.h"
+#include "SurfaceFlags.h"
 
 namespace mozilla {
 
 namespace Telemetry {
   enum ID : uint32_t;
 } // namespace Telemetry
 
 namespace image {
@@ -133,70 +135,46 @@ public:
    * Set the requested resolution for this decoder. Used to implement the
    * -moz-resolution media fragment.
    *
    *  XXX(seth): Support for -moz-resolution will be removed in bug 1118926.
    */
   virtual void SetResolution(const gfx::IntSize& aResolution) { }
 
   /**
-   * Set whether should send partial invalidations.
-   *
-   * If @aSend is true, we'll send partial invalidations when decoding the first
-   * frame of the image, so image notifications observers will be able to
-   * gradually draw in the image as it downloads.
-   *
-   * If @aSend is false (the default), we'll only send an invalidation when we
-   * complete the first frame.
-   *
-   * This must be called before Init() is called.
-   */
-  void SetSendPartialInvalidations(bool aSend)
-  {
-    MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
-    mSendPartialInvalidations = aSend;
-  }
-
-  /**
    * Set an iterator to the SourceBuffer which will feed data to this decoder.
    *
    * This should be called for almost all decoders; the exceptions are the
    * contained decoders of an nsICODecoder, which will be fed manually via Write
    * instead.
    *
    * This must be called before Init() is called.
    */
   void SetIterator(SourceBufferIterator&& aIterator)
   {
     MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
     mIterator.emplace(Move(aIterator));
   }
 
   /**
-   * Set whether this decoder is associated with a transient image. The decoder
-   * may choose to avoid certain optimizations that don't pay off for
-   * short-lived images in this case.
+   * Should this decoder send partial invalidations?
    */
-  void SetImageIsTransient(bool aIsTransient)
+  bool ShouldSendPartialInvalidations() const
   {
-    MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
-    mImageIsTransient = aIsTransient;
+    return !(mDecoderFlags & DecoderFlags::IS_REDECODE);
   }
 
   /**
-   * Set whether we should stop decoding after the first frame.
+   * Should we stop decoding after the first frame?
    */
-  void SetIsFirstFrameDecode()
+  bool IsFirstFrameDecode() const
   {
-    MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
-    mFirstFrameDecode = true;
+    return bool(mDecoderFlags & DecoderFlags::FIRST_FRAME_ONLY);
   }
 
-  bool IsFirstFrameDecode() const { return mFirstFrameDecode; }
-
   size_t BytesDecoded() const { return mBytesDecoded; }
 
   // The amount of time we've spent inside Write() so far for this decoder.
   TimeDuration DecodeTime() const { return mDecodeTime; }
 
   // The number of times Write() has been called so far for this decoder.
   uint32_t ChunkCount() const { return mChunkCount; }
 
@@ -250,19 +228,36 @@ public:
   bool WasAborted() const { return mDecodeAborted; }
 
   enum DecodeStyle {
       PROGRESSIVE, // produce intermediate frames representing the partial
                    // state of the image
       SEQUENTIAL   // decode to final image immediately
   };
 
-  void SetFlags(uint32_t aFlags) { mFlags = aFlags; }
-  uint32_t GetFlags() const { return mFlags; }
-  uint32_t GetDecodeFlags() const { return DecodeFlags(mFlags); }
+  /**
+   * Get or set the DecoderFlags that influence the behavior of this decoder.
+   */
+  void SetDecoderFlags(DecoderFlags aDecoderFlags)
+  {
+    MOZ_ASSERT(!mInitialized);
+    mDecoderFlags = aDecoderFlags;
+  }
+  DecoderFlags GetDecoderFlags() const { return mDecoderFlags; }
+
+  /**
+   * Get or set the SurfaceFlags that select the kind of output this decoder
+   * will produce.
+   */
+  void SetSurfaceFlags(SurfaceFlags aSurfaceFlags)
+  {
+    MOZ_ASSERT(!mInitialized);
+    mSurfaceFlags = aSurfaceFlags;
+  }
+  SurfaceFlags GetSurfaceFlags() const { return mSurfaceFlags; }
 
   bool HasSize() const { return mImageMetadata.HasSize(); }
 
   nsIntSize GetSize() const
   {
     MOZ_ASSERT(HasSize());
     return mImageMetadata.GetSize();
   }
@@ -400,17 +395,16 @@ protected:
     nsIntSize size = GetSize();
     return AllocateFrame(0, size, nsIntRect(nsIntPoint(), size),
                          gfx::SurfaceFormat::B8G8R8A8);
   }
 
   RawAccessFrameRef AllocateFrameInternal(uint32_t aFrameNum,
                                           const nsIntSize& aTargetSize,
                                           const nsIntRect& aFrameRect,
-                                          uint32_t aDecodeFlags,
                                           gfx::SurfaceFormat aFormat,
                                           uint8_t aPaletteDepth,
                                           imgFrame* aPreviousFrame);
 
 protected:
   uint8_t* mImageData;  // Pointer to image data in either Cairo or 8bit format
   uint32_t mImageDataLength;
   uint32_t* mColormap;  // Current colormap to be used in Cairo format
@@ -427,24 +421,22 @@ private:
   uint32_t mFrameCount; // Number of frames, including anything in-progress
 
   nsresult mFailCode;
 
   // Telemetry data for this decoder.
   TimeDuration mDecodeTime;
   uint32_t mChunkCount;
 
-  uint32_t mFlags;
+  DecoderFlags mDecoderFlags;
+  SurfaceFlags mSurfaceFlags;
   size_t mBytesDecoded;
 
   bool mInitialized : 1;
   bool mMetadataDecode : 1;
-  bool mSendPartialInvalidations : 1;
-  bool mImageIsTransient : 1;
-  bool mFirstFrameDecode : 1;
   bool mInFrame : 1;
   bool mDataDone : 1;
   bool mDecodeDone : 1;
   bool mDataError : 1;
   bool mDecodeAborted : 1;
   bool mShouldReportError : 1;
 };
 
--- a/image/DecoderFactory.cpp
+++ b/image/DecoderFactory.cpp
@@ -104,38 +104,36 @@ DecoderFactory::GetDecoder(DecoderType a
   return decoder.forget();
 }
 
 /* static */ already_AddRefed<Decoder>
 DecoderFactory::CreateDecoder(DecoderType aType,
                               RasterImage* aImage,
                               SourceBuffer* aSourceBuffer,
                               const Maybe<IntSize>& aTargetSize,
-                              uint32_t aFlags,
+                              DecoderFlags aDecoderFlags,
+                              SurfaceFlags aSurfaceFlags,
                               int aSampleSize,
-                              const IntSize& aResolution,
-                              bool aIsRedecode,
-                              bool aImageIsTransient)
+                              const IntSize& aResolution)
 {
   if (aType == DecoderType::UNKNOWN) {
     return nullptr;
   }
 
-  nsRefPtr<Decoder> decoder = GetDecoder(aType, aImage, aIsRedecode);
+  nsRefPtr<Decoder> decoder =
+    GetDecoder(aType, aImage, bool(aDecoderFlags & DecoderFlags::IS_REDECODE));
   MOZ_ASSERT(decoder, "Should have a decoder now");
 
   // Initialize the decoder.
   decoder->SetMetadataDecode(false);
   decoder->SetIterator(aSourceBuffer->Iterator());
-  decoder->SetFlags(aFlags);
+  decoder->SetDecoderFlags(aDecoderFlags | DecoderFlags::FIRST_FRAME_ONLY);
+  decoder->SetSurfaceFlags(aSurfaceFlags);
   decoder->SetSampleSize(aSampleSize);
   decoder->SetResolution(aResolution);
-  decoder->SetSendPartialInvalidations(!aIsRedecode);
-  decoder->SetImageIsTransient(aImageIsTransient);
-  decoder->SetIsFirstFrameDecode();
 
   // Set a target size for downscale-during-decode if applicable.
   if (aTargetSize) {
     DebugOnly<nsresult> rv = decoder->SetTargetSize(*aTargetSize);
     MOZ_ASSERT(nsresult(rv) != NS_ERROR_NOT_AVAILABLE,
                "We're downscale-during-decode but decoder doesn't support it?");
     MOZ_ASSERT(NS_SUCCEEDED(rv), "Bad downscale-during-decode target size?");
   }
@@ -147,36 +145,37 @@ DecoderFactory::CreateDecoder(DecoderTyp
 
   return decoder.forget();
 }
 
 /* static */ already_AddRefed<Decoder>
 DecoderFactory::CreateAnimationDecoder(DecoderType aType,
                                        RasterImage* aImage,
                                        SourceBuffer* aSourceBuffer,
-                                       uint32_t aFlags,
+                                       DecoderFlags aDecoderFlags,
+                                       SurfaceFlags aSurfaceFlags,
                                        const IntSize& aResolution)
 {
   if (aType == DecoderType::UNKNOWN) {
     return nullptr;
   }
 
   MOZ_ASSERT(aType == DecoderType::GIF || aType == DecoderType::PNG,
              "Calling CreateAnimationDecoder for non-animating DecoderType");
 
   nsRefPtr<Decoder> decoder =
     GetDecoder(aType, aImage, /* aIsRedecode = */ true);
   MOZ_ASSERT(decoder, "Should have a decoder now");
 
   // Initialize the decoder.
   decoder->SetMetadataDecode(false);
   decoder->SetIterator(aSourceBuffer->Iterator());
-  decoder->SetFlags(aFlags);
+  decoder->SetDecoderFlags(aDecoderFlags | DecoderFlags::IS_REDECODE);
+  decoder->SetSurfaceFlags(aSurfaceFlags);
   decoder->SetResolution(aResolution);
-  decoder->SetSendPartialInvalidations(false);
 
   decoder->Init();
   if (NS_FAILED(decoder->GetDecoderError())) {
     return nullptr;
   }
 
   return decoder.forget();
 }
@@ -208,38 +207,43 @@ DecoderFactory::CreateMetadataDecoder(De
   }
 
   return decoder.forget();
 }
 
 /* static */ already_AddRefed<Decoder>
 DecoderFactory::CreateAnonymousDecoder(DecoderType aType,
                                        SourceBuffer* aSourceBuffer,
-                                       uint32_t aFlags)
+                                       SurfaceFlags aSurfaceFlags)
 {
   if (aType == DecoderType::UNKNOWN) {
     return nullptr;
   }
 
   nsRefPtr<Decoder> decoder =
     GetDecoder(aType, /* aImage = */ nullptr, /* aIsRedecode = */ false);
   MOZ_ASSERT(decoder, "Should have a decoder now");
 
   // Initialize the decoder.
   decoder->SetMetadataDecode(false);
   decoder->SetIterator(aSourceBuffer->Iterator());
-  decoder->SetFlags(aFlags);
-  decoder->SetImageIsTransient(true);
+
+  // Anonymous decoders are always transient; we don't want to optimize surfaces
+  // or do any other expensive work that might be wasted.
+  DecoderFlags decoderFlags = DecoderFlags::IMAGE_IS_TRANSIENT;
 
   // Without an image, the decoder can't store anything in the SurfaceCache, so
   // callers will only be able to retrieve the most recent frame via
   // Decoder::GetCurrentFrame(). That means that anonymous decoders should
   // always be first-frame-only decoders, because nobody ever wants the *last*
   // frame.
-  decoder->SetIsFirstFrameDecode();
+  decoderFlags |= DecoderFlags::FIRST_FRAME_ONLY;
+
+  decoder->SetDecoderFlags(decoderFlags);
+  decoder->SetSurfaceFlags(aSurfaceFlags);
 
   decoder->Init();
   if (NS_FAILED(decoder->GetDecoderError())) {
     return nullptr;
   }
 
   return decoder.forget();
 }
@@ -254,17 +258,17 @@ DecoderFactory::CreateAnonymousMetadataD
 
   nsRefPtr<Decoder> decoder =
     GetDecoder(aType, /* aImage = */ nullptr, /* aIsRedecode = */ false);
   MOZ_ASSERT(decoder, "Should have a decoder now");
 
   // Initialize the decoder.
   decoder->SetMetadataDecode(true);
   decoder->SetIterator(aSourceBuffer->Iterator());
-  decoder->SetIsFirstFrameDecode();
+  decoder->SetDecoderFlags(DecoderFlags::FIRST_FRAME_ONLY);
 
   decoder->Init();
   if (NS_FAILED(decoder->GetDecoderError())) {
     return nullptr;
   }
 
   return decoder.forget();
 }
--- a/image/DecoderFactory.h
+++ b/image/DecoderFactory.h
@@ -2,29 +2,36 @@
  *
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_image_DecoderFactory_h
 #define mozilla_image_DecoderFactory_h
 
+#include "DecoderFlags.h"
+#include "mozilla/Attributes.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/gfx/2D.h"
 #include "nsCOMPtr.h"
+#include "SurfaceFlags.h"
 
 class nsACString;
 
 namespace mozilla {
 namespace image {
 
 class Decoder;
 class RasterImage;
 class SourceBuffer;
 
+/**
+ * The type of decoder; this is usually determined from a MIME type using
+ * DecoderFactory::GetDecoderType().
+ */
 enum class DecoderType
 {
   PNG,
   GIF,
   JPEG,
   BMP,
   ICO,
   ICON,
@@ -37,68 +44,64 @@ public:
   /// @return the type of decoder which is appropriate for @aMimeType.
   static DecoderType GetDecoderType(const char* aMimeType);
 
   /**
    * Creates and initializes a decoder for non-animated images of type @aType.
    * (If the image *is* animated, only the first frame will be decoded.) The
    * decoder will send notifications to @aImage.
    *
-   * XXX(seth): @aIsRedecode and @aImageIsTransient should really be part of
-   * @aFlags. This requires changes to the way that decoder flags work, though.
-   * See bug 1185800.
-   *
    * @param aType Which type of decoder to create - JPEG, PNG, etc.
    * @param aImage The image will own the decoder and which should receive
    *               notifications as decoding progresses.
    * @param aSourceBuffer The SourceBuffer which the decoder will read its data
    *                      from.
    * @param aTargetSize If not Nothing(), the target size which the image should
    *                    be scaled to during decoding. It's an error to specify
    *                    a target size for a decoder type which doesn't support
    *                    downscale-during-decode.
-   * @param aFlags Flags specifying what type of output the decoder should
-   *               produce; see GetDecodeFlags() in RasterImage.h.
+   * @param aDecoderFlags Flags specifying the behavior of this decoder.
+   * @param aSurfaceFlags Flags specifying the type of output this decoder
+   *                      should produce.
    * @param aSampleSize The sample size requested using #-moz-samplesize (or 0
    *                    if none).
    * @param aResolution The resolution requested using #-moz-resolution (or an
    *                    empty rect if none).
-   * @param aIsRedecode Specify 'true' if this image has been decoded before.
-   * @param aImageIsTransient Specify 'true' if this image is transient.
    */
   static already_AddRefed<Decoder>
   CreateDecoder(DecoderType aType,
                 RasterImage* aImage,
                 SourceBuffer* aSourceBuffer,
                 const Maybe<gfx::IntSize>& aTargetSize,
-                uint32_t aFlags,
+                DecoderFlags aDecoderFlags,
+                SurfaceFlags aSurfaceFlags,
                 int aSampleSize,
-                const gfx::IntSize& aResolution,
-                bool aIsRedecode,
-                bool aImageIsTransient);
+                const gfx::IntSize& aResolution);
 
   /**
    * Creates and initializes a decoder for animated images of type @aType.
    * The decoder will send notifications to @aImage.
    *
    * @param aType Which type of decoder to create - JPEG, PNG, etc.
    * @param aImage The image will own the decoder and which should receive
    *               notifications as decoding progresses.
    * @param aSourceBuffer The SourceBuffer which the decoder will read its data
    *                      from.
-   * @param aFlags Flags specifying what type of output the decoder should
-   *               produce; see GetDecodeFlags() in RasterImage.h.
+   * @param aDecoderFlags Flags specifying the behavior of this decoder.
+   * @param aSurfaceFlags Flags specifying the type of output this decoder
+   *                      should produce.
    * @param aResolution The resolution requested using #-moz-resolution (or an
    *                    empty rect if none).
    */
   static already_AddRefed<Decoder>
   CreateAnimationDecoder(DecoderType aType,
                          RasterImage* aImage,
                          SourceBuffer* aSourceBuffer,
-                         uint32_t aFlags,
+                         DecoderFlags aDecoderFlags,
+                         SurfaceFlags aSurfaceFlags,
                          const gfx::IntSize& aResolution);
 
   /**
    * Creates and initializes a metadata decoder of type @aType. This decoder
    * will only decode the image's header, extracting metadata like the size of
    * the image. No actual image data will be decoded and no surfaces will be
    * allocated. The decoder will send notifications to @aImage.
    *
@@ -121,35 +124,33 @@ public:
 
   /**
    * Creates and initializes an anonymous decoder (one which isn't associated
    * with an Image object). Only the first frame of the image will be decoded.
    *
    * @param aType Which type of decoder to create - JPEG, PNG, etc.
    * @param aSourceBuffer The SourceBuffer which the decoder will read its data
    *                      from.
-   * @param aFlags Flags specifying what type of output the decoder should
-   *               produce; see GetDecodeFlags() in RasterImage.h.
+   * @param aSurfaceFlags Flags specifying the type of output this decoder
+   *                      should produce.
    */
   static already_AddRefed<Decoder>
   CreateAnonymousDecoder(DecoderType aType,
                          SourceBuffer* aSourceBuffer,
-                         uint32_t aFlags);
+                         SurfaceFlags aSurfaceFlags);
 
   /**
    * Creates and initializes an anonymous metadata decoder (one which isn't
    * associated with an Image object). This decoder will only decode the image's
    * header, extracting metadata like the size of the image. No actual image
    * data will be decoded and no surfaces will be allocated.
    *
    * @param aType Which type of decoder to create - JPEG, PNG, etc.
    * @param aSourceBuffer The SourceBuffer which the decoder will read its data
    *                      from.
-   * @param aFlags Flags specifying what type of output the decoder should
-   *               produce; see GetDecodeFlags() in RasterImage.h.
    */
   static already_AddRefed<Decoder>
   CreateAnonymousMetadataDecoder(DecoderType aType,
                                  SourceBuffer* aSourceBuffer);
 
 private:
   virtual ~DecoderFactory() = 0;
 
new file mode 100644
--- /dev/null
+++ b/image/DecoderFlags.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_DecoderFlags_h
+#define mozilla_image_DecoderFlags_h
+
+#include "mozilla/TypedEnumBits.h"
+
+namespace mozilla {
+namespace image {
+
+/**
+ * Flags that influence decoder behavior. Note that these flags *don't*
+ * influence the logical content of the surfaces that the decoder generates, so
+ * they're not in a factor in SurfaceCache lookups and the like. These flags
+ * instead either influence which surfaces are generated at all or the tune the
+ * decoder's behavior for a particular scenario.
+ */
+enum class DecoderFlags : uint8_t
+{
+  FIRST_FRAME_ONLY               = 1 << 0,
+  IS_REDECODE                    = 1 << 1,
+  IMAGE_IS_TRANSIENT             = 1 << 2,
+  ASYNC_NOTIFY                   = 1 << 3
+};
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DecoderFlags)
+
+/**
+ * @return the default set of decode flags.
+ */
+inline DecoderFlags
+DefaultDecoderFlags()
+{
+  return DecoderFlags();
+}
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_DecoderFlags_h
--- a/image/FrameAnimator.cpp
+++ b/image/FrameAnimator.cpp
@@ -276,17 +276,17 @@ FrameAnimator::GetCompositedFrame(uint32
     return LookupResult(mCompositingFrame->DrawableRef(), MatchType::EXACT);
   }
 
   // Otherwise return the raw frame. DoBlend is required to ensure that we only
   // hit this case if the frame is not paletted and doesn't require compositing.
   LookupResult result =
     SurfaceCache::Lookup(ImageKey(mImage),
                          RasterSurfaceKey(mSize,
-                                          0,  // Default decode flags.
+                                          DefaultSurfaceFlags(),
                                           aFrameNum));
   MOZ_ASSERT(!result || !result.DrawableRef()->GetIsPaletted(),
              "About to return a paletted frame");
   return result;
 }
 
 int32_t
 FrameAnimator::GetTimeoutForFrame(uint32_t aFrameNum) const
@@ -327,17 +327,17 @@ FrameAnimator::GetTimeoutForFrame(uint32
 static void
 DoCollectSizeOfCompositingSurfaces(const RawAccessFrameRef& aSurface,
                                    SurfaceMemoryCounterType aType,
                                    nsTArray<SurfaceMemoryCounter>& aCounters,
                                    MallocSizeOf aMallocSizeOf)
 {
   // Concoct a SurfaceKey for this surface.
   SurfaceKey key = RasterSurfaceKey(aSurface->GetImageSize(),
-                                    imgIContainer::DECODE_FLAGS_DEFAULT,
+                                    DefaultSurfaceFlags(),
                                     /* aFrameNum = */ 0);
 
   // Create a counter for this surface.
   SurfaceMemoryCounter counter(key, /* aIsLocked = */ true, aType);
 
   // Extract the surface's memory usage information.
   size_t heap = 0, nonHeap = 0;
   aSurface->AddSizeOfExcludingThis(aMallocSizeOf, heap, nonHeap);
@@ -369,17 +369,17 @@ FrameAnimator::CollectSizeOfCompositingS
 }
 
 RawAccessFrameRef
 FrameAnimator::GetRawFrame(uint32_t aFrameNum) const
 {
   LookupResult result =
     SurfaceCache::Lookup(ImageKey(mImage),
                          RasterSurfaceKey(mSize,
-                                          0,  // Default decode flags.
+                                          DefaultSurfaceFlags(),
                                           aFrameNum));
   return result ? result.DrawableRef()->RawAccessRef()
                 : RawAccessFrameRef();
 }
 
 //******************************************************************************
 // DoBlend gets called when the timer for animation get fired and we have to
 // update the composited frame of the animation.
--- a/image/ImageOps.cpp
+++ b/image/ImageOps.cpp
@@ -111,17 +111,19 @@ ImageOps::DecodeToSurface(nsIInputStream
     return nullptr;
   }
   sourceBuffer->Complete(NS_OK);
 
   // Create a decoder.
   DecoderType decoderType =
     DecoderFactory::GetDecoderType(PromiseFlatCString(aMimeType).get());
   nsRefPtr<Decoder> decoder =
-    DecoderFactory::CreateAnonymousDecoder(decoderType, sourceBuffer, aFlags);
+    DecoderFactory::CreateAnonymousDecoder(decoderType,
+                                           sourceBuffer,
+                                           ToSurfaceFlags(aFlags));
   if (!decoder) {
     return nullptr;
   }
 
   // Run the decoder synchronously.
   decoder->Decode();
   if (!decoder->GetDecodeDone() || decoder->HasError()) {
     return nullptr;
--- a/image/RasterImage.cpp
+++ b/image/RasterImage.cpp
@@ -113,17 +113,19 @@ public:
 
     // Everything worked, so commit to these objects and mark ourselves ready.
     mDstRef = Move(tentativeDstRef);
     mState = eReady;
 
     // Insert the new surface into the cache immediately. We need to do this so
     // that we won't start multiple scaling jobs for the same size.
     SurfaceCache::Insert(mDstRef.get(), ImageKey(mImage.get()),
-                         RasterSurfaceKey(mDstSize, mImageFlags, 0),
+                         RasterSurfaceKey(mDstSize,
+                                          ToSurfaceFlags(mImageFlags),
+                                          /* aFrameNum = */ 0),
                          Lifetime::Transient);
 
     return true;
   }
 
   NS_IMETHOD Run() override
   {
     if (mState == eReady) {
@@ -163,17 +165,18 @@ public:
       mDstRef.reset();
     } else if (mState == eFinishWithError) {
       MOZ_ASSERT(NS_IsMainThread());
       NS_WARNING("HQ scaling failed");
 
       // Remove the frame from the cache since we know we don't need it.
       SurfaceCache::RemoveSurface(ImageKey(mImage.get()),
                                   RasterSurfaceKey(mDstSize,
-                                                   mImageFlags, 0));
+                                                   ToSurfaceFlags(mImageFlags),
+                                                   /* aFrameNum = */ 0));
 
       // Release everything we're holding, too.
       mSrcRef.reset();
       mDstRef.reset();
     } else {
       // mState must be eNew, which is invalid in Run().
       MOZ_ASSERT(false, "Need to call Init() before dispatching");
     }
@@ -420,43 +423,44 @@ RasterImage::LookupFrameInternal(uint32_
 {
   if (!mAnim) {
     NS_ASSERTION(aFrameNum == 0,
                  "Don't ask for a frame > 0 if we're not animated!");
     aFrameNum = 0;
   }
 
   if (mAnim && aFrameNum > 0) {
-    MOZ_ASSERT(DecodeFlags(aFlags) == DECODE_FLAGS_DEFAULT,
-               "Can't composite frames with non-default decode flags");
+    MOZ_ASSERT(ToSurfaceFlags(aFlags) == DefaultSurfaceFlags(),
+               "Can't composite frames with non-default surface flags");
     return mAnim->GetCompositedFrame(aFrameNum);
   }
 
-  Maybe<uint32_t> alternateFlags;
+  Maybe<SurfaceFlags> alternateFlags;
   if (IsOpaque()) {
     // If we're opaque, we can always substitute a frame that was decoded with a
     // different decode flag for premultiplied alpha, because that can only
     // matter for frames with transparency.
-    alternateFlags = Some(aFlags ^ FLAG_DECODE_NO_PREMULTIPLY_ALPHA);
+    alternateFlags.emplace(ToSurfaceFlags(aFlags) ^
+                             SurfaceFlags::NO_PREMULTIPLY_ALPHA);
   }
 
   // We don't want any substitution for sync decodes (except the premultiplied
   // alpha optimization above), so we use SurfaceCache::Lookup in this case.
   if (aFlags & FLAG_SYNC_DECODE) {
     return SurfaceCache::Lookup(ImageKey(this),
                                 RasterSurfaceKey(aSize,
-                                                 DecodeFlags(aFlags),
+                                                 ToSurfaceFlags(aFlags),
                                                  aFrameNum),
                                 alternateFlags);
   }
 
   // We'll return the best match we can find to the requested frame.
   return SurfaceCache::LookupBestMatch(ImageKey(this),
                                        RasterSurfaceKey(aSize,
-                                                        DecodeFlags(aFlags),
+                                                        ToSurfaceFlags(aFlags),
                                                         aFrameNum),
                                        alternateFlags);
 }
 
 DrawableFrameRef
 RasterImage::LookupFrame(uint32_t aFrameNum,
                          const IntSize& aSize,
                          uint32_t aFlags)
@@ -1403,41 +1407,54 @@ RasterImage::Decode(const IntSize& aSize
   }
 
   MOZ_ASSERT(mDownscaleDuringDecode || aSize == mSize,
              "Can only decode to our intrinsic size if we're not allowed to "
              "downscale-during-decode");
 
   Maybe<IntSize> targetSize = mSize != aSize ? Some(aSize) : Nothing();
 
+  // Determine which flags we need to decode this image with.
+  DecoderFlags decoderFlags = DefaultDecoderFlags();
+  if (aFlags & FLAG_ASYNC_NOTIFY) {
+    decoderFlags |= DecoderFlags::ASYNC_NOTIFY;
+  }
+  if (mTransient) {
+    decoderFlags |= DecoderFlags::IMAGE_IS_TRANSIENT;
+  }
+  if (mHasBeenDecoded) {
+    decoderFlags |= DecoderFlags::IS_REDECODE;
+  }
+
   // Create a decoder.
   nsRefPtr<Decoder> decoder;
   if (mAnim) {
     decoder = DecoderFactory::CreateAnimationDecoder(mDecoderType, this,
-                                                     mSourceBuffer, aFlags,
+                                                     mSourceBuffer, decoderFlags,
+                                                     ToSurfaceFlags(aFlags),
                                                      mRequestedResolution);
   } else {
     decoder = DecoderFactory::CreateDecoder(mDecoderType, this, mSourceBuffer,
-                                            targetSize, aFlags,
+                                            targetSize, decoderFlags,
+                                            ToSurfaceFlags(aFlags),
                                             mRequestedSampleSize,
-                                            mRequestedResolution,
-                                            mHasBeenDecoded, mTransient);
+                                            mRequestedResolution);
   }
 
   // Make sure DecoderFactory was able to create a decoder successfully.
   if (!decoder) {
     return NS_ERROR_FAILURE;
   }
 
   // Add a placeholder for the first frame to the SurfaceCache so we won't
   // trigger any more decoders with the same parameters.
   InsertOutcome outcome =
     SurfaceCache::InsertPlaceholder(ImageKey(this),
                                     RasterSurfaceKey(aSize,
-                                                     decoder->GetDecodeFlags(),
+                                                     decoder->GetSurfaceFlags(),
                                                      /* aFrameNum = */ 0));
   if (outcome != InsertOutcome::SUCCESS) {
     return NS_ERROR_FAILURE;
   }
 
   // Report telemetry.
   Telemetry::GetHistogramById(Telemetry::IMAGE_DECODE_COUNT)
     ->Subtract(mDecodeCount);
@@ -1636,17 +1653,17 @@ RasterImage::RequestScale(imgFrame* aFra
 
   // We also can't scale if we can't lock the image data for this frame.
   RawAccessFrameRef frameRef = aFrame->RawAccessRef();
   if (!frameRef) {
     return;
   }
 
   nsRefPtr<ScaleRunner> runner =
-    new ScaleRunner(this, DecodeFlags(aFlags), aSize, Move(frameRef));
+    new ScaleRunner(this, aFlags, aSize, Move(frameRef));
   if (runner->Init()) {
     if (!sScaleWorkerThread) {
       NS_NewNamedThread("Image Scaler", getter_AddRefs(sScaleWorkerThread));
       ClearOnShutdown(&sScaleWorkerThread);
     }
 
     sScaleWorkerThread->Dispatch(runner, NS_DISPATCH_NORMAL);
   }
@@ -1661,18 +1678,18 @@ RasterImage::DrawWithPreDownscaleIfNeede
                                           uint32_t aFlags)
 {
   DrawableFrameRef frameRef;
 
   if (CanScale(aFilter, aSize, aFlags)) {
     LookupResult result =
       SurfaceCache::Lookup(ImageKey(this),
                            RasterSurfaceKey(aSize,
-                                            DecodeFlags(aFlags),
-                                            0));
+                                            ToSurfaceFlags(aFlags),
+                                            /* aFrameNum = */ 0));
     if (!result) {
       // We either didn't have a matching scaled frame or the OS threw it away.
       // Request a new one so we'll be ready next time. For now, we'll fall back
       // to aFrameRef below.
       RequestScale(aFrameRef.get(), aFlags, aSize);
     }
     if (result && result.DrawableRef()->IsImageComplete()) {
       frameRef = Move(result.DrawableRef());  // The scaled version is ready.
@@ -1741,17 +1758,17 @@ RasterImage::Draw(gfxContext* aContext,
 
   if (mError) {
     return DrawResult::BAD_IMAGE;
   }
 
   // Illegal -- you can't draw with non-default decode flags.
   // (Disabling colorspace conversion might make sense to allow, but
   // we don't currently.)
-  if (DecodeFlags(aFlags) != DECODE_FLAGS_DEFAULT) {
+  if (ToSurfaceFlags(aFlags) != DefaultSurfaceFlags()) {
     return DrawResult::BAD_ARGS;
   }
 
   if (!aContext) {
     return DrawResult::BAD_ARGS;
   }
 
   if (IsUnlocked() && mProgressTracker) {
@@ -1932,24 +1949,25 @@ RasterImage::GetFramesNotified(uint32_t*
 
   return NS_OK;
 }
 #endif
 
 void
 RasterImage::NotifyProgress(Progress aProgress,
                             const IntRect& aInvalidRect /* = IntRect() */,
-                            uint32_t aFlags /* = DECODE_FLAGS_DEFAULT */)
+                            SurfaceFlags aSurfaceFlags
+                              /* = DefaultSurfaceFlags() */)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   // Ensure that we stay alive long enough to finish notifying.
   nsRefPtr<RasterImage> image(this);
 
-  bool wasDefaultFlags = aFlags == DECODE_FLAGS_DEFAULT;
+  bool wasDefaultFlags = aSurfaceFlags == DefaultSurfaceFlags();
 
   if (!aInvalidRect.IsEmpty() && wasDefaultFlags) {
     // Update our image container since we're invalidating.
     UpdateImageContainer();
   }
 
   // Tell the observers what happened.
   image->mProgressTracker->SyncNotifyProgress(aProgress, aInvalidRect);
@@ -1984,17 +2002,17 @@ RasterImage::FinalizeDecoder(Decoder* aD
     if (mAnim) {
       mAnim->SetDoneDecoding(true);
     }
   }
 
   // Send out any final notifications.
   NotifyProgress(aDecoder->TakeProgress(),
                  aDecoder->TakeInvalidRect(),
-                 aDecoder->GetDecodeFlags());
+                 aDecoder->GetSurfaceFlags());
 
   bool wasMetadata = aDecoder->IsMetadataDecode();
   bool done = aDecoder->GetDecodeDone();
 
   if (!wasMetadata && aDecoder->ChunkCount()) {
     Telemetry::Accumulate(Telemetry::IMAGE_DECODE_CHUNKS,
                           aDecoder->ChunkCount());
   }
@@ -2089,18 +2107,18 @@ RasterImage::OptimalImageSizeForDest(con
 
   if (aFilter == GraphicsFilter::FILTER_GOOD &&
       CanDownscaleDuringDecode(destSize, aFlags)) {
     return destSize;
   } else if (CanScale(aFilter, destSize, aFlags)) {
     LookupResult result =
       SurfaceCache::Lookup(ImageKey(this),
                            RasterSurfaceKey(destSize,
-                                            DecodeFlags(aFlags),
-                                            0));
+                                            ToSurfaceFlags(aFlags),
+                                            /* aFrameNum = */ 0));
 
     if (result && result.DrawableRef()->IsImageComplete()) {
       return destSize;  // We have an existing HQ scale for this size.
     }
     if (!result) {
       // We could HQ scale to this size, but we haven't. Request a scale now.
       DrawableFrameRef ref = LookupFrame(GetRequestedFrameIndex(aWhichFrame),
                                          mSize, aFlags);
--- a/image/RasterImage.h
+++ b/image/RasterImage.h
@@ -130,27 +130,16 @@ class Image;
 
 namespace image {
 
 class Decoder;
 class FrameAnimator;
 class ImageMetadata;
 class SourceBuffer;
 
-/**
- * Given a set of imgIContainer FLAG_* flags, returns those flags that can
- * affect the output of decoders.
- */
-inline MOZ_CONSTEXPR uint32_t
-DecodeFlags(uint32_t aFlags)
-{
-  return aFlags & (imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA |
-                   imgIContainer::FLAG_DECODE_NO_COLORSPACE_CONVERSION);
-}
-
 class RasterImage final : public ImageResource
                         , public nsIProperties
                         , public SupportsWeakPtr<RasterImage>
 #ifdef DEBUG
                         , public imgIContainerDebug
 #endif
 {
   // (no public constructor - use ImageFactory)
@@ -191,23 +180,23 @@ public:
 
   /**
    * Sends the provided progress notifications to ProgressTracker.
    *
    * Main-thread only.
    *
    * @param aProgress    The progress notifications to send.
    * @param aInvalidRect An invalidation rect to send.
-   * @param aFlags       The decode flags used by the decoder that generated
-   *                     these notifications, or DECODE_FLAGS_DEFAULT if the
+   * @param aFlags       The surface flags used by the decoder that generated
+   *                     these notifications, or DefaultSurfaceFlags() if the
    *                     notifications don't come from a decoder.
    */
   void NotifyProgress(Progress aProgress,
                       const nsIntRect& aInvalidRect = nsIntRect(),
-                      uint32_t aFlags = DECODE_FLAGS_DEFAULT);
+                      SurfaceFlags aSurfaceFlags = DefaultSurfaceFlags());
 
   /**
    * Records telemetry and does final teardown of the provided decoder.
    *
    * Main-thread only.
    */
   void FinalizeDecoder(Decoder* aDecoder);
 
--- a/image/SurfaceCache.cpp
+++ b/image/SurfaceCache.cpp
@@ -278,17 +278,17 @@ public:
   {
     nsRefPtr<CachedSurface> surface;
     mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));
     return surface.forget();
   }
 
   Pair<already_AddRefed<CachedSurface>, MatchType>
   LookupBestMatch(const SurfaceKey&      aSurfaceKey,
-                  const Maybe<uint32_t>& aAlternateFlags)
+                  const Maybe<SurfaceFlags>& aAlternateFlags)
   {
     // Try for an exact match first.
     nsRefPtr<CachedSurface> exactMatch;
     mSurfaces.Get(aSurfaceKey, getter_AddRefs(exactMatch));
     if (exactMatch && exactMatch->IsDecoded()) {
       return MakePair(exactMatch.forget(), MatchType::EXACT);
     }
 
@@ -330,23 +330,23 @@ public:
 
   void SetLocked(bool aLocked) { mLocked = aLocked; }
   bool IsLocked() const { return mLocked; }
 
 private:
   struct MatchContext
   {
     MatchContext(const SurfaceKey& aIdealKey,
-                 const Maybe<uint32_t>& aAlternateFlags)
+                 const Maybe<SurfaceFlags>& aAlternateFlags)
       : mIdealKey(aIdealKey)
       , mAlternateFlags(aAlternateFlags)
     { }
 
     const SurfaceKey& mIdealKey;
-    const Maybe<uint32_t> mAlternateFlags;
+    const Maybe<SurfaceFlags> mAlternateFlags;
     nsRefPtr<CachedSurface> mBestMatch;
   };
 
   static PLDHashOperator TryToImproveMatch(const SurfaceKey& aSurfaceKey,
                                            CachedSurface*    aSurface,
                                            void*             aContext)
   {
     auto context = static_cast<MatchContext*>(aContext);
@@ -639,17 +639,17 @@ public:
 
     MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
                "Lookup() not returning an exact match?");
     return LookupResult(Move(ref), MatchType::EXACT);
   }
 
   LookupResult LookupBestMatch(const ImageKey         aImageKey,
                                const SurfaceKey&      aSurfaceKey,
-                               const Maybe<uint32_t>& aAlternateFlags)
+                               const Maybe<SurfaceFlags>& aAlternateFlags)
   {
     nsRefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       // No cached surfaces for this image.
       return LookupResult(MatchType::NOT_FOUND);
     }
 
     // Repeatedly look up the best match, trying again if the resulting surface
@@ -1055,17 +1055,18 @@ SurfaceCache::Shutdown()
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
   sInstance = nullptr;
 }
 
 /* static */ LookupResult
 SurfaceCache::Lookup(const ImageKey         aImageKey,
                      const SurfaceKey&      aSurfaceKey,
-                     const Maybe<uint32_t>& aAlternateFlags /* = Nothing() */)
+                     const Maybe<SurfaceFlags>& aAlternateFlags
+                       /* = Nothing() */)
 {
   if (!sInstance) {
     return LookupResult(MatchType::NOT_FOUND);
   }
 
   MutexAutoLock lock(sInstance->GetMutex());
 
   LookupResult result = sInstance->Lookup(aImageKey, aSurfaceKey);
@@ -1075,17 +1076,17 @@ SurfaceCache::Lookup(const ImageKey     
   }
 
   return result;
 }
 
 /* static */ LookupResult
 SurfaceCache::LookupBestMatch(const ImageKey         aImageKey,
                               const SurfaceKey&      aSurfaceKey,
-                              const Maybe<uint32_t>& aAlternateFlags
+                              const Maybe<SurfaceFlags>& aAlternateFlags
                                 /* = Nothing() */)
 {
   if (!sInstance) {
     return LookupResult(MatchType::NOT_FOUND);
   }
 
   MutexAutoLock lock(sInstance->GetMutex());
   return sInstance->LookupBestMatch(aImageKey, aSurfaceKey, aAlternateFlags);
--- a/image/SurfaceCache.h
+++ b/image/SurfaceCache.h
@@ -14,16 +14,17 @@
 #include "mozilla/Maybe.h"           // for Maybe
 #include "mozilla/MemoryReporting.h" // for MallocSizeOf
 #include "mozilla/HashFunctions.h"   // for HashGeneric and AddToHash
 #include "gfx2DGlue.h"
 #include "gfxPoint.h"                // for gfxSize
 #include "nsCOMPtr.h"                // for already_AddRefed
 #include "mozilla/gfx/Point.h"       // for mozilla::gfx::IntSize
 #include "mozilla/gfx/2D.h"          // for SourceSurface
+#include "SurfaceFlags.h"
 #include "SVGImageContext.h"         // for SVGImageContext
 
 namespace mozilla {
 namespace image {
 
 class Image;
 class imgFrame;
 class LookupResult;
@@ -54,73 +55,75 @@ public:
            aOther.mAnimationTime == mAnimationTime &&
            aOther.mFlags == mFlags;
   }
 
   uint32_t Hash() const
   {
     uint32_t hash = HashGeneric(mSize.width, mSize.height);
     hash = AddToHash(hash, mSVGContext.map(HashSIC).valueOr(0));
-    hash = AddToHash(hash, mAnimationTime, mFlags);
+    hash = AddToHash(hash, mAnimationTime, uint32_t(mFlags));
     return hash;
   }
 
   IntSize Size() const { return mSize; }
   Maybe<SVGImageContext> SVGContext() const { return mSVGContext; }
   float AnimationTime() const { return mAnimationTime; }
-  uint32_t Flags() const { return mFlags; }
+  SurfaceFlags Flags() const { return mFlags; }
 
-  SurfaceKey WithNewFlags(uint32_t aFlags) const
+  SurfaceKey WithNewFlags(SurfaceFlags aFlags) const
   {
     return SurfaceKey(mSize, mSVGContext, mAnimationTime, aFlags);
   }
 
 private:
   SurfaceKey(const IntSize& aSize,
              const Maybe<SVGImageContext>& aSVGContext,
              const float aAnimationTime,
-             const uint32_t aFlags)
+             const SurfaceFlags aFlags)
     : mSize(aSize)
     , mSVGContext(aSVGContext)
     , mAnimationTime(aAnimationTime)
     , mFlags(aFlags)
   { }
 
   static uint32_t HashSIC(const SVGImageContext& aSIC) {
     return aSIC.Hash();
   }
 
-  friend SurfaceKey RasterSurfaceKey(const IntSize&, uint32_t, uint32_t);
+  friend SurfaceKey RasterSurfaceKey(const IntSize&,
+                                     SurfaceFlags,
+                                     uint32_t);
   friend SurfaceKey VectorSurfaceKey(const IntSize&,
                                      const Maybe<SVGImageContext>&,
                                      float);
 
   IntSize                mSize;
   Maybe<SVGImageContext> mSVGContext;
   float                  mAnimationTime;
-  uint32_t               mFlags;
+  SurfaceFlags           mFlags;
 };
 
 inline SurfaceKey
 RasterSurfaceKey(const gfx::IntSize& aSize,
-                 uint32_t aFlags,
+                 SurfaceFlags aFlags,
                  uint32_t aFrameNum)
 {
   return SurfaceKey(aSize, Nothing(), float(aFrameNum), aFlags);
 }
 
 inline SurfaceKey
 VectorSurfaceKey(const gfx::IntSize& aSize,
                  const Maybe<SVGImageContext>& aSVGContext,
                  float aAnimationTime)
 {
   // We don't care about aFlags for VectorImage because none of the flags we
   // have right now influence VectorImage's rendering. If we add a new flag that
   // *does* affect how a VectorImage renders, we'll have to change this.
-  return SurfaceKey(aSize, aSVGContext, aAnimationTime, 0);
+  return SurfaceKey(aSize, aSVGContext, aAnimationTime, DefaultSurfaceFlags());
 }
 
 enum class Lifetime : uint8_t {
   Transient,
   Persistent
 };
 
 enum class InsertOutcome : uint8_t {
@@ -191,17 +194,18 @@ struct SurfaceCache
    *                        lock each time.
    *
    * @return                a LookupResult, which will either contain a
    *                        DrawableFrameRef to the requested surface, or an
    *                        empty DrawableFrameRef if the surface was not found.
    */
   static LookupResult Lookup(const ImageKey    aImageKey,
                              const SurfaceKey& aSurfaceKey,
-                             const Maybe<uint32_t>& aAlternateFlags = Nothing());
+                             const Maybe<SurfaceFlags>& aAlternateFlags
+                               = Nothing());
 
   /**
    * Looks up the best matching surface in the cache and returns a drawable
    * reference to the imgFrame containing it.
    *
    * Returned surfaces may vary from the requested surface only in terms of
    * size, unless @aAlternateFlags is specified.
    *
@@ -219,17 +223,17 @@ struct SurfaceCache
    *                        DrawableFrameRef to a surface similar to the
    *                        requested surface, or an empty DrawableFrameRef if
    *                        the surface was not found. Callers can use
    *                        LookupResult::IsExactMatch() to check whether the
    *                        returned surface exactly matches @aSurfaceKey.
    */
   static LookupResult LookupBestMatch(const ImageKey    aImageKey,
                                       const SurfaceKey& aSurfaceKey,
-                                      const Maybe<uint32_t>& aAlternateFlags
+                                      const Maybe<SurfaceFlags>& aAlternateFlags
                                         = Nothing());
 
   /**
    * Insert a surface into the cache. If a surface with the same ImageKey and
    * SurfaceKey is already in the cache, Insert returns FAILURE_ALREADY_PRESENT.
    * If a matching placeholder is already present, the placeholder is removed.
    *
    * Each surface in the cache has a lifetime, either Transient or Persistent.
new file mode 100644
--- /dev/null
+++ b/image/SurfaceFlags.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_image_SurfaceFlags_h
+#define mozilla_image_SurfaceFlags_h
+
+#include "imgIContainer.h"
+#include "mozilla/TypedEnumBits.h"
+
+namespace mozilla {
+namespace image {
+
+/**
+ * Flags that change the output a decoder generates. Because different
+ * combinations of these flags result in logically different surfaces, these
+ * flags must be taken into account in SurfaceCache lookups.
+ */
+enum class SurfaceFlags : uint8_t
+{
+  NO_PREMULTIPLY_ALPHA     = 1 << 0,
+  NO_COLORSPACE_CONVERSION = 1 << 1
+};
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(SurfaceFlags)
+
+/**
+ * @return the default set of surface flags.
+ */
+inline SurfaceFlags
+DefaultSurfaceFlags()
+{
+  return SurfaceFlags();
+}
+
+/**
+ * Given a set of imgIContainer FLAG_* flags, returns a set of SurfaceFlags with
+ * the corresponding flags set.
+ */
+inline SurfaceFlags
+ToSurfaceFlags(uint32_t aFlags)
+{
+  SurfaceFlags flags = DefaultSurfaceFlags();
+  if (aFlags & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA) {
+    flags |= SurfaceFlags::NO_PREMULTIPLY_ALPHA;
+  }
+  if (aFlags & imgIContainer::FLAG_DECODE_NO_COLORSPACE_CONVERSION) {
+    flags |= SurfaceFlags::NO_COLORSPACE_CONVERSION;
+  }
+  return flags;
+}
+
+} // namespace image
+} // namespace mozilla
+
+#endif // mozilla_image_SurfaceFlags_h
--- a/image/decoders/nsICODecoder.cpp
+++ b/image/decoders/nsICODecoder.cpp
@@ -355,20 +355,18 @@ nsICODecoder::WriteInternal(const char* 
     aCount -= toCopy;
     aBuffer += toCopy;
 
     mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes,
                      PNGSIGNATURESIZE);
     if (mIsPNG) {
       mContainedDecoder = new nsPNGDecoder(mImage);
       mContainedDecoder->SetMetadataDecode(IsMetadataDecode());
-      mContainedDecoder->SetSendPartialInvalidations(mSendPartialInvalidations);
-      if (mFirstFrameDecode) {
-        mContainedDecoder->SetIsFirstFrameDecode();
-      }
+      mContainedDecoder->SetDecoderFlags(GetDecoderFlags());
+      mContainedDecoder->SetSurfaceFlags(GetSurfaceFlags());
       mContainedDecoder->Init();
       if (!WriteToContainedDecoder(mSignature, PNGSIGNATURESIZE)) {
         return;
       }
     }
   }
 
   // If we have a PNG, let the PNG decoder do all of the rest of the work
@@ -435,20 +433,18 @@ nsICODecoder::WriteInternal(const char* 
 
     // Init the bitmap decoder which will do most of the work for us
     // It will do everything except the AND mask which isn't present in bitmaps
     // bmpDecoder is for local scope ease, it will be freed by mContainedDecoder
     nsBMPDecoder* bmpDecoder = new nsBMPDecoder(mImage);
     mContainedDecoder = bmpDecoder;
     bmpDecoder->SetUseAlphaData(true);
     mContainedDecoder->SetMetadataDecode(IsMetadataDecode());
-    mContainedDecoder->SetSendPartialInvalidations(mSendPartialInvalidations);
-    if (mFirstFrameDecode) {
-      mContainedDecoder->SetIsFirstFrameDecode();
-    }
+    mContainedDecoder->SetDecoderFlags(GetDecoderFlags());
+    mContainedDecoder->SetSurfaceFlags(GetSurfaceFlags());
     mContainedDecoder->Init();
 
     // The ICO format when containing a BMP does not include the 14 byte
     // bitmap file header. To use the code of the BMP decoder we need to
     // generate this header ourselves and feed it to the BMP decoder.
     int8_t bfhBuffer[BMPFILEHEADERSIZE];
     if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
       PostDataError();
--- a/image/decoders/nsJPEGDecoder.cpp
+++ b/image/decoders/nsJPEGDecoder.cpp
@@ -149,17 +149,17 @@ nsJPEGDecoder::SetTargetSize(const nsInt
 
   return NS_OK;
 }
 
 void
 nsJPEGDecoder::InitInternal()
 {
   mCMSMode = gfxPlatform::GetCMSMode();
-  if (GetDecodeFlags() & imgIContainer::FLAG_DECODE_NO_COLORSPACE_CONVERSION) {
+  if (GetSurfaceFlags() & SurfaceFlags::NO_COLORSPACE_CONVERSION) {
     mCMSMode = eCMSMode_Off;
   }
 
   // We set up the normal JPEG error routines, then override error_exit.
   mInfo.err = jpeg_std_error(&mErr.pub);
   //   mInfo.err = jpeg_std_error(&mErr.pub);
   mErr.pub.error_exit = my_error_exit;
   // Establish the setjmp return context for my_error_exit to use.
--- a/image/decoders/nsPNGDecoder.cpp
+++ b/image/decoders/nsPNGDecoder.cpp
@@ -228,21 +228,21 @@ nsPNGDecoder::EndImageFrame()
   PostFrameStop(opacity, mAnimInfo.mDispose, mAnimInfo.mTimeout,
                 mAnimInfo.mBlend);
 }
 
 void
 nsPNGDecoder::InitInternal()
 {
   mCMSMode = gfxPlatform::GetCMSMode();
-  if (GetDecodeFlags() & imgIContainer::FLAG_DECODE_NO_COLORSPACE_CONVERSION) {
+  if (GetSurfaceFlags() & SurfaceFlags::NO_COLORSPACE_CONVERSION) {
     mCMSMode = eCMSMode_Off;
   }
   mDisablePremultipliedAlpha =
-    GetDecodeFlags() & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA;
+    bool(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
 
 #ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
   static png_byte color_chunks[]=
        { 99,  72,  82,  77, '\0',   // cHRM
         105,  67,  67,  80, '\0'};  // iCCP
   static png_byte unused_chunks[]=
        { 98,  75,  71,  68, '\0',   // bKGD
         104,  73,  83,  84, '\0',   // hIST
--- a/image/imgLoader.cpp
+++ b/image/imgLoader.cpp
@@ -271,19 +271,20 @@ private:
       surfacePathPrefix.AppendInt(counter.Key().Size().width);
       surfacePathPrefix.Append("x");
       surfacePathPrefix.AppendInt(counter.Key().Size().height);
 
       if (counter.Type() == SurfaceMemoryCounterType::NORMAL) {
         surfacePathPrefix.Append("@");
         surfacePathPrefix.AppendFloat(counter.Key().AnimationTime());
 
-        if (counter.Key().Flags() != imgIContainer::DECODE_FLAGS_DEFAULT) {
+        if (counter.Key().Flags() != DefaultSurfaceFlags()) {
           surfacePathPrefix.Append(", flags:");
-          surfacePathPrefix.AppendInt(counter.Key().Flags(), /* aRadix = */ 16);
+          surfacePathPrefix.AppendInt(uint32_t(counter.Key().Flags()),
+                                      /* aRadix = */ 16);
         }
       } else if (counter.Type() == SurfaceMemoryCounterType::COMPOSITING) {
         surfacePathPrefix.Append(", compositing frame");
       } else if (counter.Type() == SurfaceMemoryCounterType::COMPOSITING_PREV) {
         surfacePathPrefix.Append(", compositing prev frame");
       } else {
         MOZ_ASSERT_UNREACHABLE("Unknown counter type");
       }
--- a/image/moz.build
+++ b/image/moz.build
@@ -41,16 +41,17 @@ EXPORTS += [
     'ImageOps.h',
     'ImageRegion.h',
     'imgLoader.h',
     'imgRequest.h',
     'imgRequestProxy.h',
     'IProgressObserver.h',
     'Orientation.h',
     'SurfaceCache.h',
+    'SurfaceFlags.h',
 ]
 
 UNIFIED_SOURCES += [
     'ClippedImage.cpp',
     'DecodePool.cpp',
     'Decoder.cpp',
     'DecoderFactory.cpp',
     'DynamicImage.cpp',
--- a/image/test/gtest/TestMetadata.cpp
+++ b/image/test/gtest/TestMetadata.cpp
@@ -106,17 +106,17 @@ CheckMetadata(const ImageTestCase& aTest
   EXPECT_EQ(expectTransparency, bool(metadataProgress & FLAG_HAS_TRANSPARENCY));
 
   EXPECT_EQ(bool(aTestCase.mFlags & TEST_CASE_IS_ANIMATED),
             bool(metadataProgress & FLAG_IS_ANIMATED));
 
   // Create a full decoder, so we can compare the result.
   decoder =
     DecoderFactory::CreateAnonymousDecoder(decoderType, sourceBuffer,
-                                           imgIContainer::DECODE_FLAGS_DEFAULT);
+                                           DefaultSurfaceFlags());
   ASSERT_TRUE(decoder != nullptr);
 
   if (aBMPAlpha == BMPAlpha::ENABLED) {
     static_cast<nsBMPDecoder*>(decoder.get())->SetUseAlphaData(true);
   }
 
   // Run the full decoder synchronously.
   decoder->Decode();
@@ -236,19 +236,19 @@ TEST(ImageMetadata, NoFrameDelayGIFFullD
 
   EXPECT_TRUE(bool(imageProgress & FLAG_HAS_TRANSPARENCY) == false);
   EXPECT_TRUE(bool(imageProgress & FLAG_IS_ANIMATED) == true);
 
   // Ensure that we decoded both frames of the image.
   LookupResult firstFrameLookupResult =
     SurfaceCache::Lookup(ImageKey(image.get()),
                          RasterSurfaceKey(imageSize,
-                                          imgIContainer::DECODE_FLAGS_DEFAULT,
+                                          DefaultSurfaceFlags(),
                                           /* aFrameNum = */ 0));
   EXPECT_EQ(MatchType::EXACT, firstFrameLookupResult.Type());
                                                              
   LookupResult secondFrameLookupResult =
     SurfaceCache::Lookup(ImageKey(image.get()),
                          RasterSurfaceKey(imageSize,
-                                          imgIContainer::DECODE_FLAGS_DEFAULT,
+                                          DefaultSurfaceFlags(),
                                           /* aFrameNum = */ 1));
   EXPECT_EQ(MatchType::EXACT, secondFrameLookupResult.Type());
 }
--- a/js/public/UbiNode.h
+++ b/js/public/UbiNode.h
@@ -8,22 +8,26 @@
 #define js_UbiNode_h
 
 #include "mozilla/Alignment.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Move.h"
+#include "mozilla/RangedPtr.h"
+#include "mozilla/TypeTraits.h"
 #include "mozilla/UniquePtr.h"
+#include "mozilla/Variant.h"
 
 #include "jspubtd.h"
 
 #include "js/GCAPI.h"
 #include "js/HashTable.h"
+#include "js/RootingAPI.h"
 #include "js/TracingAPI.h"
 #include "js/TypeDecls.h"
 #include "js/Vector.h"
 
 // JS::ubi::Node
 //
 // JS::ubi::Node is a pointer-like type designed for internal use by heap
 // analysis tools. A ubi::Node can refer to:
@@ -134,35 +138,302 @@
 // they complete. (For algorithms like path-finding and dominator tree
 // computation, we implement the algorithm avoiding any operation that could
 // cause a GC --- and use AutoCheckCannotGC to verify this.)
 //
 // If this restriction prevents us from implementing interesting tools, we may
 // teach the GC how to root ubi::Nodes, fix up hash tables that use them as
 // keys, etc.
 
+class JSAtom;
+
 namespace JS {
 namespace ubi {
 
 class Edge;
 class EdgeRange;
+class StackFrame;
 
 } // namespace ubi
 } // namespace JS
 
 namespace mozilla {
+
 template<>
 class DefaultDelete<JS::ubi::EdgeRange> : public JS::DeletePolicy<JS::ubi::EdgeRange> { };
+
+template<>
+class DefaultDelete<JS::ubi::StackFrame> : public JS::DeletePolicy<JS::ubi::StackFrame> { };
+
 } // namespace mozilla
 
 namespace JS {
 namespace ubi {
 
 using mozilla::Maybe;
+using mozilla::Move;
+using mozilla::RangedPtr;
 using mozilla::UniquePtr;
+using mozilla::Variant;
+
+/*** ubi::StackFrame ******************************************************************************/
+
+// Concrete JS::ubi::StackFrame instances backed by a live SavedFrame object
+// store their strings as JSAtom*, while deserialized stack frames from offline
+// heap snapshots store their strings as const char16_t*. In order to provide
+// zero-cost accessors to these strings in a single interface that works with
+// both cases, we use this variant type.
+using AtomOrTwoByteChars = Variant<JSAtom*, const char16_t*>;
+
+// The base class implemented by each ConcreteStackFrame<T> type. Subclasses
+// must not add data members to this class.
+class BaseStackFrame {
+    friend class StackFrame;
+
+    BaseStackFrame(const StackFrame&) = delete;
+    BaseStackFrame& operator=(const StackFrame&) = delete;
+
+  protected:
+    void* ptr;
+    explicit BaseStackFrame(void* ptr) : ptr(ptr) { }
+
+  public:
+    // This is a value type that should not have a virtual destructor. Don't add
+    // destructors in subclasses!
+
+    // Get a unique identifier for this StackFrame. The identifier is not valid
+    // across garbage collections.
+    virtual uintptr_t identifier() const { return reinterpret_cast<uintptr_t>(ptr); }
+
+    // Get this frame's parent frame.
+    virtual StackFrame parent() const = 0;
+
+    // Get this frame's line number.
+    virtual uint32_t line() const = 0;
+
+    // Get this frame's column number.
+    virtual uint32_t column() const = 0;
+
+    // Get this frame's source name. Never null.
+    virtual AtomOrTwoByteChars source() const = 0;
+
+    // Return this frame's function name if named, otherwise the inferred
+    // display name. Can be null.
+    virtual AtomOrTwoByteChars functionDisplayName() const = 0;
+
+    // Returns true if this frame's function is system JavaScript running with
+    // trusted principals, false otherwise.
+    virtual bool isSystem() const = 0;
+
+    // Return true if this frame's function is a self-hosted JavaScript builtin,
+    // false otherwise.
+    virtual bool isSelfHosted() const = 0;
+
+    // Construct a SavedFrame stack for the stack starting with this frame and
+    // containing all of its parents. The SavedFrame objects will be placed into
+    // cx's current compartment.
+    //
+    // Note that the process of
+    //
+    //     SavedFrame
+    //         |
+    //         V
+    //     JS::ubi::StackFrame
+    //         |
+    //         V
+    //     offline heap snapshot
+    //         |
+    //         V
+    //     JS::ubi::StackFrame
+    //         |
+    //         V
+    //     SavedFrame
+    //
+    // is lossy because we cannot serialize and deserialize the SavedFrame's
+    // principals in the offline heap snapshot, so JS::ubi::StackFrame
+    // simplifies the principals check into the boolean isSystem() state. This
+    // is fine because we only expose JS::ubi::Stack to devtools and chrome
+    // code, and not to the web platform.
+    virtual bool constructSavedFrameStack(JSContext* cx,
+                                          MutableHandleObject outSavedFrameStack) const = 0;
+
+    // Trace the concrete implementation of JS::ubi::StackFrame.
+    virtual void trace(JSTracer* trc) = 0;
+};
+
+// A traits template with a specialization for each backing type that implements
+// the ubi::BaseStackFrame interface. Each specialization must be the a subclass
+// of ubi::BaseStackFrame.
+template<typename T> class ConcreteStackFrame;
+
+// A JS::ubi::StackFrame represents a frame in a recorded stack. It can be
+// backed either by a live SavedFrame object or by a structure deserialized from
+// an offline heap snapshot.
+//
+// It is a value type that may be memcpy'd hither and thither without worrying
+// about constructors or destructors, similar to POD types.
+//
+// Its lifetime is the same as the lifetime of the graph that is being analyzed
+// by the JS::ubi::Node that the JS::ubi::StackFrame came from. That is, if the
+// graph being analyzed is the live heap graph, the JS::ubi::StackFrame is only
+// valid within the scope of an AutoCheckCannotGC; if the graph being analyzed
+// is an offline heap snapshot, the JS::ubi::StackFrame is valid as long as the
+// offline heap snapshot is alive.
+class StackFrame : public JS::Traceable {
+    // Storage in which we allocate BaseStackFrame subclasses.
+    mozilla::AlignedStorage2<BaseStackFrame> storage;
+
+    BaseStackFrame* base() { return storage.addr(); }
+    const BaseStackFrame* base() const { return storage.addr(); }
+
+    template<typename T>
+    void construct(T* ptr) {
+        static_assert(mozilla::IsBaseOf<BaseStackFrame, ConcreteStackFrame<T>>::value,
+                      "ConcreteStackFrame<T> must inherit from BaseStackFrame");
+        static_assert(sizeof(ConcreteStackFrame<T>) == sizeof(*base()),
+                      "ubi::ConcreteStackFrame<T> specializations must be the same size as "
+                      "ubi::BaseStackFrame");
+        ConcreteStackFrame<T>::construct(base(), ptr);
+    }
+    struct ConstructFunctor;
+
+  public:
+    StackFrame() { construct<void>(nullptr); }
+
+    template<typename T>
+    MOZ_IMPLICIT StackFrame(T* ptr) {
+        construct(ptr);
+    }
+
+    template<typename T>
+    StackFrame& operator=(T* ptr) {
+        construct(ptr);
+        return *this;
+    }
+
+    // Constructors accepting SpiderMonkey's generic-pointer-ish types.
+
+    template<typename T>
+    explicit StackFrame(const JS::Handle<T*>& handle) {
+        construct(handle.get());
+    }
+
+    template<typename T>
+    StackFrame& operator=(const JS::Handle<T*>& handle) {
+        construct(handle.get());
+        return *this;
+    }
+
+    template<typename T>
+    explicit StackFrame(const JS::Rooted<T*>& root) {
+        construct(root.get());
+    }
+
+    template<typename T>
+    StackFrame& operator=(const JS::Rooted<T*>& root) {
+        construct(root.get());
+        return *this;
+    }
+
+    // Because StackFrame is just a vtable pointer and an instance pointer, we
+    // can memcpy everything around instead of making concrete classes define
+    // virtual constructors. See the comment above Node's copy constructor for
+    // more details; that comment applies here as well.
+    StackFrame(const StackFrame& rhs) {
+        memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
+    }
+
+    StackFrame& operator=(const StackFrame& rhs) {
+        memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
+        return *this;
+    }
+
+    bool operator==(const StackFrame& rhs) const { return base()->ptr == rhs.base()->ptr; }
+    bool operator!=(const StackFrame& rhs) const { return !(*this == rhs); }
+
+    explicit operator bool() const {
+        return base()->ptr != nullptr;
+    }
+
+    // Copy this StackFrame's source name into the given |destination|
+    // buffer. Copy no more than |length| characters. The result is *not* null
+    // terminated. Returns how many characters were written into the buffer.
+    size_t source(RangedPtr<char16_t> destination, size_t length) const;
+
+    // Copy this StackFrame's function display name into the given |destination|
+    // buffer. Copy no more than |length| characters. The result is *not* null
+    // terminated. Returns how many characters were written into the buffer.
+    size_t functionDisplayName(RangedPtr<char16_t> destination, size_t length) const;
+
+    // JS::Traceable implementation just forwards to our virtual trace method.
+    static void trace(StackFrame* frame, JSTracer* trc) {
+        if (frame)
+            frame->trace(trc);
+    }
+
+    // Methods that forward to virtual calls through BaseStackFrame.
+
+    void trace(JSTracer* trc) { base()->trace(trc); }
+    uintptr_t identifier() const { return base()->identifier(); }
+    uint32_t line() const { return base()->line(); }
+    uint32_t column() const { return base()->column(); }
+    AtomOrTwoByteChars source() const { return base()->source(); }
+    AtomOrTwoByteChars functionDisplayName() const { return base()->functionDisplayName(); }
+    StackFrame parent() const { return base()->parent(); }
+    bool isSystem() const { return base()->isSystem(); }
+    bool isSelfHosted() const { return base()->isSelfHosted(); }
+    bool constructSavedFrameStack(JSContext* cx,
+                                  MutableHandleObject outSavedFrameStack) const {
+        return base()->constructSavedFrameStack(cx, outSavedFrameStack);
+    }
+
+    struct HashPolicy {
+        using Lookup = JS::ubi::StackFrame;
+
+        static js::HashNumber hash(const Lookup& lookup) {
+            return lookup.identifier();
+        }
+
+        static bool match(const StackFrame& key, const Lookup& lookup) {
+            return key == lookup;
+        }
+
+        static void rekey(StackFrame& k, const StackFrame& newKey) {
+            k = newKey;
+        }
+    };
+};
+
+// The ubi::StackFrame null pointer. Any attempt to operate on a null
+// ubi::StackFrame crashes.
+template<>
+class ConcreteStackFrame<void> : public BaseStackFrame {
+    explicit ConcreteStackFrame(void* ptr) : BaseStackFrame(ptr) { }
+
+  public:
+    static void construct(void* storage, void*) { new (storage) ConcreteStackFrame(nullptr); }
+
+    uintptr_t identifier() const override { return 0; }
+    void trace(JSTracer* trc) override { }
+    bool constructSavedFrameStack(JSContext* cx, MutableHandleObject out) const override {
+        out.set(nullptr);
+        return true;
+    }
+
+    uint32_t line() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    uint32_t column() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    AtomOrTwoByteChars source() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    AtomOrTwoByteChars functionDisplayName() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    StackFrame parent() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    bool isSystem() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+    bool isSelfHosted() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
+};
+
+
+/*** ubi::Node ************************************************************************************/
 
 // The base class implemented by each ubi::Node referent type. Subclasses must
 // not add data members to this class.
 class Base {
     friend class Node;
 
     // For performance's sake, we'd prefer to avoid a virtual destructor; and
     // an empty constructor seems consistent with the 'lightweight value type'
@@ -232,16 +503,26 @@ class Base {
     virtual JS::Zone* zone() const { return nullptr; }
 
     // Return the compartment for this node. Some ubi::Node referents are not
     // associated with JSCompartments, such as JSStrings (which are associated
     // with Zones). When the referent is not associated with a compartment,
     // nullptr is returned.
     virtual JSCompartment* compartment() const { return nullptr; }
 
+    // Return whether this node's referent's allocation stack was captured.
+    virtual bool hasAllocationStack() const { return false; }
+
+    // Get the stack recorded at the time this node's referent was
+    // allocated. This must only be called when hasAllocationStack() is true.
+    virtual StackFrame allocationStack() const {
+        MOZ_CRASH("Concrete classes that have an allocation stack must override both "
+                  "hasAllocationStack and allocationStack.");
+    }
+
     // Methods for JSObject Referents
     //
     // These methods are only semantically valid if the referent is either a
     // JSObject in the live heap, or represents a previously existing JSObject
     // from some deserialized heap snapshot.
 
     // Return the object's [[Class]]'s name.
     virtual const char* jsObjectClassName() const { return nullptr; }
@@ -395,16 +676,21 @@ class Node {
     size_t size(mozilla::MallocSizeOf mallocSizeof) const {
         return base()->size(mallocSizeof);
     }
 
     UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames = true) const {
         return base()->edges(cx, wantNames);
     }
 
+    bool hasAllocationStack() const { return base()->hasAllocationStack(); }
+    StackFrame allocationStack() const {
+        return base()->allocationStack();
+    }
+
     typedef Base::Id Id;
     Id identifier() const { return base()->identifier(); }
 
     // A hash policy for ubi::Nodes.
     // This simply uses the stock PointerHasher on the ubi::Node's pointer.
     // We specialize DefaultHasher below to make this the default.
     class HashPolicy {
         typedef js::PointerHasher<void*, mozilla::tl::FloorLog2<sizeof(void*)>::value> PtrHash;
@@ -414,16 +700,18 @@ class Node {
 
         static js::HashNumber hash(const Lookup& l) { return PtrHash::hash(l.base()->ptr); }
         static bool match(const Node& k, const Lookup& l) { return k == l; }
         static void rekey(Node& k, const Node& newKey) { k = newKey; }
     };
 };
 
 
+/*** Edge and EdgeRange ***************************************************************************/
+
 // Edge is the abstract base class representing an outgoing edge of a node.
 // Edges are owned by EdgeRanges, and need not have assignment operators or copy
 // constructors.
 //
 // Each Edge class should inherit from this base class, overriding as
 // appropriate.
 class Edge {
   protected:
@@ -544,16 +832,17 @@ class PreComputedEdgeRange : public Edge
 
     void popFront() override {
         MOZ_ASSERT(!empty());
         i++;
         settle();
     }
 };
 
+/*** RootList *************************************************************************************/
 
 // RootList is a class that can be pointed to by a |ubi::Node|, creating a
 // fictional root-of-roots which has edges to every GC root in the JS
 // runtime. Having a single root |ubi::Node| is useful for algorithms written
 // with the assumption that there aren't multiple roots (such as computing
 // dominator trees) and you want a single point of entry. It also ensures that
 // the roots themselves get visited by |ubi::BreadthFirst| (they would otherwise
 // only be used as starting points).
@@ -601,17 +890,17 @@ class MOZ_STACK_CLASS RootList {
 
     // Explicitly add the given Node as a root in this RootList. If wantNames is
     // true, you must pass an edgeName. The RootList does not take ownership of
     // edgeName.
     bool addRoot(Node node, const char16_t* edgeName = nullptr);
 };
 
 
-// Concrete classes for ubi::Node referent types.
+/*** Concrete classes for ubi::Node referent types ************************************************/
 
 template<>
 struct Concrete<RootList> : public Base {
     UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames) const override;
     const char16_t* typeName() const override { return concreteTypeName; }
 
   protected:
     explicit Concrete(RootList* ptr) : Base(ptr) { }
@@ -662,16 +951,19 @@ template<> struct Concrete<JSScript> : T
 // The JSObject specialization.
 template<>
 class Concrete<JSObject> : public TracerConcreteWithCompartment<JSObject> {
     const char* jsObjectClassName() const override;
     bool jsObjectConstructorName(JSContext* cx,
                                  UniquePtr<char16_t[], JS::FreePolicy>& outName) const override;
     size_t size(mozilla::MallocSizeOf mallocSizeOf) const override;
 
+    bool hasAllocationStack() const override;
+    StackFrame allocationStack() const override;
+
   protected:
     explicit Concrete(JSObject* ptr) : TracerConcreteWithCompartment(ptr) { }
 
   public:
     static void construct(void* storage, JSObject* ptr) {
         new (storage) Concrete(ptr);
     }
 };
@@ -706,12 +998,13 @@ class Concrete<void> : public Base {
 
 } // namespace ubi
 } // namespace JS
 
 namespace js {
 
 // Make ubi::Node::HashPolicy the default hash policy for ubi::Node.
 template<> struct DefaultHasher<JS::ubi::Node> : JS::ubi::Node::HashPolicy { };
+template<> struct DefaultHasher<JS::ubi::StackFrame> : JS::ubi::StackFrame::HashPolicy { };
 
 } // namespace js
 
 #endif // js_UbiNode_h
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -62,17 +62,18 @@ BaselineScript::BaselineScript(uint32_t 
 # endif
     traceLoggerEnterToggleOffset_(traceLoggerEnterToggleOffset),
     traceLoggerExitToggleOffset_(traceLoggerExitToggleOffset),
     traceLoggerScriptEvent_(),
 #endif
     postDebugPrologueOffset_(postDebugPrologueOffset),
     flags_(0),
     inlinedBytecodeLength_(0),
-    maxInliningDepth_(UINT8_MAX)
+    maxInliningDepth_(UINT8_MAX),
+    pendingBuilder_(nullptr)
 { }
 
 static const unsigned BASELINE_MAX_ARGS_LENGTH = 20000;
 
 static bool
 CheckFrame(InterpreterFrame* fp)
 {
     if (fp->isDebuggerEvalFrame()) {
@@ -468,16 +469,18 @@ BaselineScript::Destroy(FreeOp* fop, Bas
      * When the script contains pointers to nursery things, the store buffer
      * will contain entries refering to the referenced things. Since we can
      * destroy scripts outside the context of a GC, this situation can result
      * in invalid store buffer entries. Assert that if we do destroy scripts
      * outside of a GC that we at least emptied the nursery first.
      */
     MOZ_ASSERT(fop->runtime()->gc.nursery.isEmpty());
 
+    MOZ_ASSERT(!script->hasPendingIonBuilder());
+
     script->unlinkDependentAsmJSModules(fop);
 
     fop->delete_(script);
 }
 
 void
 BaselineScript::unlinkDependentAsmJSModules(FreeOp* fop)
 {
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -219,16 +219,19 @@ struct BaselineScript
 
     // The max inlining depth where we can still inline all functions we inlined
     // when we Ion-compiled this script. This starts as UINT8_MAX, since we have
     // no data yet, and won't affect inlining heuristics in that case. The value
     // is updated when we Ion-compile this script. See makeInliningDecision for
     // more info.
     uint8_t maxInliningDepth_;
 
+    // An ion compilation that is ready, but isn't linked yet.
+    IonBuilder *pendingBuilder_;
+
   public:
     // Do not call directly, use BaselineScript::New. This is public for cx->new_.
     BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset,
                    uint32_t profilerEnterToggleOffset,
                    uint32_t profilerExitToggleOffset,
                    uint32_t traceLoggerEnterToggleOffset,
                    uint32_t traceLoggerExitToggleOffset,
                    uint32_t postDebugPrologueOffset);
@@ -451,16 +454,42 @@ struct BaselineScript
     uint16_t inlinedBytecodeLength() const {
         return inlinedBytecodeLength_;
     }
     void setInlinedBytecodeLength(uint32_t len) {
         if (len > UINT16_MAX)
             len = UINT16_MAX;
         inlinedBytecodeLength_ = len;
     }
+
+    bool hasPendingIonBuilder() const {
+        return !!pendingBuilder_;
+    }
+
+    js::jit::IonBuilder* pendingIonBuilder() {
+        MOZ_ASSERT(hasPendingIonBuilder());
+        return pendingBuilder_;
+    }
+    void setPendingIonBuilder(JSContext* maybecx, JSScript* script, js::jit::IonBuilder* builder) {
+        MOZ_ASSERT(script->baselineScript() == this);
+        MOZ_ASSERT(!builder || !hasPendingIonBuilder());
+
+        if (script->isIonCompilingOffThread())
+            script->setIonScript(maybecx, ION_PENDING_SCRIPT);
+
+        pendingBuilder_ = builder;
+
+        script->updateBaselineOrIonRaw(maybecx);
+    }
+    void removePendingIonBuilder(JSScript* script) {
+        setPendingIonBuilder(nullptr, script, nullptr);
+        if (script->maybeIonScript() == ION_PENDING_SCRIPT)
+            script->setIonScript(nullptr, nullptr);
+    }
+
 };
 static_assert(sizeof(BaselineScript) % sizeof(uintptr_t) == 0,
               "The data attached to the script must be aligned for fast JIT access.");
 
 inline bool
 IsBaselineEnabled(JSContext* cx)
 {
 #ifdef JS_CODEGEN_NONE
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -463,18 +463,21 @@ PrepareForDebuggerOnIonCompilationHook(J
         info->numBlocks = 0;
     }
 }
 
 void
 jit::FinishOffThreadBuilder(JSContext* cx, IonBuilder* builder)
 {
     // Clean the references to the pending IonBuilder, if we just finished it.
-    if (builder->script()->hasIonScript() && builder->script()->pendingIonBuilder() == builder)
-        builder->script()->setPendingIonBuilder(cx, nullptr);
+    if (builder->script()->baselineScript()->hasPendingIonBuilder() &&
+        builder->script()->baselineScript()->pendingIonBuilder() == builder)
+    {
+        builder->script()->baselineScript()->removePendingIonBuilder(builder->script());
+    }
 
     // If the builder is still in one of the helper thread list, then remove it.
     if (builder->isInList())
         builder->removeFrom(HelperThreadState().ionLazyLinkList());
 
     // Clear the recompiling flag of the old ionScript, since we continue to
     // use the old ionScript if recompiling fails.
     if (builder->script()->hasIonScript())
@@ -561,31 +564,23 @@ LinkBackgroundCodeGen(JSContext* cx, Ion
     // Root the assembler until the builder is finished below. As it was
     // constructed off thread, the assembler has not been rooted previously,
     // though any GC activity would discard the builder.
     codegen->masm.constructRoot(cx);
 
     return LinkCodeGen(cx, builder, codegen, scripts, info);
 }
 
-uint8_t*
-jit::LazyLinkTopActivation(JSContext* cx)
+void
+jit::LazyLink(JSContext* cx, HandleScript calleeScript)
 {
-    JitActivationIterator iter(cx->runtime());
-    AutoLazyLinkExitFrame lazyLinkExitFrame(iter->asJit());
-
-    // First frame should be an exit frame.
-    JitFrameIterator it(iter);
-    LazyLinkExitFrameLayout* ll = it.exitFrame()->as<LazyLinkExitFrameLayout>();
-    RootedScript calleeScript(cx, ScriptFromCalleeToken(ll->jsFrame()->calleeToken()));
-
-
     // Get the pending builder from the Ion frame.
-    IonBuilder* builder = calleeScript->ionScript()->pendingBuilder();
-    calleeScript->setPendingIonBuilder(cx, nullptr);
+    MOZ_ASSERT(calleeScript->hasBaselineScript());
+    IonBuilder* builder = calleeScript->baselineScript()->pendingIonBuilder();
+    calleeScript->baselineScript()->removePendingIonBuilder(calleeScript);
 
     // See PrepareForDebuggerOnIonCompilationHook
     AutoScriptVector debugScripts(cx);
     OnIonCompilationInfo info(builder->alloc().lifoAlloc());
 
     // Remove from pending.
     builder->removeFrom(HelperThreadState().ionLazyLinkList());
 
@@ -601,16 +596,30 @@ jit::LazyLinkTopActivation(JSContext* cx
 
     if (info.filled())
         Debugger::onIonCompilation(cx, debugScripts, info.graph);
 
     FinishOffThreadBuilder(cx, builder);
 
     MOZ_ASSERT(calleeScript->hasBaselineScript());
     MOZ_ASSERT(calleeScript->baselineOrIonRawPointer());
+}
+
+uint8_t*
+jit::LazyLinkTopActivation(JSContext* cx)
+{
+    JitActivationIterator iter(cx->runtime());
+    AutoLazyLinkExitFrame lazyLinkExitFrame(iter->asJit());
+
+    // First frame should be an exit frame.
+    JitFrameIterator it(iter);
+    LazyLinkExitFrameLayout* ll = it.exitFrame()->as<LazyLinkExitFrameLayout>();
+    RootedScript calleeScript(cx, ScriptFromCalleeToken(ll->jsFrame()->calleeToken()));
+
+    LazyLink(cx, calleeScript);
 
     return calleeScript->baselineOrIonRawPointer();
 }
 
 /* static */ void
 JitRuntime::Mark(JSTracer* trc)
 {
     MOZ_ASSERT(!trc->runtime()->isHeapMinorCollecting());
@@ -884,18 +893,17 @@ IonScript::IonScript()
     snapshotsListSize_(0),
     snapshotsRVATableSize_(0),
     constantTable_(0),
     constantEntries_(0),
     backedgeList_(0),
     backedgeEntries_(0),
     invalidationCount_(0),
     recompileInfo_(),
-    osrPcMismatchCounter_(0),
-    pendingBuilder_(nullptr)
+    osrPcMismatchCounter_(0)
 {
 }
 
 IonScript*
 IonScript::New(JSContext* cx, RecompileInfo recompileInfo,
                uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize,
                size_t snapshotsListSize, size_t snapshotsRVATableSize,
                size_t recoversSize, size_t bailoutEntries,
@@ -1205,19 +1213,16 @@ IonScript::Trace(JSTracer* trc, IonScrip
 {
     if (script != ION_DISABLED_SCRIPT)
         script->trace(trc);
 }
 
 void
 IonScript::Destroy(FreeOp* fop, IonScript* script)
 {
-    if (script->pendingBuilder())
-        jit::FinishOffThreadBuilder(nullptr, script->pendingBuilder());
-
     script->unlinkFromRuntime(fop);
     fop->free_(script);
 }
 
 void
 IonScript::toggleBarriers(bool enabled)
 {
     method()->togglePreBarriers(enabled);
@@ -1758,115 +1763,44 @@ GetFinishedBuilder(JSContext* cx, Global
             HelperThreadState().remove(finished, &i);
             return testBuilder;
         }
     }
 
     return nullptr;
 }
 
-static bool
-IsBuilderScriptOnStack(JSContext* cx, IonBuilder* builder)
-{
-    for (JitActivationIterator iter(cx->runtime()); !iter.done(); ++iter) {
-        for (JitFrameIterator it(iter); !it.done(); ++it) {
-            if (!it.isIonJS())
-                continue;
-            if (it.checkInvalidation())
-                continue;
-
-            JSScript* script = it.script();
-            if (builder->script() == script)
-                return true;
-        }
-    }
-
-    return false;
-}
-
 void
 AttachFinishedCompilations(JSContext* cx)
 {
     JitCompartment* ion = cx->compartment()->jitCompartment();
     if (!ion)
         return;
 
-    LifoAlloc* debuggerAlloc = cx->new_<LifoAlloc>(TempAllocator::PreferredLifoChunkSize);
-    if (!debuggerAlloc) {
-        // Silently ignore OOM during code generation. The caller is
-        // InvokeInterruptCallback, which always runs at a nondeterministic
-        // time. It's not OK to throw a catchable exception from there.
-        cx->clearPendingException();
-        return;
-    }
-
-    // See PrepareForDebuggerOnIonCompilationHook
-    AutoScriptVector debugScripts(cx);
-    OnIonCompilationVector onIonCompilationVector(cx);
-
     {
         AutoEnterAnalysis enterTypes(cx);
         AutoLockHelperThreadState lock;
 
         GlobalHelperThreadState::IonBuilderVector& finished = HelperThreadState().ionFinishedList();
 
         // Incorporate any off thread compilations for the compartment which have
         // finished, failed or have been cancelled.
         while (true) {
             // Find a finished builder for the compartment.
             IonBuilder* builder = GetFinishedBuilder(cx, finished);
             if (!builder)
                 break;
 
-            // Try to defer linking if the script is on the stack, to postpone
-            // invalidating them.
-            if (builder->script()->hasIonScript() && IsBuilderScriptOnStack(cx, builder)) {
-                builder->script()->setPendingIonBuilder(cx, builder);
-                HelperThreadState().ionLazyLinkList().insertFront(builder);
-                continue;
-            }
-
-            AutoUnlockHelperThreadState unlock;
-
-            OnIonCompilationInfo info(debuggerAlloc);
-            if (!LinkBackgroundCodeGen(cx, builder, &debugScripts, &info)) {
-                // Silently ignore OOM during code generation. The caller is
-                // InvokeInterruptCallback, which always runs at a
-                // nondeterministic time. It's not OK to throw a catchable
-                // exception from there.
-                cx->clearPendingException();
-            }
-
-            if (info.filled()) {
-                if (!onIonCompilationVector.append(info))
-                    cx->clearPendingException();
-            }
-
-            FinishOffThreadBuilder(cx, builder);
+            JSScript* script = builder->script();
+            MOZ_ASSERT(script->hasBaselineScript());
+            script->baselineScript()->setPendingIonBuilder(cx, script, builder);
+            HelperThreadState().ionLazyLinkList().insertFront(builder);
+            continue;
         }
     }
-
-    for (size_t i = 0; i < onIonCompilationVector.length(); i++) {
-        OnIonCompilationInfo& info = onIonCompilationVector[i];
-
-        // As it is easier to root a vector, instead of a vector of vector, we
-        // slice for each compilation.
-        AutoScriptVector sliceScripts(cx);
-        if (!sliceScripts.reserve(info.numBlocks)) {
-            cx->clearPendingException();
-            continue;
-        }
-
-        for (size_t b = 0; b < info.numBlocks; b++)
-            sliceScripts.infallibleAppend(debugScripts[info.scriptIndex + b]);
-
-        Debugger::onIonCompilation(cx, sliceScripts, info.graph);
-    }
-
-    js_delete(debuggerAlloc);
 }
 
 static void
 TrackAllProperties(JSContext* cx, JSObject* obj)
 {
     MOZ_ASSERT(obj->isSingleton());
 
     for (Shape::Range<NoGC> range(obj->as<NativeObject>().lastProperty()); !range.empty(); range.popFront())
@@ -2210,18 +2144,18 @@ Compile(JSContext* cx, HandleScript scri
         return status;
     }
 
     bool recompile = false;
     OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc);
     if (optimizationLevel == Optimization_DontCompile)
         return Method_Skipped;
 
-    IonScript* scriptIon = script->maybeIonScript();
-    if (scriptIon) {
+    if (script->hasIonScript()) {
+        IonScript* scriptIon = script->ionScript();
         if (!scriptIon->method())
             return Method_CantCompile;
 
         // Don't recompile/overwrite higher optimized code,
         // with a lower optimization level.
         if (optimizationLevel <= scriptIon->optimizationLevel() && !forceRecompile)
             return Method_Compiled;
 
@@ -2230,16 +2164,24 @@ Compile(JSContext* cx, HandleScript scri
             return Method_Compiled;
 
         if (osrPc)
             scriptIon->resetOsrPcMismatchCounter();
 
         recompile = true;
     }
 
+    if (script->baselineScript()->hasPendingIonBuilder()) {
+        IonBuilder* buildIon = script->baselineScript()->pendingIonBuilder();
+        if (optimizationLevel <= buildIon->optimizationInfo().level() && !forceRecompile)
+            return Method_Compiled;
+
+        recompile = true;
+    }
+
     AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing,
                                     recompile, optimizationLevel);
     if (reason == AbortReason_Error)
         return Method_Error;
 
     if (reason == AbortReason_Disable)
         return Method_CantCompile;
 
@@ -2268,17 +2210,17 @@ jit::OffThreadCompilationAvailable(JSCon
     return cx->runtime()->canUseOffthreadIonCompilation()
         && HelperThreadState().cpuCount > 1
         && CanUseExtraThreads();
 }
 
 // Decide if a transition from interpreter execution to Ion code should occur.
 // May compile or recompile the target JSScript.
 MethodStatus
-jit::CanEnterAtBranch(JSContext* cx, JSScript* script, BaselineFrame* osrFrame, jsbytecode* pc)
+jit::CanEnterAtBranch(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* pc)
 {
     MOZ_ASSERT(jit::IsIonEnabled(cx));
     MOZ_ASSERT((JSOp)*pc == JSOP_LOOPENTRY);
     MOZ_ASSERT(LoopEntryCanIonOsr(pc));
 
     // Skip if the script has been disabled.
     if (!script->canIonCompile())
         return Method_Skipped;
@@ -2296,16 +2238,21 @@ jit::CanEnterAtBranch(JSContext* cx, JSS
         return Method_Skipped;
 
     // Mark as forbidden if frame can't be handled.
     if (!CheckFrame(cx, osrFrame)) {
         ForbidCompilation(cx, script);
         return Method_CantCompile;
     }
 
+    // Check if the jitcode still needs to get linked and do this
+    // to have a valid IonScript.
+    if (script->baselineScript()->hasPendingIonBuilder())
+        LazyLink(cx, script);
+
     // By default a recompilation doesn't happen on osr mismatch.
     // Decide if we want to force a recompilation if this happens too much.
     bool force = false;
     if (script->hasIonScript() && pc != script->ionScript()->osrPc()) {
         uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
         if (count <= js_JitOptions.osrPcMismatchesBeforeRecompile)
             return Method_Skipped;
         force = true;
@@ -2324,17 +2271,17 @@ jit::CanEnterAtBranch(JSContext* cx, JSS
             ForbidCompilation(cx, script);
         return status;
     }
 
     // Return the compilation was skipped when the osr pc wasn't adjusted.
     // This can happen when there was still an IonScript available and a
     // background compilation started, but hasn't finished yet.
     // Or when we didn't force a recompile.
-    if (pc != script->ionScript()->osrPc())
+    if (script->hasIonScript() && pc != script->ionScript()->osrPc())
         return Method_Skipped;
 
     return Method_Compiled;
 }
 
 MethodStatus
 jit::CanEnter(JSContext* cx, RunState& state)
 {
@@ -2392,16 +2339,22 @@ jit::CanEnter(JSContext* cx, RunState& s
     bool constructing = state.isInvoke() && state.asInvoke()->constructing();
     MethodStatus status = Compile(cx, rscript, nullptr, nullptr, constructing);
     if (status != Method_Compiled) {
         if (status == Method_CantCompile)
             ForbidCompilation(cx, rscript);
         return status;
     }
 
+    if (state.script()->baselineScript()->hasPendingIonBuilder()) {
+        LazyLink(cx, state.script());
+        if (!state.script()->hasIonScript())
+            return jit::Method_Skipped;
+    }
+
     return Method_Compiled;
 }
 
 MethodStatus
 jit::CompileFunctionForBaseline(JSContext* cx, HandleScript script, BaselineFrame* frame)
 {
     MOZ_ASSERT(jit::IsIonEnabled(cx));
     MOZ_ASSERT(frame->fun()->nonLazyScript()->canIonCompile());
--- a/js/src/jit/Ion.h
+++ b/js/src/jit/Ion.h
@@ -77,17 +77,17 @@ bool InitializeIon();
 // Get and set the current JIT context.
 JitContext* GetJitContext();
 JitContext* MaybeGetJitContext();
 
 void SetJitContext(JitContext* ctx);
 
 bool CanIonCompileScript(JSContext* cx, JSScript* script, bool osr);
 
-MethodStatus CanEnterAtBranch(JSContext* cx, JSScript* script,
+MethodStatus CanEnterAtBranch(JSContext* cx, HandleScript script,
                               BaselineFrame* frame, jsbytecode* pc);
 MethodStatus CanEnter(JSContext* cx, RunState& state);
 MethodStatus CompileFunctionForBaseline(JSContext* cx, HandleScript script, BaselineFrame* frame);
 MethodStatus CanEnterUsingFastInvoke(JSContext* cx, HandleScript script, uint32_t numActualArgs);
 
 MethodStatus
 Recompile(JSContext* cx, HandleScript script, BaselineFrame* osrFrame, jsbytecode* osrPc,
           bool constructing, bool force);
@@ -142,16 +142,17 @@ LIRGraph* GenerateLIR(MIRGenerator* mir)
 CodeGenerator* GenerateCode(MIRGenerator* mir, LIRGraph* lir);
 CodeGenerator* CompileBackEnd(MIRGenerator* mir);
 
 void AttachFinishedCompilations(JSContext* cx);
 void FinishOffThreadBuilder(JSContext* cx, IonBuilder* builder);
 void StopAllOffThreadCompilations(Zone* zone);
 void StopAllOffThreadCompilations(JSCompartment* comp);
 
+void LazyLink(JSContext* cx, HandleScript calleescript);
 uint8_t* LazyLinkTopActivation(JSContext* cx);
 
 static inline bool
 IsIonEnabled(JSContext* cx)
 {
 #ifdef JS_CODEGEN_NONE
     return false;
 #else
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -270,36 +270,26 @@ struct IonScript
 
     // Number of times we tried to enter this script via OSR but failed due to
     // a LOOPENTRY pc other than osrPc_.
     uint32_t osrPcMismatchCounter_;
 
     // The tracelogger event used to log the start/stop of this IonScript.
     TraceLoggerEvent traceLoggerScriptEvent_;
 
-    IonBuilder* pendingBuilder_;
-
   private:
     inline uint8_t* bottomBuffer() {
         return reinterpret_cast<uint8_t*>(this);
     }
     inline const uint8_t* bottomBuffer() const {
         return reinterpret_cast<const uint8_t*>(this);
     }
 
   public:
 
-    // SHOULD ONLY BE CALLED FROM JSScript
-    void setPendingBuilderPrivate(IonBuilder* builder) {
-        pendingBuilder_ = builder;
-    }
-    IonBuilder* pendingBuilder() const {
-        return pendingBuilder_;
-    }
-
     SnapshotOffset* bailoutTable() {
         return (SnapshotOffset*) &bottomBuffer()[bailoutTable_];
     }
     PreBarrieredValue* constants() {
         return (PreBarrieredValue*) &bottomBuffer()[constantTable_];
     }
     const SafepointIndex* safepointIndices() const {
         return const_cast<IonScript*>(this)->safepointIndices();
--- a/js/src/jsapi-tests/testUbiNode.cpp
+++ b/js/src/jsapi-tests/testUbiNode.cpp
@@ -1,15 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "builtin/TestingFunctions.h"
 #include "js/UbiNode.h"
-
 #include "jsapi-tests/tests.h"
+#include "vm/SavedFrame.h"
 
 using JS::RootedObject;
 using JS::RootedScript;
 using JS::RootedString;
 
 // ubi::Node::zone works
 BEGIN_TEST(test_ubiNodeZone)
 {
@@ -98,8 +99,99 @@ BEGIN_TEST(test_ubiNodeJSObjectConstruct
     mozilla::UniquePtr<char16_t[], JS::FreePolicy> ctorName;
     CHECK(JS::ubi::Node(&val.toObject()).jsObjectConstructorName(cx, ctorName));
     CHECK(ctorName);
     CHECK(js_strcmp(ctorName.get(), MOZ_UTF16("Ctor")) == 0);
 
     return true;
 }
 END_TEST(test_ubiNodeJSObjectConstructorName)
+
+template <typename F, typename G>
+static bool
+checkString(const char* expected, F fillBufferFunction, G stringGetterFunction)
+{
+    auto expectedLength = strlen(expected);
+    char16_t buf[1024];
+    if (fillBufferFunction(mozilla::RangedPtr<char16_t>(buf, 1024), 1024) != expectedLength ||
+        !EqualChars(buf, expected, expectedLength))
+    {
+        return false;
+    }
+
+    auto string = stringGetterFunction();
+    // Expecting a |JSAtom*| from a live |JS::ubi::StackFrame|.
+    if (!string.template is<JSAtom*>() ||
+        !StringEqualsAscii(string.template as<JSAtom*>(), expected))
+    {
+        return false;
+    }
+
+    return true;
+}
+
+BEGIN_TEST(test_ubiStackFrame)
+{
+    CHECK(js::DefineTestingFunctions(cx, global, false));
+
+    JS::RootedValue val(cx);
+    CHECK(evaluate("(function one() {                      \n"  // 1
+                   "  return (function two() {             \n"  // 2
+                   "    return (function three() {         \n"  // 3
+                   "      return saveStack();              \n"  // 4
+                   "    }());                              \n"  // 5
+                   "  }());                                \n"  // 6
+                   "}());                                  \n", // 7
+                   "filename.js",
+                   1,
+                   &val));
+
+    CHECK(val.isObject());
+    JS::RootedObject obj(cx, &val.toObject());
+
+    CHECK(obj->is<SavedFrame>());
+    JS::Rooted<SavedFrame*> savedFrame(cx, &obj->as<SavedFrame>());
+
+    JS::ubi::StackFrame ubiFrame(savedFrame);
+
+    // All frames should be from the "filename.js" source.
+    while (ubiFrame) {
+        CHECK(checkString("filename.js",
+                          [&] (mozilla::RangedPtr<char16_t> ptr, size_t length) {
+                              return ubiFrame.source(ptr, length);
+                          },
+                          [&] {
+                              return ubiFrame.source();
+                          }));
+        ubiFrame = ubiFrame.parent();
+    }
+
+    ubiFrame = savedFrame;
+
+    auto bufferFunctionDisplayName = [&] (mozilla::RangedPtr<char16_t> ptr, size_t length) {
+        return ubiFrame.functionDisplayName(ptr, length);
+    };
+    auto getFunctionDisplayName = [&] {
+        return ubiFrame.functionDisplayName();
+    };
+
+    CHECK(checkString("three", bufferFunctionDisplayName, getFunctionDisplayName));
+    CHECK(ubiFrame.line() == 4);
+
+    ubiFrame = ubiFrame.parent();
+    CHECK(checkString("two", bufferFunctionDisplayName, getFunctionDisplayName));
+    CHECK(ubiFrame.line() == 3);
+
+    ubiFrame = ubiFrame.parent();
+    CHECK(checkString("one", bufferFunctionDisplayName, getFunctionDisplayName));
+    CHECK(ubiFrame.line() == 2);
+
+    ubiFrame = ubiFrame.parent();
+    CHECK(ubiFrame.functionDisplayName().is<JSAtom*>());
+    CHECK(ubiFrame.functionDisplayName().as<JSAtom*>() == nullptr);
+    CHECK(ubiFrame.line() == 1);
+
+    ubiFrame = ubiFrame.parent();
+    CHECK(!ubiFrame);
+
+    return true;
+}
+END_TEST(test_ubiStackFrame)
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1323,16 +1323,27 @@ static inline ScriptCountsMap::Ptr GetSc
 js::PCCounts&
 JSScript::getPCCounts(jsbytecode* pc) {
     MOZ_ASSERT(containsPC(pc));
     ScriptCountsMap::Ptr p = GetScriptCountsMapEntry(this);
     return p->value().pcCountsVector[pcToOffset(pc)];
 }
 
 void
+JSScript::setIonScript(JSContext* maybecx, js::jit::IonScript* ionScript)
+{
+    MOZ_ASSERT_IF(ionScript != ION_DISABLED_SCRIPT, !baselineScript()->hasPendingIonBuilder());
+    if (hasIonScript())
+        js::jit::IonScript::writeBarrierPre(zone(), ion);
+    ion = ionScript;
+    MOZ_ASSERT_IF(hasIonScript(), hasBaselineScript());
+    updateBaselineOrIonRaw(maybecx);
+}
+
+void
 JSScript::addIonCounts(jit::IonScriptCounts* ionCounts)
 {
     ScriptCountsMap::Ptr p = GetScriptCountsMapEntry(this);
     if (p->value().ionCounts)
         ionCounts->setPrevious(p->value().ionCounts);
     p->value().ionCounts = ionCounts;
 }
 
@@ -4040,25 +4051,24 @@ LazyScript::hasUncompiledEnclosingScript
 
     JSFunction& fun = enclosingScope()->as<JSFunction>();
     return !fun.hasScript() || fun.hasUncompiledScript() || !fun.nonLazyScript()->code();
 }
 
 void
 JSScript::updateBaselineOrIonRaw(JSContext* maybecx)
 {
-    if (hasIonScript()) {
-        if (ion->pendingBuilder()) {
-            MOZ_ASSERT(maybecx);
-            baselineOrIonRaw = maybecx->runtime()->jitRuntime()->lazyLinkStub()->raw();
-            baselineOrIonSkipArgCheck = maybecx->runtime()->jitRuntime()->lazyLinkStub()->raw();
-        } else {
-            baselineOrIonRaw = ion->method()->raw();
-            baselineOrIonSkipArgCheck = ion->method()->raw() + ion->getSkipArgCheckEntryOffset();
-        }
+    if (hasBaselineScript() && baseline->hasPendingIonBuilder()) {
+        MOZ_ASSERT(maybecx);
+        MOZ_ASSERT(!isIonCompilingOffThread());
+        baselineOrIonRaw = maybecx->runtime()->jitRuntime()->lazyLinkStub()->raw();
+        baselineOrIonSkipArgCheck = maybecx->runtime()->jitRuntime()->lazyLinkStub()->raw();
+    } else if (hasIonScript()) {
+        baselineOrIonRaw = ion->method()->raw();
+        baselineOrIonSkipArgCheck = ion->method()->raw() + ion->getSkipArgCheckEntryOffset();
     } else if (hasBaselineScript()) {
         baselineOrIonRaw = baseline->method()->raw();
         baselineOrIonSkipArgCheck = baseline->method()->raw();
     } else {
         baselineOrIonRaw = nullptr;
         baselineOrIonSkipArgCheck = nullptr;
     }
 }
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -33,16 +33,17 @@ namespace js {
 
 namespace jit {
     struct BaselineScript;
     struct IonScriptCounts;
 } // namespace jit
 
 # define ION_DISABLED_SCRIPT ((js::jit::IonScript*)0x1)
 # define ION_COMPILING_SCRIPT ((js::jit::IonScript*)0x2)
+# define ION_PENDING_SCRIPT ((js::jit::IonScript*)0x3)
 
 # define BASELINE_DISABLED_SCRIPT ((js::jit::BaselineScript*)0x1)
 
 class BreakpointSite;
 class BindingIter;
 class Debugger;
 class LazyScript;
 class RegExpObject;
@@ -960,18 +961,25 @@ class JSScript : public js::gc::TenuredC
     //
     // (When we clone a JSScript into a new compartment, we don't clone its
     // source object. Instead, the clone refers to a wrapper.)
     js::HeapPtrObject sourceObject_;
 
     js::HeapPtrFunction function_;
     js::HeapPtrObject   enclosingStaticScope_;
 
-    /* Information attached by Baseline/Ion for sequential mode execution. */
+    /*
+     * Information attached by Ion. Nexto a valid IonScript this could be
+     * ION_DISABLED_SCRIPT, ION_COMPILING_SCRIPT or ION_PENDING_SCRIPT.
+     * The later is a ion compilation that is ready, but hasn't been linked
+     * yet.
+     */
     js::jit::IonScript* ion;
+
+    /* Information attached by Baseline. */
     js::jit::BaselineScript* baseline;
 
     /* Information used to re-lazify a lazily-parsed interpreted function. */
     js::LazyScript* lazyScript;
 
     /*
      * Pointer to either baseline->method()->raw() or ion->method()->raw(), or
      * nullptr if there's no Baseline or Ion script.
@@ -1467,45 +1475,39 @@ class JSScript : public js::gc::TenuredC
         doNotRelazify_ = b;
     }
 
     bool hasAnyIonScript() const {
         return hasIonScript();
     }
 
     bool hasIonScript() const {
-        bool res = ion && ion != ION_DISABLED_SCRIPT && ion != ION_COMPILING_SCRIPT;
+        bool res = ion && ion != ION_DISABLED_SCRIPT && ion != ION_COMPILING_SCRIPT &&
+                          ion != ION_PENDING_SCRIPT;
         MOZ_ASSERT_IF(res, baseline);
         return res;
     }
     bool canIonCompile() const {
         return ion != ION_DISABLED_SCRIPT;
     }
-
     bool isIonCompilingOffThread() const {
         return ion == ION_COMPILING_SCRIPT;
     }
 
     js::jit::IonScript* ionScript() const {
         MOZ_ASSERT(hasIonScript());
         return ion;
     }
     js::jit::IonScript* maybeIonScript() const {
         return ion;
     }
     js::jit::IonScript* const* addressOfIonScript() const {
         return &ion;
     }
-    void setIonScript(JSContext* maybecx, js::jit::IonScript* ionScript) {
-        if (hasIonScript())
-            js::jit::IonScript::writeBarrierPre(zone(), ion);
-        ion = ionScript;
-        MOZ_ASSERT_IF(hasIonScript(), hasBaselineScript());
-        updateBaselineOrIonRaw(maybecx);
-    }
+    void setIonScript(JSContext* maybecx, js::jit::IonScript* ionScript);
 
     bool hasBaselineScript() const {
         bool res = baseline && baseline != BASELINE_DISABLED_SCRIPT;
         MOZ_ASSERT_IF(!res, !ion || ion == ION_DISABLED_SCRIPT);
         return res;
     }
     bool canBaselineCompile() const {
         return baseline != BASELINE_DISABLED_SCRIPT;
@@ -1513,26 +1515,16 @@ class JSScript : public js::gc::TenuredC
     js::jit::BaselineScript* baselineScript() const {
         MOZ_ASSERT(hasBaselineScript());
         return baseline;
     }
     inline void setBaselineScript(JSContext* maybecx, js::jit::BaselineScript* baselineScript);
 
     void updateBaselineOrIonRaw(JSContext* maybecx);
 
-    void setPendingIonBuilder(JSContext* maybecx, js::jit::IonBuilder* builder) {
-        MOZ_ASSERT(!builder || !ion->pendingBuilder());
-        ion->setPendingBuilderPrivate(builder);
-        updateBaselineOrIonRaw(maybecx);
-    }
-    js::jit::IonBuilder* pendingIonBuilder() {
-        MOZ_ASSERT(hasIonScript());
-        return ion->pendingBuilder();
-    }
-
     static size_t offsetOfBaselineScript() {
         return offsetof(JSScript, baseline);
     }
     static size_t offsetOfIonScript() {
         return offsetof(JSScript, ion);
     }
     static size_t offsetOfBaselineOrIonRaw() {
         return offsetof(JSScript, baselineOrIonRaw);
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -6948,26 +6948,26 @@ DebuggerObject_getGlobal(JSContext* cx, 
 
 static bool
 null(CallArgs& args)
 {
     args.rval().setNull();
     return true;
 }
 
-/* static */ JSObject*
+/* static */ SavedFrame*
 Debugger::getObjectAllocationSite(JSObject& obj)
 {
     JSObject* metadata = GetObjectMetadata(&obj);
     if (!metadata)
         return nullptr;
 
     MOZ_ASSERT(!metadata->is<WrapperObject>());
     return SavedFrame::isSavedFrameAndNotProto(*metadata)
-        ? metadata
+        ? &metadata->as<SavedFrame>()
         : nullptr;
 }
 
 static bool
 DebuggerObject_getAllocationSite(JSContext* cx, unsigned argc, Value* vp)
 {
     THIS_DEBUGOBJECT_REFERENT(cx, argc, vp, "get allocationSite", args, obj);
 
--- a/js/src/vm/Debugger.h
+++ b/js/src/vm/Debugger.h
@@ -282,17 +282,17 @@ class Debugger : private mozilla::Linked
         return trackingTenurePromotions;
     }
 
     bool isEnabled() const {
         return enabled;
     }
 
     void logTenurePromotion(JSRuntime* rt, JSObject& obj, double when);
-    static JSObject* getObjectAllocationSite(JSObject& obj);
+    static SavedFrame* getObjectAllocationSite(JSObject& obj);
 
     struct TenurePromotionsLogEntry : public JS::Traceable
     {
         TenurePromotionsLogEntry(JSRuntime* rt, JSObject& obj, double when);
 
         const char* className;
         double when;
         RelocatablePtrObject frame;
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -176,17 +176,17 @@ js::CancelOffThreadIonCompile(JSCompartm
         }
     }
 
     /* Cancel lazy linking for pending builders (attached to the ionScript). */
     jit::IonBuilder* builder = HelperThreadState().ionLazyLinkList().getFirst();
     while (builder) {
         jit::IonBuilder* next = builder->getNext();
         if (CompiledScriptMatches(compartment, script, builder->script())) {
-            builder->script()->setPendingIonBuilder(nullptr, nullptr);
+            builder->script()->baselineScript()->removePendingIonBuilder(builder->script());
             jit::FinishOffThreadBuilder(nullptr, builder);
         }
         builder = next;
     }
 }
 
 static const JSClass parseTaskGlobalClass = {
     "internal-parse-task-global", JSCLASS_GLOBAL_FLAGS,
--- a/js/src/vm/SavedFrame.h
+++ b/js/src/vm/SavedFrame.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_SavedFrame_h
 #define vm_SavedFrame_h
 
+#include "js/UbiNode.h"
+
 namespace js {
 
 class SavedFrame : public NativeObject {
     friend class SavedStacks;
 
   public:
     static const Class          class_;
     static const JSPropertySpec protoAccessors[];
@@ -104,9 +106,54 @@ struct SavedFrame::HashPolicy
 };
 
 // Assert that if the given object is not null, that it must be either a
 // SavedFrame object or wrapper (Xray or CCW) around a SavedFrame object.
 inline void AssertObjectIsSavedFrameOrWrapper(JSContext* cx, HandleObject stack);
 
 } // namespace js
 
+namespace JS {
+namespace ubi {
+
+using js::SavedFrame;
+
+// A concrete JS::ubi::StackFrame that is backed by a live SavedFrame object.
+template<>
+class ConcreteStackFrame<SavedFrame> : public BaseStackFrame {
+    explicit ConcreteStackFrame(SavedFrame* ptr) : BaseStackFrame(ptr) { }
+    SavedFrame& get() const { return *static_cast<SavedFrame*>(ptr); }
+
+  public:
+    static void construct(void* storage, SavedFrame* ptr) { new (storage) ConcreteStackFrame(ptr); }
+
+    StackFrame parent() const override { return get().getParent(); }
+    uint32_t line() const override { return get().getLine(); }
+    uint32_t column() const override { return get().getColumn(); }
+
+    AtomOrTwoByteChars source() const override {
+        auto source = get().getSource();
+        return AtomOrTwoByteChars(source);
+    }
+
+    AtomOrTwoByteChars functionDisplayName() const override {
+        auto name = get().getFunctionDisplayName();
+        return AtomOrTwoByteChars(name);
+    }
+
+    void trace(JSTracer* trc) override {
+        JSObject* obj = &get();
+        js::TraceManuallyBarrieredEdge(trc, &obj, "ConcreteStackFrame<SavedFrame>::ptr");
+        ptr = obj;
+    }
+
+    bool isSelfHosted() const override { return get().isSelfHosted(); }
+
+    bool isSystem() const override;
+
+    bool constructSavedFrameStack(JSContext* cx,
+                                 MutableHandleObject outSavedFrameStack) const override;
+};
+
+} // namespace ubi
+} // namespace JS
+
 #endif // vm_SavedFrame_h
--- a/js/src/vm/SavedStacks.cpp
+++ b/js/src/vm/SavedStacks.cpp
@@ -1370,8 +1370,34 @@ CompartmentChecker::check(SavedStacks* s
         printf("*** Compartment SavedStacks mismatch: %p vs. %p\n",
                (void*) &compartment->savedStacks(), stacks);
         MOZ_CRASH();
     }
 }
 #endif /* JS_CRASH_DIAGNOSTICS */
 
 } /* namespace js */
+
+namespace JS {
+namespace ubi {
+
+bool
+ConcreteStackFrame<SavedFrame>::isSystem() const
+{
+    auto trustedPrincipals = get().runtimeFromAnyThread()->trustedPrincipals();
+    return get().getPrincipals() == trustedPrincipals;
+}
+
+bool
+ConcreteStackFrame<SavedFrame>::constructSavedFrameStack(JSContext* cx,
+                                                         MutableHandleObject outSavedFrameStack)
+    const
+{
+    outSavedFrameStack.set(&get());
+    if (!cx->compartment()->wrap(cx, outSavedFrameStack)) {
+        outSavedFrameStack.set(nullptr);
+        return false;
+    }
+    return true;
+}
+
+} // namespace ubi
+} // namespace JS
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -3334,18 +3334,35 @@ PreliminaryObjectArray::empty() const
 
 void
 PreliminaryObjectArray::sweep()
 {
     // All objects in the array are weak, so clear any that are about to be
     // destroyed.
     for (size_t i = 0; i < COUNT; i++) {
         JSObject** ptr = &objects[i];
-        if (*ptr && IsAboutToBeFinalizedUnbarriered(ptr))
+        if (*ptr && IsAboutToBeFinalizedUnbarriered(ptr)) {
+            // Before we clear this reference, change the object's group to the
+            // Object.prototype group. This is done to ensure JSObject::finalize
+            // sees a NativeObject Class even if we change the current group's
+            // Class to one of the unboxed object classes in the meantime. If
+            // the compartment's global is dead, we don't do anything as the
+            // group's Class is not going to change in that case.
+            JSObject* obj = *ptr;
+            GlobalObject* global = obj->compartment()->maybeGlobal();
+            if (global && !obj->isSingleton()) {
+                JSObject* objectProto = GetBuiltinPrototypePure(global, JSProto_Object);
+                obj->setGroup(objectProto->groupRaw());
+                MOZ_ASSERT(obj->is<NativeObject>());
+                MOZ_ASSERT(obj->getClass() == objectProto->getClass());
+                MOZ_ASSERT(!obj->getClass()->finalize);
+            }
+
             *ptr = nullptr;
+        }
     }
 }
 
 void
 PreliminaryObjectArrayWithTemplate::trace(JSTracer* trc)
 {
     if (shape_)
         TraceEdge(trc, &shape_, "PreliminaryObjectArrayWithTemplate_shape");
--- a/js/src/vm/UbiNode.cpp
+++ b/js/src/vm/UbiNode.cpp
@@ -6,55 +6,121 @@
 
 #include "js/UbiNode.h"
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Range.h"
 #include "mozilla/Scoped.h"
 
+#include <algorithm>
+
 #include "jscntxt.h"
 #include "jsobj.h"
 #include "jsscript.h"
 #include "jsstr.h"
 
 #include "jit/IonCode.h"
 #include "js/Debug.h"
 #include "js/TracingAPI.h"
 #include "js/TypeDecls.h"
 #include "js/Utility.h"
 #include "js/Vector.h"
+#include "vm/Debugger.h"
 #include "vm/GlobalObject.h"
 #include "vm/ScopeObject.h"
 #include "vm/Shape.h"
 #include "vm/String.h"
 #include "vm/Symbol.h"
 
 #include "jsobjinlines.h"
 #include "vm/Debugger-inl.h"
 
 using mozilla::Some;
+using mozilla::RangedPtr;
 using mozilla::UniquePtr;
 using JS::DispatchTraceKindTyped;
 using JS::HandleValue;
 using JS::Value;
 using JS::ZoneSet;
+using JS::ubi::AtomOrTwoByteChars;
 using JS::ubi::Concrete;
 using JS::ubi::Edge;
 using JS::ubi::EdgeRange;
 using JS::ubi::Node;
 using JS::ubi::SimpleEdge;
 using JS::ubi::SimpleEdgeVector;
+using JS::ubi::StackFrame;
 using JS::ubi::TracerConcrete;
 using JS::ubi::TracerConcreteWithCompartment;
 
+template<typename CharT>
+static size_t
+copyToBuffer(const CharT* src, RangedPtr<char16_t> dest, size_t length)
+{
+    size_t i = 0;
+    for ( ; i < length; i++)
+        dest[i] = src[i];
+    return i;
+}
+
+struct CopyToBufferMatcher
+{
+    using ReturnType = size_t;
+
+    RangedPtr<char16_t> destination;
+    size_t              maxLength;
+
+    CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength)
+      : destination(destination)
+      , maxLength(maxLength)
+    { }
+
+    size_t
+    match(JSAtom* atom)
+    {
+        if (!atom)
+            return 0;
+
+        size_t length = std::min(atom->length(), maxLength);
+        JS::AutoCheckCannotGC noGC;
+        return atom->hasTwoByteChars()
+            ? copyToBuffer(atom->twoByteChars(noGC), destination, length)
+            : copyToBuffer(atom->latin1Chars(noGC), destination, length);
+    }
+
+    size_t
+    match(const char16_t* chars)
+    {
+        if (!chars)
+            return 0;
+
+        size_t length = std::min(js_strlen(chars), maxLength);
+        return copyToBuffer(chars, destination, length);
+    }
+};
+
+size_t
+StackFrame::source(RangedPtr<char16_t> destination, size_t length) const
+{
+    CopyToBufferMatcher m(destination, length);
+    return source().match(m);
+}
+
+size_t
+StackFrame::functionDisplayName(RangedPtr<char16_t> destination, size_t length) const
+{
+    CopyToBufferMatcher m(destination, length);
+    return functionDisplayName().match(m);
+}
+
 // All operations on null ubi::Nodes crash.
-const char16_t* Concrete<void>::typeName() const          { MOZ_CRASH("null ubi::Node"); }
-JS::Zone* Concrete<void>::zone() const                    { MOZ_CRASH("null ubi::Node"); }
-JSCompartment* Concrete<void>::compartment() const        { MOZ_CRASH("null ubi::Node"); }
+const char16_t* Concrete<void>::typeName() const   { MOZ_CRASH("null ubi::Node"); }
+JS::Zone* Concrete<void>::zone() const             { MOZ_CRASH("null ubi::Node"); }
+JSCompartment* Concrete<void>::compartment() const { MOZ_CRASH("null ubi::Node"); }
 
 UniquePtr<EdgeRange>
 Concrete<void>::edges(JSContext*, bool) const {
     MOZ_CRASH("null ubi::Node");
 }
 
 size_t
 Concrete<void>::size(mozilla::MallocSizeOf mallocSizeof) const
@@ -213,16 +279,29 @@ TracerConcrete<Referent>::edges(JSContex
 
 template<typename Referent>
 JSCompartment*
 TracerConcreteWithCompartment<Referent>::compartment() const
 {
     return TracerBase::get().compartment();
 }
 
+bool
+Concrete<JSObject>::hasAllocationStack() const
+{
+    return !!js::Debugger::getObjectAllocationSite(get());
+}
+
+StackFrame
+Concrete<JSObject>::allocationStack() const
+{
+    MOZ_ASSERT(hasAllocationStack());
+    return StackFrame(js::Debugger::getObjectAllocationSite(get()));
+}
+
 const char*
 Concrete<JSObject>::jsObjectClassName() const
 {
     return Concrete::get().getClass()->name;
 }
 
 bool
 Concrete<JSObject>::jsObjectConstructorName(JSContext* cx,
--- a/js/xpconnect/shell/moz.build
+++ b/js/xpconnect/shell/moz.build
@@ -27,9 +27,13 @@ LOCAL_INCLUDES += [
 if CONFIG['_MSC_VER']:
     # Always enter a Windows program through wmain, whether or not we're
     # a console application.
     WIN32_EXE_LDFLAGS += ['-ENTRY:wmainCRTStartup']
 
 if CONFIG['OS_ARCH'] == 'WINNT':
     RCINCLUDE = 'xpcshell.rc'
 
+CFLAGS += CONFIG['TK_CFLAGS']
+CXXFLAGS += CONFIG['TK_CFLAGS']
+OS_LIBS += CONFIG['TK_LIBS']
+
 FAIL_ON_WARNINGS = True
--- a/js/xpconnect/shell/xpcshell.cpp
+++ b/js/xpconnect/shell/xpcshell.cpp
@@ -20,19 +20,30 @@
 
 // we want a wmain entry point
 #define XRE_DONT_PROTECT_DLL_LOAD
 #define XRE_DONT_SUPPORT_XPSP2 // xpcshell does not ship
 #define XRE_WANT_ENVIRON
 #include "nsWindowsWMain.cpp"
 #endif
 
+#ifdef MOZ_WIDGET_GTK
+#include <gtk/gtk.h>
+#endif
+
 int
 main(int argc, char** argv, char** envp)
 {
+#ifdef MOZ_WIDGET_GTK
+    // A default display may or may not be required for xpcshell tests, and so
+    // is not created here. Instead we set the command line args, which is a
+    // fairly cheap operation.
+    gtk_parse_args(&argc, &argv);
+#endif
+
 #ifdef XP_MACOSX
     InitAutoreleasePool();
 #endif
 
     // unbuffer stdout so that output is in the correct order; note that stderr
     // is unbuffered by default
     setbuf(stdout, 0);
 
--- a/layout/base/PositionedEventTargeting.cpp
+++ b/layout/base/PositionedEventTargeting.cpp
@@ -530,16 +530,21 @@ FindFrameTargetedByInputEvent(WidgetGUIE
   nsIContent* clickableAncestor = nullptr;
   if (target) {
     clickableAncestor = GetClickableAncestor(target, nsGkAtoms::body);
     if (clickableAncestor) {
       if (!IsElementClickableAndReadable(target, aEvent, prefs)) {
         aEvent->AsMouseEventBase()->hitCluster = true;
       }
       PET_LOG("Target %p is clickable\n", target);
+      // If the target that was directly hit has a clickable ancestor, that
+      // means it too is clickable. And since it is the same as or a descendant
+      // of clickableAncestor, it should become the root for the GetClosest
+      // search.
+      clickableAncestor = target->GetContent();
     }
   }
 
   // Do not modify targeting for actual mouse hardware; only for mouse
   // events generated by touch-screen hardware.
   if (aEvent->mClass == eMouseEventClass &&
       prefs->mTouchOnly &&
       aEvent->AsMouseEvent()->inputSource !=
--- a/layout/base/nsPresShell.cpp
+++ b/layout/base/nsPresShell.cpp
@@ -4084,17 +4084,17 @@ PresShell::FlushPendingNotifications(moz
       if (aFlush.mFlushAnimations &&
           !mPresContext->StyleUpdateForAllAnimationsIsUpToDate()) {
         if (mPresContext->AnimationManager()) {
           mPresContext->AnimationManager()->
             FlushAnimations(CommonAnimationManager::Cannot_Throttle);
         }
         if (mPresContext->TransitionManager()) {
           mPresContext->TransitionManager()->
-            FlushTransitions(CommonAnimationManager::Cannot_Throttle);
+            FlushAnimations(CommonAnimationManager::Cannot_Throttle);
         }
         mPresContext->TickLastStyleUpdateForAllAnimations();
       }
 
       // The FlushResampleRequests() above flushed style changes.
       if (!mIsDestroying) {
         nsAutoScriptBlocker scriptBlocker;
         mPresContext->RestyleManager()->ProcessPendingRestyles();
--- a/layout/base/tests/test_event_target_radius.html
+++ b/layout/base/tests/test_event_target_radius.html
@@ -34,16 +34,17 @@ https://bugzilla.mozilla.org/show_bug.cg
   <div class="target" style="transform:translate(-80px,0);" id="t4" onmousedown="x=1" hidden></div>
 
   <div class="target" style="left:0; z-index:1" id="t5_left" onmousedown="x=1" hidden></div>
   <div class="target" style="left:106px;" id="t5_right" onmousedown="x=1" hidden></div>
   <div class="target" style="left:0; top:210px;" id="t5_below" onmousedown="x=1" hidden></div>
 
   <div class="target" id="t6" onmousedown="x=1" hidden>
     <div id="t6_inner" style="position:absolute; left:-40px; top:20px; width:60px; height:60px; background:yellow;"></div>
+    <div id="t6_inner_clickable" style="position:absolute; left:-40px; top: 80px; width: 60px; height: 5px; background:red" onmousedown="x=1"></div>
   </div>
   <div id="t6_outer" style="position:absolute; left:160px; top:120px; width:60px; height:60px; background:green;" onmousedown="x=1" hidden></div>
 
   <div class="target" id="t7" onmousedown="x=1" hidden></div>
   <div class="target" id="t7_over" hidden></div>
 
   <div id="t8" contenteditable="true" class="target" hidden></div>
 
@@ -190,16 +191,26 @@ function test3() {
   setShowing("t6", true);
   setShowing("t6_outer", true);
   testMouseClick("t6_inner", -1, 10, "t6_inner",
     "inner element is clickable because its parent is, even when it sticks outside parent");
   testMouseClick("t6_inner", 39, -1, "t6_inner",
     "when outside both inner and parent, but in range of both, the inner is selected");
   testMouseClick("t6_inner", 45, -1, "t6_inner",
     "clicking in clickable parent close to inner activates inner, not parent");
+  testMouseClick("t6_inner_clickable", 1, -1, "t6_inner",
+    "clicking on inner doesn't get redirected to inner_clickable because they are both clickable");
+  testMouseClick("t6_inner_clickable", 1, 1, "t6_inner_clickable",
+    "clicking on inner_clickable doesn't get redirected to inner because they are both clickable");
+  testMouseClick("t6_inner_clickable", 45, -1, "t6_inner",
+    "clicking on inner while backed by its parent still doesn't get redirected to inner_clickable");
+  testMouseClick("t6_inner_clickable", 45, 1, "t6_inner_clickable",
+    "clicking on inner_clickable while backed by its parent still doesn't get redirected to inner");
+  testMouseClick("t6_inner_clickable", 45, 6, "t6_inner_clickable",
+    "clicking on parent near inner_clickable gets redirected to inner_clickable rather than inner because it is closer");
   ok(13*mm < 80, "no point inside t6 that's not within radius of t6_inner; adjust layout of t6/inner/outer as needed");
   testMouseClick("t6_outer", -40 + 13*mm, -1, "t6",
     "clicking in clickable container close to outer activates parent, not outer");
   testMouseClick("t6_outer", 1, 1, "t6_outer",
     "clicking directly on the outer activates it");
   setShowing("t6", false);
   setShowing("t6_outer", false);
 
--- a/layout/reftests/w3c-css/submitted/ui3/reftest.list
+++ b/layout/reftests/w3c-css/submitted/ui3/reftest.list
@@ -2,9 +2,9 @@
 == box-sizing-border-box-002.xht box-sizing-border-box-002-ref.xht
 == box-sizing-border-box-003.xht box-sizing-border-box-003-ref.xht
 == box-sizing-border-box-004.xht box-sizing-border-box-004-ref.xht
 == box-sizing-content-box-001.xht box-sizing-content-box-001-ref.xht
 == box-sizing-content-box-002.xht box-sizing-content-box-002-ref.xht
 == box-sizing-content-box-003.xht box-sizing-content-box-003-ref.xht
 random-if(Android) skip-if((B2G&&browserIsRemote)||Mulet)  == box-sizing-replaced-001.xht box-sizing-replaced-001-ref.xht #bug 982547 # Initial mulet triage: parity with B2G/B2G Desktop
 fuzzy-if(Android,27,874) random-if((B2G&&browserIsRemote)||Mulet) == box-sizing-replaced-002.xht box-sizing-replaced-002-ref.xht # Bug 1128229 # Initial mulet triage: parity with B2G/B2G Desktop
-fuzzy-if(Android,14,869) random-if((B2G&&browserIsRemote)||Mulet) == box-sizing-replaced-003.xht box-sizing-replaced-003-ref.xht # Bug 1128229 # Initial mulet triage: parity with B2G/B2G Desktop
+fuzzy-if(Android,27,869) random-if((B2G&&browserIsRemote)||Mulet) == box-sizing-replaced-003.xht box-sizing-replaced-003-ref.xht # Bug 1128229 # Initial mulet triage: parity with B2G/B2G Desktop
--- a/layout/style/AnimationCommon.cpp
+++ b/layout/style/AnimationCommon.cpp
@@ -23,17 +23,16 @@
 #include "nsDisplayList.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/dom/KeyframeEffect.h"
 #include "RestyleManager.h"
 #include "nsRuleProcessorData.h"
 #include "nsStyleSet.h"
 #include "nsStyleChangeList.h"
 
-
 using mozilla::layers::Layer;
 using mozilla::dom::Animation;
 using mozilla::dom::KeyframeEffectReadOnly;
 
 namespace mozilla {
 
 /* static */ bool
 IsGeometricProperty(nsCSSProperty aProperty)
@@ -374,16 +373,47 @@ CommonAnimationManager::GetAnimations(do
     }
 
     AddElementCollection(collection);
   }
 
   return collection;
 }
 
+void
+CommonAnimationManager::FlushAnimations(FlushFlags aFlags)
+{
+  TimeStamp now = mPresContext->RefreshDriver()->MostRecentRefresh();
+  for (PRCList *l = PR_LIST_HEAD(&mElementCollections);
+       l != &mElementCollections;
+       l = PR_NEXT_LINK(l)) {
+    AnimationCollection* collection = static_cast<AnimationCollection*>(l);
+
+    if (collection->mStyleRuleRefreshTime == now) {
+      continue;
+    }
+
+    nsAutoAnimationMutationBatch mb(collection->mElement);
+    collection->Tick();
+
+    bool canThrottleTick = aFlags == Can_Throttle;
+    for (auto iter = collection->mAnimations.cbegin();
+         canThrottleTick && iter != collection->mAnimations.cend();
+         ++iter) {
+      canThrottleTick &= (*iter)->CanThrottle();
+    }
+
+    collection->RequestRestyle(canThrottleTick ?
+                               AnimationCollection::RestyleType::Throttled :
+                               AnimationCollection::RestyleType::Standard);
+  }
+
+  MaybeStartOrStopObservingRefreshDriver();
+}
+
 nsIStyleRule*
 CommonAnimationManager::GetAnimationRule(mozilla::dom::Element* aElement,
                                          nsCSSPseudoElements::Type aPseudoType)
 {
   MOZ_ASSERT(
     aPseudoType == nsCSSPseudoElements::ePseudo_NotPseudoElement ||
     aPseudoType == nsCSSPseudoElements::ePseudo_before ||
     aPseudoType == nsCSSPseudoElements::ePseudo_after,
@@ -431,16 +461,34 @@ CommonAnimationManager::LayerAnimationRe
   for (size_t i = 0; i < ArrayLength(info); ++i) {
     if (aProperty == info[i].mProperty) {
       return &info[i];
     }
   }
   return nullptr;
 }
 
+/* virtual */ void
+CommonAnimationManager::WillRefresh(TimeStamp aTime)
+{
+  MOZ_ASSERT(mPresContext,
+             "refresh driver should not notify additional observers "
+             "after pres context has been destroyed");
+  if (!mPresContext->GetPresShell()) {
+    // Someone might be keeping mPresContext alive past the point
+    // where it has been torn down; don't bother doing anything in
+    // this case.  But do get rid of all our animations so we stop
+    // triggering refreshes.
+    RemoveAllElementCollections();
+    return;
+  }
+
+  FlushAnimations(Can_Throttle);
+}
+
 #ifdef DEBUG
 /* static */ void
 CommonAnimationManager::Initialize()
 {
   const auto& info = CommonAnimationManager::sLayerAnimationInfo;
   for (size_t i = 0; i < ArrayLength(info); i++) {
     auto record = info[i];
     MOZ_ASSERT(nsCSSProps::PropHasFlags(record.mProperty,
@@ -819,16 +867,18 @@ AnimationCollection::Tick()
     mAnimations[animIdx]->Tick();
   }
 }
 
 void
 AnimationCollection::EnsureStyleRuleFor(TimeStamp aRefreshTime,
                                         EnsureStyleRuleFlags aFlags)
 {
+  mHasPendingAnimationRestyle = false;
+
   if (!mNeedsRefreshes) {
     mStyleRuleRefreshTime = aRefreshTime;
     return;
   }
 
   if (!mStyleRuleRefreshTime.IsNull() &&
       mStyleRuleRefreshTime == aRefreshTime) {
     // mStyleRule may be null and valid, if we have no style to apply.
@@ -960,16 +1010,61 @@ AnimationCollection::CanThrottleAnimatio
       return false;
     }
   }
 
   return true;
 }
 
 void
+AnimationCollection::RequestRestyle(RestyleType aRestyleType)
+{
+  MOZ_ASSERT(IsForElement() || IsForBeforePseudo() || IsForAfterPseudo(),
+             "Unexpected mElementProperty; might restyle too much");
+
+  nsPresContext* presContext = mManager->PresContext();
+  if (!presContext) {
+    // Pres context will be null after the manager is disconnected.
+    return;
+  }
+
+  MOZ_ASSERT(mElement->GetCrossShadowCurrentDoc() == presContext->Document(),
+             "Element::UnbindFromTree should have destroyed the element "
+             "transition/animations object");
+
+  // SetNeedStyleFlush is cheap and required regardless of the restyle type
+  // so we do it unconditionally. Furthermore, if the posted animation restyle
+  // has been postponed due to the element being display:none (i.e.
+  // mHasPendingAnimationRestyle is set) then we should still mark the
+  // document as needing a style flush.
+  presContext->Document()->SetNeedStyleFlush();
+
+  // If we are already waiting on an animation restyle then there's nothing
+  // more to do.
+  if (mHasPendingAnimationRestyle) {
+    return;
+  }
+
+  // Upgrade throttled restyles if other factors prevent
+  // throttling (e.g. async animations are not enabled).
+  if (aRestyleType == RestyleType::Throttled) {
+    TimeStamp now = presContext->RefreshDriver()->MostRecentRefresh();
+    if (!CanPerformOnCompositorThread(CanAnimateFlags(0)) ||
+        !CanThrottleAnimation(now)) {
+      aRestyleType = RestyleType::Standard;
+    }
+  }
+
+  if (aRestyleType == RestyleType::Standard) {
+    mHasPendingAnimationRestyle = true;
+    PostRestyleForAnimation(presContext);
+  }
+}
+
+void
 AnimationCollection::UpdateAnimationGeneration(nsPresContext* aPresContext)
 {
   mAnimationGeneration =
     aPresContext->RestyleManager()->GetAnimationGeneration();
 }
 
 void
 AnimationCollection::UpdateCheckGeneration(
--- a/layout/style/AnimationCommon.h
+++ b/layout/style/AnimationCommon.h
@@ -57,16 +57,19 @@ public:
 #ifdef MOZ_XUL
   virtual void RulesMatching(XULTreeRuleProcessorData* aData) override;
 #endif
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf)
     const MOZ_MUST_OVERRIDE override;
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf)
     const MOZ_MUST_OVERRIDE override;
 
+  // nsARefreshObserver
+  void WillRefresh(TimeStamp aTime) override;
+
 #ifdef DEBUG
   static void Initialize();
 #endif
 
   // NOTE:  This can return null after Disconnect().
   nsPresContext* PresContext() const { return mPresContext; }
 
   /**
@@ -100,16 +103,17 @@ public:
   // Notify this manager that one of its collections of animations,
   // has been updated.
   void NotifyCollectionUpdated(AnimationCollection& aCollection);
 
   enum FlushFlags {
     Can_Throttle,
     Cannot_Throttle
   };
+  void FlushAnimations(FlushFlags aFlags);
 
   nsIStyleRule* GetAnimationRule(dom::Element* aElement,
                                  nsCSSPseudoElements::Type aPseudoType);
 
   static bool ExtractComputedValueForTransition(
                   nsCSSProperty aProperty,
                   nsStyleContext* aStyleContext,
                   StyleAnimationValue& aComputedValue);
@@ -244,16 +248,17 @@ struct AnimationCollection : public PRCL
   AnimationCollection(dom::Element *aElement, nsIAtom *aElementProperty,
                       CommonAnimationManager *aManager)
     : mElement(aElement)
     , mElementProperty(aElementProperty)
     , mManager(aManager)
     , mAnimationGeneration(0)
     , mCheckGeneration(0)
     , mNeedsRefreshes(true)
+    , mHasPendingAnimationRestyle(false)
 #ifdef DEBUG
     , mCalledPropertyDtor(false)
 #endif
   {
     MOZ_COUNT_CTOR(AnimationCollection);
     PR_INIT_CLIST(this);
   }
   ~AnimationCollection()
@@ -273,34 +278,43 @@ struct AnimationCollection : public PRCL
 
   static void PropertyDtor(void *aObject, nsIAtom *aPropertyName,
                            void *aPropertyValue, void *aData);
 
   void Tick();
 
   void EnsureStyleRuleFor(TimeStamp aRefreshTime, EnsureStyleRuleFlags aFlags);
 
-  bool CanThrottleTransformChanges(TimeStamp aTime);
-
-  bool CanThrottleAnimation(TimeStamp aTime);
-
   enum CanA