Backed out 15 changesets (bug 1500049, bug 1172394, bug 1546756, bug 1302379) for failures on browser_disabledForMediaStreamVideos.js. CLOSED TREE
author: Csoregi Natalia <ncsoregi@mozilla.com>
Thu, 14 Nov 2019 00:32:51 +0200
changeset 501824 7272d77d4e808dcbbd1f4f50210786dd326b218a
parent 501823 b42b424565738fdf7dd1edaddf73bda03df040f5
child 501825 0e1844d65a2504f02679253ae9a536887cddc720
push id: 114172
push user: dluca@mozilla.com
push date: Tue, 19 Nov 2019 11:31:10 +0000
treeherder: mozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1500049, 1172394, 1546756, 1302379
milestone: 72.0a1
backs out: 355f090421a6b38618b3f2e2e0c08d9166a78e72
306341d0b5869bafb98f11492f64f3aa0e4e51a7
3ff0d72d23a267ace6188dcb339b06e7d09d51f2
a4f256e68ceff48ff088715a51ff793d880cd6e9
d0aa43657e8cb3f06eafe5c537dc64ff1499a353
edff95b6f7242918c660bcf4ed0cc029b4e72906
94bd21d9b396a1d0ec3232a1afc0ea154879c491
7e7baa73e1efeacd1d19dc95e3f79133d796daf4
c3bd415507e8bd658b1fe760da997015b098af3e
1c45b135318d8cb519b8d58af72ab864fe6bfd62
c57c41e8c39ea51e96c13af8ecdcbe1640e2a9d6
a796541fe5ef045738f1c10d95830b71fc5c024d
89ad0b553b0f657612e3082a9c37f36221bb38c6
744fb77a58333b632dbf6820c77c7d8e97674b2c
afb4b226ff0409682b7bb2eb2475f227bc52aa96
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 15 changesets (bug 1500049, bug 1172394, bug 1546756, bug 1302379) for failures on browser_disabledForMediaStreamVideos.js. CLOSED TREE Backed out changeset 355f090421a6 (bug 1500049) Backed out changeset 306341d0b586 (bug 1302379) Backed out changeset 3ff0d72d23a2 (bug 1546756) Backed out changeset a4f256e68cef (bug 1172394) Backed out changeset d0aa43657e8c (bug 1172394) Backed out changeset edff95b6f724 (bug 1172394) Backed out changeset 94bd21d9b396 (bug 1172394) Backed out changeset 7e7baa73e1ef (bug 1172394) Backed out changeset c3bd415507e8 (bug 1172394) Backed out changeset 1c45b135318d (bug 1172394) Backed out changeset c57c41e8c39e (bug 1172394) Backed out changeset a796541fe5ef (bug 1172394) Backed out changeset 89ad0b553b0f (bug 1172394) Backed out changeset 744fb77a5833 (bug 1172394) Backed out changeset afb4b226ff04 (bug 1172394)
dom/html/HTMLMediaElement.cpp
dom/html/HTMLMediaElement.h
dom/media/ChannelMediaDecoder.cpp
dom/media/ChannelMediaDecoder.h
dom/media/ChannelMediaResource.cpp
dom/media/ChannelMediaResource.h
dom/media/CloneableWithRangeMediaResource.cpp
dom/media/CloneableWithRangeMediaResource.h
dom/media/DOMMediaStream.cpp
dom/media/DOMMediaStream.h
dom/media/FileMediaResource.cpp
dom/media/FileMediaResource.h
dom/media/MediaCache.cpp
dom/media/MediaCache.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderOwner.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaResource.h
dom/media/MediaStreamTrack.h
dom/media/mediasink/AudioSink.cpp
dom/media/mediasink/AudioSink.h
dom/media/mediasink/AudioSinkWrapper.cpp
dom/media/mediasink/AudioSinkWrapper.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/MediaSink.h
dom/media/mediasink/OutputStreamManager.cpp
dom/media/mediasink/OutputStreamManager.h
dom/media/mediasink/VideoSink.cpp
dom/media/mediasink/VideoSink.h
dom/media/mediasink/moz.build
dom/media/mediasource/SourceBufferResource.cpp
dom/media/mediasource/SourceBufferResource.h
dom/media/test/test_mediatrack_consuming_mediaresource.html
dom/media/test/test_mediatrack_replay_from_end.html
dom/media/test/test_streams_element_capture_reset.html
dom/media/webaudio/MediaElementAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.h
testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html.ini
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -653,81 +653,66 @@ class HTMLMediaElement::MediaStreamRende
 
   // Currently enabled (and rendered) audio tracks.
   nsTArray<WeakPtr<MediaStreamTrack>> mAudioTracks;
 
   // Currently selected (and rendered) video track.
   WeakPtr<MediaStreamTrack> mVideoTrack;
 };
 
-class HTMLMediaElement::MediaElementTrackSource
+class HTMLMediaElement::StreamCaptureTrackSource
     : public MediaStreamTrackSource,
       public MediaStreamTrackSource::Sink {
  public:
   NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaElementTrackSource,
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(StreamCaptureTrackSource,
                                            MediaStreamTrackSource)
 
-  /* MediaDecoder track source */
-  MediaElementTrackSource(ProcessedMediaTrack* aTrack, nsIPrincipal* aPrincipal)
-      : MediaStreamTrackSource(aPrincipal, nsString()), mTrack(aTrack) {
-    MOZ_ASSERT(mTrack);
-  }
-
-  /* MediaStream track source */
-  MediaElementTrackSource(MediaStreamTrackSource* aCapturedTrackSource,
-                          ProcessedMediaTrack* aTrack, MediaInputPort* aPort)
+  StreamCaptureTrackSource(MediaStreamTrackSource* aCapturedTrackSource,
+                           ProcessedMediaTrack* aStream, MediaInputPort* aPort)
       : MediaStreamTrackSource(aCapturedTrackSource->GetPrincipal(),
                                nsString()),
         mCapturedTrackSource(aCapturedTrackSource),
-        mTrack(aTrack),
+        mTrack(aStream),
         mPort(aPort) {
+    MOZ_ASSERT(mCapturedTrackSource);
     MOZ_ASSERT(mTrack);
-    MOZ_ASSERT(mCapturedTrackSource);
     MOZ_ASSERT(mPort);
 
     mCapturedTrackSource->RegisterSink(this);
   }
 
   void SetEnabled(bool aEnabled) {
     if (!mTrack) {
       return;
     }
     mTrack->SetEnabled(aEnabled ? DisabledTrackMode::ENABLED
                                 : DisabledTrackMode::SILENCE_FREEZE);
   }
 
-  void SetPrincipal(RefPtr<nsIPrincipal> aPrincipal) {
-    mPrincipal = std::move(aPrincipal);
-    MediaStreamTrackSource::PrincipalChanged();
-  }
-
   void Destroy() override {
     if (mCapturedTrackSource) {
       mCapturedTrackSource->UnregisterSink(this);
       mCapturedTrackSource = nullptr;
     }
-    if (mTrack && !mTrack->IsDestroyed()) {
+    if (mTrack) {
       mTrack->Destroy();
+      mTrack = nullptr;
     }
     if (mPort) {
       mPort->Destroy();
       mPort = nullptr;
     }
   }
 
   MediaSourceEnum GetMediaSource() const override {
     return MediaSourceEnum::Other;
   }
 
-  void Stop() override {
-    // Do nothing. There may appear new output streams
-    // that need tracks sourced from this source, so we
-    // cannot destroy things yet.
-  }
+  void Stop() override { Destroy(); }
 
   /**
    * Do not keep the track source alive. The source lifetime is controlled by
    * its associated tracks.
    */
   bool KeepsSourceAlive() const override { return false; }
 
   /**
@@ -740,79 +725,60 @@ class HTMLMediaElement::MediaElementTrac
   void Enable() override {}
 
   void PrincipalChanged() override {
     if (!mCapturedTrackSource) {
       // This could happen during shutdown.
       return;
     }
 
-    SetPrincipal(mCapturedTrackSource->GetPrincipal());
+    mPrincipal = mCapturedTrackSource->GetPrincipal();
+    MediaStreamTrackSource::PrincipalChanged();
   }
 
   void MutedChanged(bool aNewState) override {
+    if (!mCapturedTrackSource) {
+      // This could happen during shutdown.
+      return;
+    }
+
     MediaStreamTrackSource::MutedChanged(aNewState);
   }
 
   void OverrideEnded() override {
+    if (!mCapturedTrackSource) {
+      // This could happen during shutdown.
+      return;
+    }
+
     Destroy();
     MediaStreamTrackSource::OverrideEnded();
   }
 
-  ProcessedMediaTrack* Track() const { return mTrack; }
-
  private:
-  virtual ~MediaElementTrackSource() { Destroy(); };
+  virtual ~StreamCaptureTrackSource() {
+    MOZ_ASSERT(!mCapturedTrackSource);
+    MOZ_ASSERT(!mTrack);
+    MOZ_ASSERT(!mPort);
+  };
 
   RefPtr<MediaStreamTrackSource> mCapturedTrackSource;
-  const RefPtr<ProcessedMediaTrack> mTrack;
+  RefPtr<ProcessedMediaTrack> mTrack;
   RefPtr<MediaInputPort> mPort;
 };
 
-HTMLMediaElement::OutputMediaStream::OutputMediaStream(
-    RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
-    bool aFinishWhenEnded)
-    : mStream(std::move(aStream)),
-      mCapturingAudioOnly(aCapturingAudioOnly),
-      mFinishWhenEnded(aFinishWhenEnded) {}
-HTMLMediaElement::OutputMediaStream::~OutputMediaStream() = default;
-
-void ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& aCallback,
-                                 HTMLMediaElement::OutputMediaStream& aField,
-                                 const char* aName, uint32_t aFlags) {
-  ImplCycleCollectionTraverse(aCallback, aField.mStream, "mStream", aFlags);
-  ImplCycleCollectionTraverse(aCallback, aField.mFinishWhenEndedLoadingSrc,
-                              "mFinishWhenEndedLoadingSrc", aFlags);
-  ImplCycleCollectionTraverse(aCallback, aField.mFinishWhenEndedAttrStream,
-                              "mFinishWhenEndedAttrStream", aFlags);
-}
-
-void ImplCycleCollectionUnlink(HTMLMediaElement::OutputMediaStream& aField) {
-  ImplCycleCollectionUnlink(aField.mStream);
-  ImplCycleCollectionUnlink(aField.mFinishWhenEndedLoadingSrc);
-  ImplCycleCollectionUnlink(aField.mFinishWhenEndedAttrStream);
-}
-
-NS_IMPL_ADDREF_INHERITED(HTMLMediaElement::MediaElementTrackSource,
+NS_IMPL_ADDREF_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
                          MediaStreamTrackSource)
-NS_IMPL_RELEASE_INHERITED(HTMLMediaElement::MediaElementTrackSource,
+NS_IMPL_RELEASE_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
                           MediaStreamTrackSource)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
-    HTMLMediaElement::MediaElementTrackSource)
+    HTMLMediaElement::StreamCaptureTrackSource)
 NS_INTERFACE_MAP_END_INHERITING(MediaStreamTrackSource)
-NS_IMPL_CYCLE_COLLECTION_CLASS(HTMLMediaElement::MediaElementTrackSource)
-NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(
-    HTMLMediaElement::MediaElementTrackSource, MediaStreamTrackSource)
-  tmp->Destroy();
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mCapturedTrackSource)
-NS_IMPL_CYCLE_COLLECTION_UNLINK_END
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(
-    HTMLMediaElement::MediaElementTrackSource, MediaStreamTrackSource)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mCapturedTrackSource)
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+NS_IMPL_CYCLE_COLLECTION_INHERITED(HTMLMediaElement::StreamCaptureTrackSource,
+                                   MediaStreamTrackSource, mCapturedTrackSource)
 
 /**
  * There is a reference cycle involving this class: MediaLoadListener
  * holds a reference to the HTMLMediaElement, which holds a reference
  * to an nsIChannel, which holds a reference to this listener.
  * We break the reference cycle in OnStartRequest by clearing mElement.
  */
 class HTMLMediaElement::MediaLoadListener final
@@ -1671,18 +1637,19 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcMediaSource)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcStream)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSrcAttrStream)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSourcePointer)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mLoadBlockedDoc)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSourceLoadCandidate)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioChannelWrapper)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mErrorSink->mError)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputStreams)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputTrackSources);
+  for (uint32_t i = 0; i < tmp->mOutputStreams.Length(); ++i) {
+    NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputStreams[i].mStream)
+  }
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlayed);
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTextTrackManager)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioTrackList)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mVideoTrackList)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mIncomingMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSelectedVideoStreamTrack)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingPlayPromises)
@@ -1704,18 +1671,20 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_IN
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSourcePointer)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mLoadBlockedDoc)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSourceLoadCandidate)
   if (tmp->mAudioChannelWrapper) {
     tmp->mAudioChannelWrapper->Shutdown();
   }
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioChannelWrapper)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mErrorSink->mError)
+  for (OutputMediaStream& s : tmp->mOutputStreams) {
+    s.mStream->SetFinishedOnInactive(true);
+  }
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputStreams)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputTrackSources)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlayed)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mTextTrackManager)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioTrackList)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mVideoTrackList)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mIncomingMediaKeys)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mSelectedVideoStreamTrack)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingPlayPromises)
@@ -1860,16 +1829,17 @@ nsresult HTMLMediaElement::OnChannelRedi
 void HTMLMediaElement::ShutdownDecoder() {
   RemoveMediaElementFromURITable();
   NS_ASSERTION(mDecoder, "Must have decoder to shut down");
 
   mWaitingForKeyListener.DisconnectIfExists();
   if (mMediaSource) {
     mMediaSource->CompletePendingTransactions();
   }
+  DiscardFinishWhenEndedOutputStreams();
   mDecoder->Shutdown();
   DDUNLINKCHILD(mDecoder.get());
   mDecoder = nullptr;
   ReportAudioTrackSilenceProportionTelemetry();
 }
 
 void HTMLMediaElement::ReportPlayedTimeAfterBlockedTelemetry() {
   if (!mHasPlayEverBeenBlocked) {
@@ -1942,24 +1912,37 @@ void HTMLMediaElement::AbortExistingLoad
   bool fireTimeUpdate = false;
 
   // We need to remove FirstFrameListener before VideoTracks get emptied.
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
     mFirstFrameListener = nullptr;
   }
 
+  // When aborting the existing loads, empty the objects in audio track list and
+  // video track list, no events (in particular, no removetrack events) are
+  // fired as part of this. Ending MediaTrack sends track ended notifications,
+  // so we empty the track lists prior.
+  if (AudioTracks()) {
+    AudioTracks()->EmptyTracks();
+  }
+  if (VideoTracks()) {
+    VideoTracks()->EmptyTracks();
+  }
+
   if (mDecoder) {
     fireTimeUpdate = mDecoder->GetCurrentTime() != 0.0;
     ShutdownDecoder();
   }
   if (mSrcStream) {
     EndSrcMediaStreamPlayback();
   }
 
+  DiscardFinishWhenEndedOutputStreams();
+
   RemoveMediaElementFromURITable();
   mLoadingSrc = nullptr;
   mLoadingSrcTriggeringPrincipal = nullptr;
   DDLOG(DDLogCategory::Property, "loading_src", "");
   DDUNLINKCHILD(mMediaSource.get());
   mMediaSource = nullptr;
 
   if (mNetworkState == NETWORK_LOADING || mNetworkState == NETWORK_IDLE) {
@@ -1991,17 +1974,16 @@ void HTMLMediaElement::AbortExistingLoad
                  "How did someone setup a new stream/decoder already?");
     // ChangeNetworkState() will call UpdateAudioChannelPlayingState()
     // indirectly which depends on mPaused. So we need to update mPaused first.
     if (!mPaused) {
       mPaused = true;
       RejectPromises(TakePendingPlayPromises(), NS_ERROR_DOM_MEDIA_ABORT_ERR);
     }
     ChangeNetworkState(NETWORK_EMPTY);
-    RemoveMediaTracks();
     ChangeReadyState(HAVE_NOTHING);
 
     // TODO: Apply the rules for text track cue rendering Bug 865407
     if (mTextTrackManager) {
       mTextTrackManager->GetTextTracks()->SetCuesInactive();
     }
 
     if (fireTimeUpdate) {
@@ -2039,17 +2021,16 @@ void HTMLMediaElement::AbortExistingLoad
 }
 
 void HTMLMediaElement::NoSupportedMediaSourceError(
     const nsACString& aErrorDetails) {
   if (mDecoder) {
     ShutdownDecoder();
   }
   mErrorSink->SetError(MEDIA_ERR_SRC_NOT_SUPPORTED, aErrorDetails);
-  RemoveMediaTracks();
   ChangeDelayLoadStatus(false);
   UpdateAudioChannelPlayingState();
   RejectPromises(TakePendingPlayPromises(),
                  NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR);
 }
 
 typedef void (HTMLMediaElement::*SyncSectionFn)();
 
@@ -2356,34 +2337,44 @@ void HTMLMediaElement::NotifyMediaTrackE
           if (!mFirstFrameListener) {
             mFirstFrameListener =
                 new FirstFrameListener(container, mAbstractMainThread);
           }
           mSelectedVideoStreamTrack->AddVideoOutput(mFirstFrameListener);
         }
       }
     }
-  }
-
-  // The set of enabled/selected tracks changed.
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
+
+    if (mReadyState == HAVE_NOTHING) {
+      // No MediaStreamTracks are captured until we have metadata.
+      return;
+    }
+    for (OutputMediaStream& ms : mOutputStreams) {
+      if (aTrack->AsVideoTrack() && ms.mCapturingAudioOnly) {
+        // If the output stream is for audio only we ignore video tracks.
+        continue;
+      }
+      AddCaptureMediaTrackToOutputStream(aTrack, ms);
+    }
+  }
 }
 
 void HTMLMediaElement::NotifyMediaTrackDisabled(dom::MediaTrack* aTrack) {
   MOZ_ASSERT(aTrack);
   if (!aTrack) {
     return;
   }
-
+#ifdef DEBUG
   nsString id;
   aTrack->GetId(id);
 
   LOG(LogLevel::Debug, ("MediaElement %p %sTrack with id %s disabled", this,
                         aTrack->AsAudioTrack() ? "Audio" : "Video",
                         NS_ConvertUTF16toUTF8(id).get()));
+#endif
 
   MOZ_ASSERT((!aTrack->AsAudioTrack() || !aTrack->AsAudioTrack()->Enabled()) &&
              (!aTrack->AsVideoTrack() || !aTrack->AsVideoTrack()->Selected()));
 
   if (AudioTrack* t = aTrack->AsAudioTrack()) {
     if (mSrcStream) {
       if (mMediaStreamRenderer) {
         mMediaStreamRenderer->RemoveTrack(t->GetAudioStreamTrack());
@@ -2415,18 +2406,55 @@ void HTMLMediaElement::NotifyMediaTrackD
       if (mMediaStreamRenderer) {
         mMediaStreamRenderer->RemoveTrack(mSelectedVideoStreamTrack);
       }
       mSelectedVideoStreamTrack->RemovePrincipalChangeObserver(this);
       mSelectedVideoStreamTrack = nullptr;
     }
   }
 
-  // The set of enabled/selected tracks changed.
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
+  if (mReadyState == HAVE_NOTHING) {
+    // No MediaStreamTracks are captured until we have metadata, and code
+    // below doesn't do anything for captured decoders.
+    return;
+  }
+
+  for (OutputMediaStream& ms : mOutputStreams) {
+    if (ms.mCapturingDecoder) {
+      MOZ_ASSERT(!ms.mCapturingMediaStream);
+      continue;
+    }
+    if (ms.mCapturingAudioOnly && aTrack->AsVideoTrack()) {
+      continue;
+    }
+    MOZ_ASSERT(ms.mCapturingMediaStream);
+    for (int32_t i = ms.mTracks.Length() - 1; i >= 0; --i) {
+      if (ms.mTracks[i].first() != aTrack->GetId()) {
+        continue;
+      }
+      // The source of this track just ended. Force-notify that it ended.
+      // If we bounce it to the MediaTrackGraph it might not be picked up,
+      // for instance if the MediaInputPort was destroyed in the same
+      // iteration as it was added.
+      mMainThreadEventTarget->Dispatch(NewRunnableMethod(
+          "StreamCaptureTrackSource::OverrideEnded",
+          static_cast<StreamCaptureTrackSource*>(ms.mTracks[i].second().get()),
+          &StreamCaptureTrackSource::OverrideEnded));
+
+      ms.mTracks.RemoveElementAt(i);
+      break;
+    }
+#ifdef DEBUG
+    for (auto pair : ms.mTracks) {
+      MOZ_ASSERT(pair.first() != aTrack->GetId(),
+                 "The same MediaTrack was forwarded to the output stream more "
+                 "than once. This shouldn't happen.");
+    }
+#endif
+  }
 }
 
 void HTMLMediaElement::DealWithFailedElement(nsIContent* aSourceElement) {
   if (mShuttingDown) {
     return;
   }
 
   DispatchAsyncSourceError(aSourceElement);
@@ -2438,18 +2466,16 @@ void HTMLMediaElement::DealWithFailedEle
 void HTMLMediaElement::LoadFromSourceChildren() {
   NS_ASSERTION(mDelayingLoadEvent,
                "Should delay load event (if in document) during load");
   NS_ASSERTION(mIsLoadingFromSourceChildren,
                "Must remember we're loading from source children");
 
   AddMutationObserverUnlessExists(this);
 
-  RemoveMediaTracks();
-
   while (true) {
     Element* child = GetNextSource();
     if (!child) {
       // Exhausted candidates, wait for more candidates to be appended to
       // the media element.
       mLoadWaitStatus = WAITING_FOR_SOURCE;
       ChangeNetworkState(NETWORK_NO_SOURCE);
       ChangeDelayLoadStatus(false);
@@ -3136,267 +3162,119 @@ void HTMLMediaElement::SetMuted(bool aMu
 
   DispatchAsyncEvent(NS_LITERAL_STRING("volumechange"));
 
   // We allow inaudible autoplay. But changing our mute status may make this
   // media audible. So pause if we are no longer supposed to be autoplaying.
   PauseIfShouldNotBePlaying();
 }
 
-void HTMLMediaElement::GetAllEnabledMediaTracks(
-    nsTArray<RefPtr<MediaTrack>>& aTracks) {
-  if (AudioTrackList* tracks = AudioTracks()) {
-    for (size_t i = 0; i < tracks->Length(); ++i) {
-      AudioTrack* track = (*tracks)[i];
-      if (track->Enabled()) {
-        aTracks.AppendElement(track);
-      }
-    }
-  }
-  if (IsVideo()) {
-    if (VideoTrackList* tracks = VideoTracks()) {
-      for (size_t i = 0; i < tracks->Length(); ++i) {
-        VideoTrack* track = (*tracks)[i];
-        if (track->Selected()) {
-          aTracks.AppendElement(track);
-        }
-      }
-    }
-  }
-}
-
 void HTMLMediaElement::SetCapturedOutputStreamsEnabled(bool aEnabled) {
-  for (auto& entry : mOutputTrackSources) {
-    entry.GetData()->SetEnabled(aEnabled);
-  }
-}
-
-void HTMLMediaElement::AddOutputTrackSourceToOutputStream(
-    MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
-    AddTrackMode aMode) {
+  for (OutputMediaStream& ms : mOutputStreams) {
+    if (ms.mCapturingDecoder) {
+      MOZ_ASSERT(!ms.mCapturingMediaStream);
+      continue;
+    }
+    for (auto pair : ms.mTracks) {
+      static_cast<StreamCaptureTrackSource*>(pair.second().get())
+          ->SetEnabled(aEnabled);
+
+      LOG(LogLevel::Debug, ("%s track %p for captured MediaStream %p",
+                            aEnabled ? "Enabled" : "Disabled",
+                            pair.second().get(), ms.mStream.get()));
+    }
+  }
+}
+
+void HTMLMediaElement::AddCaptureMediaTrackToOutputStream(
+    dom::MediaTrack* aTrack, OutputMediaStream& aOutputStream,
+    bool aAsyncAddtrack) {
+  if (aOutputStream.mCapturingDecoder) {
+    MOZ_ASSERT(!aOutputStream.mCapturingMediaStream);
+    return;
+  }
+  aOutputStream.mCapturingMediaStream = true;
+
   if (aOutputStream.mStream == mSrcStream) {
     // Cycle detected. This can happen since tracks are added async.
     // We avoid forwarding it to the output here or we'd get into an infloop.
-    LOG(LogLevel::Warning,
-        ("NOT adding output track source %p to output stream "
-         "%p -- cycle detected",
-         aSource, aOutputStream.mStream.get()));
+    return;
+  }
+
+  if (!aTrack) {
+    MOZ_ASSERT(false, "Bad MediaTrack");
     return;
   }
 
-  LOG(LogLevel::Debug, ("Adding output track source %p to output stream %p",
-                        aSource, aOutputStream.mStream.get()));
-
-  RefPtr<MediaStreamTrack> domTrack;
-  if (aSource->Track()->mType == MediaSegment::AUDIO) {
-    domTrack = new AudioStreamTrack(aOutputStream.mStream->GetParentObject(),
-                                    aSource->Track(), aSource);
-  } else {
-    domTrack = new VideoStreamTrack(aOutputStream.mStream->GetParentObject(),
-                                    aSource->Track(), aSource);
-  }
-
-  switch (aMode) {
-    case AddTrackMode::ASYNC:
-      mMainThreadEventTarget->Dispatch(
-          NewRunnableMethod<StoreRefPtrPassByPtr<MediaStreamTrack>>(
-              "DOMMediaStream::AddTrackInternal", aOutputStream.mStream,
-              &DOMMediaStream::AddTrackInternal, domTrack));
-      break;
-    case AddTrackMode::SYNC:
-      aOutputStream.mStream->AddTrackInternal(domTrack);
-      break;
-    default:
-      MOZ_CRASH("Unexpected mode");
-  }
-
-  LOG(LogLevel::Debug,
-      ("Created capture %s track %p",
-       domTrack->AsAudioStreamTrack() ? "audio" : "video", domTrack.get()));
-}
-
-void HTMLMediaElement::UpdateOutputTrackSources() {
-  // This updates the track sources in mOutputTrackSources so they're in sync
-  // with the tracks being currently played, and state saying whether we should
-  // be capturing tracks. This method is long so here is a breakdown:
-  // - Figure out the tracks that should be captured
-  // - Diff those against currently captured tracks (mOutputTrackSources), into
-  //   tracks-to-add, and tracks-to-remove
-  // - Remove the tracks in tracks-to-remove and dispatch "removetrack" and
-  //   "ended" events for them
-  // - If playback has ended, or there is no longer a media provider object,
-  //   remove any OutputMediaStreams that have the finish-when-ended flag set
-  // - Create track sources for, and add to OutputMediaStreams, the tracks in
-  //   tracks-to-add
-
-  const bool shouldHaveTrackSources = mTracksCaptured.Ref() &&
-                                      !IsPlaybackEnded() &&
-                                      mReadyState >= HAVE_METADATA;
-
-  // Add track sources for all enabled/selected MediaTracks.
+  MediaStreamTrack* inputTrack = mSrcStream->GetTrackById(aTrack->GetId());
+  MOZ_ASSERT(inputTrack);
+  if (!inputTrack) {
+    NS_ERROR("Input track not found in source stream");
+    return;
+  }
+  MOZ_DIAGNOSTIC_ASSERT(!inputTrack->Ended());
+
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return;
   }
 
-  if (mDecoder) {
-    mDecoder->SetOutputCaptured(mTracksCaptured.Ref());
-  }
-
-  // Start with all MediaTracks
-  AutoTArray<RefPtr<MediaTrack>, 4> mediaTracksToAdd;
-  if (shouldHaveTrackSources) {
-    GetAllEnabledMediaTracks(mediaTracksToAdd);
-  }
-
-  // ...and all MediaElementTrackSources.
-  AutoTArray<nsString, 4> trackSourcesToRemove;
-  for (const auto& entry : mOutputTrackSources) {
-    trackSourcesToRemove.AppendElement(entry.GetKey());
-  }
-
-  // Then work out the differences.
-  for (const auto& track :
-       AutoTArray<RefPtr<MediaTrack>, 4>(mediaTracksToAdd)) {
-    if (mOutputTrackSources.GetWeak(track->GetId())) {
-      mediaTracksToAdd.RemoveElement(track);
-      trackSourcesToRemove.RemoveElement(track->GetId());
-    }
-  }
-
-  // First remove stale track sources.
-  for (const auto& id : trackSourcesToRemove) {
-    RefPtr<MediaElementTrackSource> source = mOutputTrackSources.GetWeak(id);
-
-    LOG(LogLevel::Debug, ("Removing output track source %p for track %s",
-                          source.get(), NS_ConvertUTF16toUTF8(id).get()));
-
-    if (mDecoder) {
-      mDecoder->RemoveOutputTrack(source->Track());
-    }
-
-    // The source of this track just ended. Force-notify that it ended.
-    // If we bounce it to the MediaTrackGraph it might not be picked up,
-    // for instance if the MediaInputPort was destroyed in the same
-    // iteration as it was added.
+  MediaSegment::Type type = inputTrack->AsAudioStreamTrack()
+                                ? MediaSegment::AUDIO
+                                : MediaSegment::VIDEO;
+  ProcessedMediaTrack* track =
+      inputTrack->Graph()->CreateForwardedInputTrack(type);
+  RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(track);
+  auto source = MakeRefPtr<StreamCaptureTrackSource>(&inputTrack->GetSource(),
+                                                     track, port);
+
+  // Track is muted initially, so we don't leak data if it's added while paused
+  // and an MTG iteration passes before the mute comes into effect.
+  source->SetEnabled(mSrcStreamIsPlaying);
+
+  RefPtr<MediaStreamTrack> domTrack;
+  if (inputTrack->AsAudioStreamTrack()) {
+    domTrack = new AudioStreamTrack(window, track, source);
+  } else {
+    domTrack = new VideoStreamTrack(window, track, source);
+  }
+
+  aOutputStream.mTracks.AppendElement(
+      Pair<nsString, RefPtr<MediaStreamTrackSource>>(aTrack->GetId(),
+                                                     source.get()));
+
+  if (aAsyncAddtrack) {
     mMainThreadEventTarget->Dispatch(
-        NewRunnableMethod("MediaElementTrackSource::OverrideEnded", source,
-                          &MediaElementTrackSource::OverrideEnded));
-
-    mOutputTrackSources.Remove(id);
-  }
-
-  // Then update finish-when-ended output streams as needed.
+        NewRunnableMethod<StoreRefPtrPassByPtr<MediaStreamTrack>>(
+            "DOMMediaStream::AddTrackInternal", aOutputStream.mStream,
+            &DOMMediaStream::AddTrackInternal, domTrack));
+  } else {
+    aOutputStream.mStream->AddTrackInternal(domTrack);
+  }
+
+  LOG(LogLevel::Debug,
+      ("Created %s track %p from track %p through MediaInputPort %p",
+       inputTrack->AsAudioStreamTrack() ? "audio" : "video", domTrack.get(),
+       inputTrack, port.get()));
+}
+
+void HTMLMediaElement::DiscardFinishWhenEndedOutputStreams() {
+  // Discard all output streams that have finished now.
   for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
     if (!mOutputStreams[i].mFinishWhenEnded) {
       continue;
     }
-
-    if (!mOutputStreams[i].mFinishWhenEndedLoadingSrc &&
-        !mOutputStreams[i].mFinishWhenEndedAttrStream) {
-      // This finish-when-ended stream has not seen any source loaded yet.
-      // Update the loading src if it's time.
-      if (!IsPlaybackEnded()) {
-        if (mLoadingSrc) {
-          mOutputStreams[i].mFinishWhenEndedLoadingSrc = mLoadingSrc;
-        } else if (mSrcAttrStream) {
-          mOutputStreams[i].mFinishWhenEndedAttrStream = mSrcAttrStream;
-        }
-      }
-      continue;
-    }
-
-    // Discard finish-when-ended output streams with a loading src set as
-    // needed.
-    if (!IsPlaybackEnded() &&
-        mLoadingSrc == mOutputStreams[i].mFinishWhenEndedLoadingSrc) {
-      continue;
-    }
-    if (!IsPlaybackEnded() &&
-        mSrcAttrStream == mOutputStreams[i].mFinishWhenEndedAttrStream) {
-      continue;
-    }
     LOG(LogLevel::Debug,
-        ("Playback ended or source changed. Discarding stream %p",
+        ("Playback ended. Letting output stream %p go inactive",
          mOutputStreams[i].mStream.get()));
+    mOutputStreams[i].mStream->SetFinishedOnInactive(true);
+    if (mOutputStreams[i].mCapturingDecoder) {
+      mDecoder->RemoveOutputStream(mOutputStreams[i].mStream);
+    }
     mOutputStreams.RemoveElementAt(i);
-    if (mOutputStreams.IsEmpty()) {
-      mTracksCaptured = nullptr;
-    }
-  }
-
-  // Finally add new MediaTracks.
-  for (const auto& mediaTrack : mediaTracksToAdd) {
-    nsAutoString id;
-    mediaTrack->GetId(id);
-
-    MediaSegment::Type type;
-    if (mediaTrack->AsAudioTrack()) {
-      type = MediaSegment::AUDIO;
-    } else if (mediaTrack->AsVideoTrack()) {
-      type = MediaSegment::VIDEO;
-    } else {
-      MOZ_CRASH("Unknown track type");
-    }
-
-    RefPtr<ProcessedMediaTrack> track;
-    RefPtr<MediaElementTrackSource> source;
-    if (mDecoder) {
-      track = mTracksCaptured.Ref()->mTrack->Graph()->CreateForwardedInputTrack(
-          type);
-      RefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
-      if (!principal || IsCORSSameOrigin()) {
-        principal = NodePrincipal();
-      }
-      source = MakeAndAddRef<MediaElementTrackSource>(track, principal);
-      mDecoder->AddOutputTrack(track);
-    } else if (mSrcStream) {
-      MediaStreamTrack* inputTrack;
-      if (AudioTrack* t = mediaTrack->AsAudioTrack()) {
-        inputTrack = t->GetAudioStreamTrack();
-      } else if (VideoTrack* t = mediaTrack->AsVideoTrack()) {
-        inputTrack = t->GetVideoStreamTrack();
-      } else {
-        MOZ_CRASH("Unknown track type");
-      }
-      MOZ_ASSERT(inputTrack);
-      if (!inputTrack) {
-        NS_ERROR("Input track not found in source stream");
-        return;
-      }
-      MOZ_DIAGNOSTIC_ASSERT(!inputTrack->Ended());
-
-      track = inputTrack->Graph()->CreateForwardedInputTrack(type);
-      RefPtr<MediaInputPort> port = inputTrack->ForwardTrackContentsTo(track);
-      source = MakeAndAddRef<MediaElementTrackSource>(&inputTrack->GetSource(),
-                                                      track, port);
-
-      // Track is muted initially, so we don't leak data if it's added while
-      // paused and an MTG iteration passes before the mute comes into effect.
-      source->SetEnabled(mSrcStreamIsPlaying);
-    } else {
-      MOZ_CRASH("Unknown source");
-    }
-
-    LOG(LogLevel::Debug, ("Adding output track source %p for track %s",
-                          source.get(), NS_ConvertUTF16toUTF8(id).get()));
-
-    track->QueueSetAutoend(false);
-    MOZ_DIAGNOSTIC_ASSERT(!mOutputTrackSources.GetWeak(id));
-    mOutputTrackSources.Put(id, source);
-
-    // Add the new track source to any existing output streams
-    for (OutputMediaStream& ms : mOutputStreams) {
-      if (source->Track()->mType == MediaSegment::VIDEO &&
-          ms.mCapturingAudioOnly) {
-        // If the output stream is for audio only we ignore video sources.
-        continue;
-      }
-      AddOutputTrackSourceToOutputStream(source, ms);
-    }
   }
 }
 
 bool HTMLMediaElement::CanBeCaptured(StreamCaptureType aCaptureType) {
   // Don't bother capturing when the document has gone away
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
   if (!window) {
     return false;
@@ -3408,89 +3286,91 @@ bool HTMLMediaElement::CanBeCaptured(Str
     return false;
   }
   return true;
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureStreamInternal(
     StreamCaptureBehavior aFinishBehavior, StreamCaptureType aStreamCaptureType,
     MediaTrackGraph* aGraph) {
+  MOZ_RELEASE_ASSERT(aGraph);
   MOZ_ASSERT(CanBeCaptured(aStreamCaptureType));
 
   MarkAsContentSource(CallerAPI::CAPTURE_STREAM);
   MarkAsTainted();
 
-  if (mTracksCaptured.Ref() &&
-      aGraph != mTracksCaptured.Ref()->mTrack->Graph()) {
+  // We don't support routing to a different graph.
+  if (!mOutputStreams.IsEmpty() &&
+      aGraph != mOutputStreams[0].mGraphKeepAliveDummyStream->mTrack->Graph()) {
     return nullptr;
   }
 
-  if (!mTracksCaptured.Ref()) {
-    // This is the first output stream, or there are no tracks. If the former,
-    // start capturing all tracks. If the latter, they will be added later.
-    mTracksCaptured = MakeRefPtr<SharedDummyTrack>(
-        aGraph->CreateSourceTrack(MediaSegment::AUDIO));
-    UpdateOutputTrackSources();
-  }
-
+  OutputMediaStream* out = mOutputStreams.AppendElement();
   nsPIDOMWindowInner* window = OwnerDoc()->GetInnerWindow();
-  OutputMediaStream* out = mOutputStreams.AppendElement(OutputMediaStream(
-      MakeRefPtr<DOMMediaStream>(window),
-      aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO,
-      aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED));
-
-  if (aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED &&
-      !mOutputTrackSources.IsEmpty()) {
-    // This output stream won't receive any more tracks when playback of the
-    // current src of this media element ends, or when the src of this media
-    // element changes. If we're currently playing something (i.e., if there are
-    // tracks currently captured), set the current src on the output stream so
-    // this can be tracked. If we're not playing anything,
-    // UpdateOutputTrackSources will set the current src when it becomes
-    // available later.
-    if (mLoadingSrc) {
-      out->mFinishWhenEndedLoadingSrc = mLoadingSrc;
-    }
-    if (mSrcAttrStream) {
-      out->mFinishWhenEndedAttrStream = mSrcAttrStream;
-    }
-    MOZ_ASSERT(out->mFinishWhenEndedLoadingSrc ||
-               out->mFinishWhenEndedAttrStream);
-  }
+  out->mGraphKeepAliveDummyStream =
+      mOutputStreams.Length() == 1
+          ? MakeRefPtr<SharedDummyTrack>(
+                aGraph->CreateSourceTrack(MediaSegment::AUDIO))
+          : mOutputStreams[0].mGraphKeepAliveDummyStream;
+  out->mStream = MakeAndAddRef<DOMMediaStream>(window);
+  out->mStream->SetFinishedOnInactive(false);
+  out->mFinishWhenEnded =
+      aFinishBehavior == StreamCaptureBehavior::FINISH_WHEN_ENDED;
+  out->mCapturingAudioOnly =
+      aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO;
 
   if (aStreamCaptureType == StreamCaptureType::CAPTURE_AUDIO) {
     if (mSrcStream) {
       // We don't support applying volume and mute to the captured stream, when
       // capturing a MediaStream.
       ReportToConsole(nsIScriptError::errorFlag,
                       "MediaElementAudioCaptureOfMediaStreamError");
     }
 
     // mAudioCaptured tells the user that the audio played by this media element
     // is being routed to the captureStreams *instead* of being played to
     // speakers.
     mAudioCaptured = true;
   }
 
-  for (const auto& entry : mOutputTrackSources) {
-    const RefPtr<MediaElementTrackSource>& source = entry.GetData();
-    if (source->Track()->mType == MediaSegment::VIDEO) {
+  if (mDecoder) {
+    out->mCapturingDecoder = true;
+    mDecoder->AddOutputStream(out->mStream, out->mGraphKeepAliveDummyStream);
+  } else if (mSrcStream) {
+    out->mCapturingMediaStream = true;
+  }
+
+  if (mReadyState == HAVE_NOTHING) {
+    // Do not expose the tracks until we have metadata.
+    RefPtr<DOMMediaStream> result = out->mStream;
+    return result.forget();
+  }
+
+  if (mSrcStream) {
+    MOZ_DIAGNOSTIC_ASSERT(AudioTracks(), "Element can't have been unlinked");
+    for (size_t i = 0; i < AudioTracks()->Length(); ++i) {
+      AudioTrack* t = (*AudioTracks())[i];
+      if (t->Enabled()) {
+        AddCaptureMediaTrackToOutputStream(t, *out, false);
+      }
+    }
+    if (IsVideo() && !out->mCapturingAudioOnly) {
+      MOZ_DIAGNOSTIC_ASSERT(VideoTracks(), "Element can't have been unlinked");
       // Only add video tracks if we're a video element and the output stream
       // wants video.
-      if (!IsVideo()) {
-        continue;
-      }
-      if (out->mCapturingAudioOnly) {
-        continue;
+      for (size_t i = 0; i < VideoTracks()->Length(); ++i) {
+        VideoTrack* t = (*VideoTracks())[i];
+        if (t->Selected()) {
+          AddCaptureMediaTrackToOutputStream(t, *out, false);
+        }
       }
     }
-    AddOutputTrackSourceToOutputStream(source, *out, AddTrackMode::SYNC);
-  }
-
-  return do_AddRef(out->mStream);
+  }
+  RefPtr<DOMMediaStream> result = out->mStream;
+  return result.forget();
 }
 
 already_AddRefed<DOMMediaStream> HTMLMediaElement::CaptureAudio(
     ErrorResult& aRv, MediaTrackGraph* aGraph) {
   MOZ_RELEASE_ASSERT(aGraph);
 
   if (!CanBeCaptured(StreamCaptureType::CAPTURE_AUDIO)) {
     aRv.Throw(NS_ERROR_FAILURE);
@@ -3766,17 +3646,17 @@ HTMLMediaElement::HTMLMediaElement(
     : nsGenericHTMLElement(std::move(aNodeInfo)),
       mWatchManager(this,
                     OwnerDoc()->AbstractMainThreadFor(TaskCategory::Other)),
       mMainThreadEventTarget(OwnerDoc()->EventTargetFor(TaskCategory::Other)),
       mAbstractMainThread(
           OwnerDoc()->AbstractMainThreadFor(TaskCategory::Other)),
       mShutdownObserver(new ShutdownObserver),
       mPlayed(new TimeRanges(ToSupports(OwnerDoc()))),
-      mTracksCaptured(nullptr, "HTMLMediaElement::mTracksCaptured"),
+      mPaused(true, "HTMLMediaElement::mPaused"),
       mErrorSink(new ErrorSink(this)),
       mAudioChannelWrapper(new AudioChannelAgentCallback(this)),
       mSink(MakePair(nsString(), RefPtr<AudioDeviceInfo>())),
       mShowPoster(IsVideo()) {
   MOZ_ASSERT(mMainThreadEventTarget);
   MOZ_ASSERT(mAbstractMainThread);
   // Please don't add anything to this constructor or the initialization
   // list that can cause AddRef to be called. This prevents subclasses
@@ -3794,27 +3674,16 @@ void HTMLMediaElement::Init() {
 
   mAudioTrackList = new AudioTrackList(OwnerDoc()->GetParentObject(), this);
   mVideoTrackList = new VideoTrackList(OwnerDoc()->GetParentObject(), this);
 
   DecoderDoctorLogger::LogConstruction(this);
 
   mWatchManager.Watch(mPaused, &HTMLMediaElement::UpdateWakeLock);
 
-  mWatchManager.Watch(mTracksCaptured,
-                      &HTMLMediaElement::UpdateOutputTrackSources);
-  mWatchManager.Watch(mReadyState, &HTMLMediaElement::UpdateOutputTrackSources);
-
-  mWatchManager.Watch(mDownloadSuspendedByCache,
-                      &HTMLMediaElement::UpdateReadyStateInternal);
-  mWatchManager.Watch(mFirstFrameLoaded,
-                      &HTMLMediaElement::UpdateReadyStateInternal);
-  mWatchManager.Watch(mSrcStreamPlaybackEnded,
-                      &HTMLMediaElement::UpdateReadyStateInternal);
-
   ErrorResult rv;
 
   double defaultVolume = Preferences::GetFloat("media.default_volume", 1.0);
   SetVolume(defaultVolume, rv);
 
   RegisterActivityObserver();
   NotifyOwnerDocumentActivityChanged();
 
@@ -3830,18 +3699,16 @@ void HTMLMediaElement::Init() {
 
 HTMLMediaElement::~HTMLMediaElement() {
   MOZ_ASSERT(mInitialized,
              "HTMLMediaElement must be initialized before it is destroyed.");
   NS_ASSERTION(
       !mHasSelfReference,
       "How can we be destroyed if we're still holding a self reference?");
 
-  mWatchManager.Shutdown();
-
   mShutdownObserver->Unsubscribe();
 
   if (mVideoFrameContainer) {
     mVideoFrameContainer->ForgetElement();
   }
   UnregisterActivityObserver();
 
   mSetCDMRequest.DisconnectIfExists();
@@ -4174,16 +4041,24 @@ void HTMLMediaElement::ReleaseAudioWakeL
     mWakeLock->Unlock(rv);
     rv.SuppressException();
     mWakeLock = nullptr;
   }
 }
 
 void HTMLMediaElement::WakeLockRelease() { ReleaseAudioWakeLockIfExists(); }
 
+HTMLMediaElement::OutputMediaStream::OutputMediaStream()
+    : mFinishWhenEnded(false),
+      mCapturingAudioOnly(false),
+      mCapturingDecoder(false),
+      mCapturingMediaStream(false) {}
+
+HTMLMediaElement::OutputMediaStream::~OutputMediaStream() = default;
+
 void HTMLMediaElement::GetEventTargetParent(EventChainPreVisitor& aVisitor) {
   if (!this->Controls() || !aVisitor.mEvent->mFlags.mIsTrusted) {
     nsGenericHTMLElement::GetEventTargetParent(aVisitor);
     return;
   }
 
   HTMLInputElement* el = nullptr;
   nsCOMPtr<nsINode> node;
@@ -4766,16 +4641,26 @@ nsresult HTMLMediaElement::FinishDecoder
                [](const GenericPromise::ResolveOrRejectValue& aValue) {
                  MOZ_ASSERT(aValue.IsResolve() && !aValue.ResolveValue());
                });
 #else
         ;
 #endif
   }
 
+  for (OutputMediaStream& ms : mOutputStreams) {
+    if (ms.mCapturingMediaStream) {
+      MOZ_ASSERT(!ms.mCapturingDecoder);
+      continue;
+    }
+
+    ms.mCapturingDecoder = true;
+    aDecoder->AddOutputStream(ms.mStream, ms.mGraphKeepAliveDummyStream);
+  }
+
   if (mMediaKeys) {
     if (mMediaKeys->GetCDMProxy()) {
       mDecoder->SetCDMProxy(mMediaKeys->GetCDMProxy());
     } else {
       // CDM must have crashed.
       ShutdownDecoder();
       return NS_ERROR_FAILURE;
     }
@@ -4899,16 +4784,17 @@ class HTMLMediaElement::MediaStreamTrack
 
     if (mElement->IsPlaybackEnded()) {
       return;
     }
     LOG(LogLevel::Debug, ("%p, mSrcStream %p became inactive", mElement.get(),
                           mElement->mSrcStream.get()));
 
     mElement->PlaybackEnded();
+    mElement->UpdateReadyStateInternal();
   }
 
   void NotifyInactive() override {
     if (!mElement) {
       return;
     }
 
     if (!mElement->IsVideo()) {
@@ -5086,20 +4972,28 @@ void HTMLMediaElement::EndSrcMediaStream
     mWatchManager.Unwatch(mMediaStreamRenderer->CurrentGraphTime(),
                           &HTMLMediaElement::UpdateSrcStreamTime);
     mMediaStreamRenderer->Shutdown();
     mMediaStreamRenderer = nullptr;
   }
 
   mSrcStream->UnregisterTrackListener(mMediaStreamTrackListener.get());
   mMediaStreamTrackListener = nullptr;
+  mSrcStreamTracksAvailable = false;
   mSrcStreamPlaybackEnded = false;
   mSrcStreamReportPlaybackEnded = false;
   mSrcStreamVideoPrincipal = nullptr;
 
+#ifdef DEBUG
+  for (OutputMediaStream& ms : mOutputStreams) {
+    // These tracks were removed by clearing AudioTracks() and VideoTracks().
+    MOZ_ASSERT(ms.mTracks.IsEmpty());
+  }
+#endif
+
   mSrcStream = nullptr;
 }
 
 static already_AddRefed<AudioTrack> CreateAudioTrack(
     AudioStreamTrack* aStreamTrack, nsIGlobalObject* aOwnerGlobal) {
   nsAutoString id;
   nsAutoString label;
   aStreamTrack->GetId(id);
@@ -5126,17 +5020,17 @@ void HTMLMediaElement::NotifyMediaStream
     const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(aTrack);
 
   if (aTrack->Ended()) {
     return;
   }
 
 #ifdef DEBUG
-  nsAutoString id;
+  nsString id;
   aTrack->GetId(id);
 
   LOG(LogLevel::Debug, ("%p, Adding %sTrack with id %s", this,
                         aTrack->AsAudioStreamTrack() ? "Audio" : "Video",
                         NS_ConvertUTF16toUTF8(id).get()));
 #endif
 
   if (AudioStreamTrack* t = aTrack->AsAudioStreamTrack()) {
@@ -5156,21 +5050,35 @@ void HTMLMediaElement::NotifyMediaStream
     // New MediaStreamTrack added, set the new added video track as selected
     // video track when there is no selected track.
     if (VideoTracks()->SelectedIndex() == -1) {
       MOZ_ASSERT(!mSelectedVideoStreamTrack);
       videoTrack->SetEnabledInternal(true, dom::MediaTrack::FIRE_NO_EVENTS);
     }
   }
 
-  // The set of enabled AudioTracks and selected video track might have changed.
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
-  mAbstractMainThread->TailDispatcher().AddDirectTask(
-      NewRunnableMethod("HTMLMediaElement::FirstFrameLoaded", this,
-                        &HTMLMediaElement::FirstFrameLoaded));
+  UpdateReadyStateInternal();
+
+  if (!mSrcStreamTracksAvailable) {
+    mAbstractMainThread->Dispatch(NS_NewRunnableFunction(
+        "HTMLMediaElement::NotifyMediaStreamTrackAdded->FirstFrameLoaded",
+        [this, self = RefPtr<HTMLMediaElement>(this), stream = mSrcStream]() {
+          if (!mSrcStream || mSrcStream != stream) {
+            return;
+          }
+
+          LOG(LogLevel::Debug,
+              ("MediaElement %p MediaStream tracks available", this));
+
+          mSrcStreamTracksAvailable = true;
+
+          FirstFrameLoaded();
+          UpdateReadyStateInternal();
+        }));
+  }
 }
 
 void HTMLMediaElement::NotifyMediaStreamTrackRemoved(
     const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(aTrack);
 
   nsAutoString id;
   aTrack->GetId(id);
@@ -5206,32 +5114,24 @@ void HTMLMediaElement::ProcessMediaFragm
     mFragmentStart = parser.GetStartTime();
   }
 }
 
 void HTMLMediaElement::MetadataLoaded(const MediaInfo* aInfo,
                                       UniquePtr<const MetadataTags> aTags) {
   MOZ_ASSERT(NS_IsMainThread());
 
-  if (mDecoder) {
-    ConstructMediaTracks(aInfo);
-  }
-
   SetMediaInfo(*aInfo);
 
   mIsEncrypted =
       aInfo->IsEncrypted() || mPendingEncryptedInitData.IsEncrypted();
   mTags = std::move(aTags);
   mLoadedDataFired = false;
   ChangeReadyState(HAVE_METADATA);
 
-  // Add output tracks synchronously now to be sure they're available in
-  // "loadedmetadata" event handlers.
-  UpdateOutputTrackSources();
-
   DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   if (IsVideo() && HasVideo()) {
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
   NS_ASSERTION(!HasVideo() || (mMediaInfo.mVideo.mDisplay.width > 0 &&
                                mMediaInfo.mVideo.mDisplay.height > 0),
                "Video resolution must be known on 'loadedmetadata'");
   DispatchAsyncEvent(NS_LITERAL_STRING("loadedmetadata"));
@@ -5262,28 +5162,54 @@ void HTMLMediaElement::MetadataLoaded(co
     NotifyOwnerDocumentActivityChanged();
   }
 
   if (mDefaultPlaybackStartPosition != 0.0) {
     SetCurrentTime(mDefaultPlaybackStartPosition);
     mDefaultPlaybackStartPosition = 0.0;
   }
 
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+  UpdateReadyStateInternal();
+
+  if (!mSrcStream) {
+    return;
+  }
+
+  for (OutputMediaStream& ms : mOutputStreams) {
+    if (AudioTracks()) {
+      for (size_t i = 0; i < AudioTracks()->Length(); ++i) {
+        AudioTrack* t = (*AudioTracks())[i];
+        if (t->Enabled()) {
+          AddCaptureMediaTrackToOutputStream(t, ms);
+        }
+      }
+    }
+    if (VideoTracks() && IsVideo() && !ms.mCapturingAudioOnly) {
+      // Only add video tracks if we're a video element and the output stream
+      // wants video.
+      for (size_t i = 0; i < VideoTracks()->Length(); ++i) {
+        VideoTrack* t = (*VideoTracks())[i];
+        if (t->Selected()) {
+          AddCaptureMediaTrackToOutputStream(t, ms);
+        }
+      }
+    }
+  }
 }
 
 void HTMLMediaElement::FirstFrameLoaded() {
   LOG(LogLevel::Debug,
       ("%p, FirstFrameLoaded() mFirstFrameLoaded=%d mWaitingForKey=%d", this,
-       mFirstFrameLoaded.Ref(), mWaitingForKey));
+       mFirstFrameLoaded, mWaitingForKey));
 
   NS_ASSERTION(!mSuspendedAfterFirstFrame, "Should not have already suspended");
 
   if (!mFirstFrameLoaded) {
     mFirstFrameLoaded = true;
+    UpdateReadyStateInternal();
   }
 
   ChangeDelayLoadStatus(false);
 
   if (mDecoder && mAllowSuspendAfterFirstFrame && mPaused &&
       !HasAttr(kNameSpaceID_None, nsGkAtoms::autoplay) &&
       mPreloadAction == HTMLMediaElement::PRELOAD_METADATA) {
     mSuspendedAfterFirstFrame = true;
@@ -5303,16 +5229,22 @@ void HTMLMediaElement::DecodeError(const
   nsAutoString src;
   GetCurrentSrc(src);
   AutoTArray<nsString, 1> params = {src};
   ReportLoadError("MediaLoadDecodeError", params);
 
   DecoderDoctorDiagnostics diagnostics;
   diagnostics.StoreDecodeError(OwnerDoc(), aError, src, __func__);
 
+  if (AudioTracks()) {
+    AudioTracks()->EmptyTracks();
+  }
+  if (VideoTracks()) {
+    VideoTracks()->EmptyTracks();
+  }
   if (mIsLoadingFromSourceChildren) {
     mErrorSink->ResetError();
     if (mSourceLoadCandidate) {
       DispatchAsyncSourceError(mSourceLoadCandidate);
       QueueLoadFromSourceTask();
     } else {
       NS_WARNING("Should know the source we were loading from!");
     }
@@ -5343,18 +5275,17 @@ void HTMLMediaElement::Error(uint16_t aE
 
 void HTMLMediaElement::PlaybackEnded() {
   // We changed state which can affect AddRemoveSelfReference
   AddRemoveSelfReference();
 
   NS_ASSERTION(!mDecoder || mDecoder->IsEnded(),
                "Decoder fired ended, but not in ended state");
 
-  // IsPlaybackEnded() became true.
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
+  DiscardFinishWhenEndedOutputStreams();
 
   if (mSrcStream) {
     LOG(LogLevel::Debug,
         ("%p, got duration by reaching the end of the resource", this));
     mSrcStreamPlaybackEnded = true;
     DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
   } else {
     // mediacapture-main:
@@ -5426,16 +5357,17 @@ void HTMLMediaElement::SeekAborted() {
           promise->MaybeReject(NS_ERROR_DOM_ABORT_ERR);
         }));
   }
   MOZ_ASSERT(!mSeekDOMPromise);
 }
 
 void HTMLMediaElement::NotifySuspendedByCache(bool aSuspendedByCache) {
   mDownloadSuspendedByCache = aSuspendedByCache;
+  UpdateReadyStateInternal();
 }
 
 void HTMLMediaElement::DownloadSuspended() {
   if (mNetworkState == NETWORK_LOADING) {
     DispatchAsyncEvent(NS_LITERAL_STRING("progress"));
   }
   ChangeNetworkState(NETWORK_IDLE);
 }
@@ -5480,17 +5412,17 @@ void HTMLMediaElement::CheckProgress(boo
       // Were stalled.  Restart timer.
       StartProgressTimer();
       if (!mLoadedDataFired) {
         ChangeDelayLoadStatus(true);
       }
     }
     // Download statistics may have been updated, force a recheck of the
     // readyState.
-    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+    UpdateReadyStateInternal();
   }
 
   if (now - mDataTime >= TimeDuration::FromMilliseconds(STALL_MS)) {
     if (!mMediaSource) {
       DispatchAsyncEvent(NS_LITERAL_STRING("stalled"));
     } else {
       ChangeDelayLoadStatus(false);
     }
@@ -5573,22 +5505,24 @@ void HTMLMediaElement::UpdateReadyStateI
     // on its own thread before MetadataLoaded gets a chance to run.
     // The arrival of more data can't change us out of this readyState.
     LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
                           "Decoder ready state < HAVE_METADATA",
                           this));
     return;
   }
 
-  if (mDecoder) {
-    // IsPlaybackEnded() might have become false.
-    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateOutputTrackSources);
-  }
-
   if (mSrcStream && mReadyState < HAVE_METADATA) {
+    if (!mSrcStreamTracksAvailable) {
+      LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
+                            "MediaStreamTracks not available yet",
+                            this));
+      return;
+    }
+
     bool hasAudioTracks = AudioTracks() && !AudioTracks()->IsEmpty();
     bool hasVideoTracks = VideoTracks() && !VideoTracks()->IsEmpty();
     if (!hasAudioTracks && !hasVideoTracks) {
       LOG(LogLevel::Debug, ("MediaElement %p UpdateReadyStateInternal() "
                             "Stream with no tracks",
                             this));
       // Give it one last chance to remove the self reference if needed.
       AddRemoveSelfReference();
@@ -6166,24 +6100,18 @@ already_AddRefed<nsIPrincipal> HTMLMedia
   }
   return nullptr;
 }
 
 void HTMLMediaElement::NotifyDecoderPrincipalChanged() {
   RefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
   bool isSameOrigin = !principal || IsCORSSameOrigin();
   mDecoder->UpdateSameOriginStatus(isSameOrigin);
-
-  if (isSameOrigin) {
-    principal = NodePrincipal();
-  }
-  for (const auto& entry : mOutputTrackSources) {
-    entry.GetData()->SetPrincipal(principal);
-  }
-  mDecoder->SetOutputTracksPrincipal(principal);
+  mDecoder->SetOutputStreamPrincipal(isSameOrigin ? NodePrincipal()
+                                                  : principal.get());
 }
 
 void HTMLMediaElement::Invalidate(bool aImageSizeChanged,
                                   Maybe<nsIntSize>& aNewIntrinsicSize,
                                   bool aForceInvalidate) {
   nsIFrame* frame = GetPrimaryFrame();
   if (aNewIntrinsicSize) {
     UpdateMediaSize(aNewIntrinsicSize.value());
@@ -6214,17 +6142,17 @@ void HTMLMediaElement::UpdateMediaSize(c
   MOZ_ASSERT(NS_IsMainThread());
 
   if (IsVideo() && mReadyState != HAVE_NOTHING &&
       mMediaInfo.mVideo.mDisplay != aSize) {
     DispatchAsyncEvent(NS_LITERAL_STRING("resize"));
   }
 
   mMediaInfo.mVideo.mDisplay = aSize;
-  mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+  UpdateReadyStateInternal();
 
   if (mFirstFrameListener) {
     mSelectedVideoStreamTrack->RemoveVideoOutput(mFirstFrameListener);
     // The first-frame listener won't be needed again for this stream.
     mFirstFrameListener = nullptr;
   }
 }
 
@@ -6957,19 +6885,17 @@ void HTMLMediaElement::NotifyWaitingForK
   // 7.3.4 Queue a "waitingforkey" Event
   // 1. Let the media element be the specified HTMLMediaElement object.
   // 2. If the media element's waiting for key value is true, abort these steps.
   if (mWaitingForKey == NOT_WAITING_FOR_KEY) {
     // 3. Set the media element's waiting for key value to true.
     // Note: algorithm continues in UpdateReadyStateInternal() when all decoded
     // data enqueued in the MDSM is consumed.
     mWaitingForKey = WAITING_FOR_KEY;
-    // mWaitingForKey changed outside of UpdateReadyStateInternal. This may
-    // affect mReadyState.
-    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
+    UpdateReadyStateInternal();
   }
 }
 
 AudioTrackList* HTMLMediaElement::AudioTracks() { return mAudioTrackList; }
 
 VideoTrackList* HTMLMediaElement::VideoTracks() { return mVideoTrackList; }
 
 TextTrackList* HTMLMediaElement::GetTextTracks() {
@@ -6997,19 +6923,17 @@ TextTrackManager* HTMLMediaElement::GetO
   return mTextTrackManager;
 }
 
 MediaDecoderOwner::NextFrameStatus HTMLMediaElement::NextFrameStatus() {
   if (mDecoder) {
     return mDecoder->NextFrameStatus();
   }
   if (mSrcStream) {
-    AutoTArray<RefPtr<MediaTrack>, 4> tracks;
-    GetAllEnabledMediaTracks(tracks);
-    if (!tracks.IsEmpty() && !mSrcStreamPlaybackEnded) {
+    if (mSrcStreamTracksAvailable && !mSrcStreamPlaybackEnded) {
       return NEXT_FRAME_AVAILABLE;
     }
     return NEXT_FRAME_UNAVAILABLE;
   }
   return NEXT_FRAME_UNINITIALIZED;
 }
 
 void HTMLMediaElement::SetDecoder(MediaDecoder* aDecoder) {
@@ -7166,30 +7090,24 @@ void HTMLMediaElement::AudioCaptureTrack
     RefPtr<DOMMediaStream> stream =
         CaptureStreamInternal(StreamCaptureBehavior::CONTINUE_WHEN_ENDED,
                               StreamCaptureType::CAPTURE_AUDIO, mtg);
     mStreamWindowCapturer =
         MakeUnique<MediaStreamWindowCapturer>(stream, window->WindowID());
   } else if (!aCapture && mStreamWindowCapturer) {
     for (size_t i = 0; i < mOutputStreams.Length(); i++) {
       if (mOutputStreams[i].mStream == mStreamWindowCapturer->mStream) {
-        // We own this MediaStream, it is not exposed to JS.
-        AutoTArray<RefPtr<MediaStreamTrack>, 2> tracks;
-        mStreamWindowCapturer->mStream->GetTracks(tracks);
-        for (auto& track : tracks) {
-          track->Stop();
+        if (mOutputStreams[i].mCapturingDecoder && mDecoder) {
+          mDecoder->RemoveOutputStream(mOutputStreams[i].mStream);
         }
         mOutputStreams.RemoveElementAt(i);
         break;
       }
     }
     mStreamWindowCapturer = nullptr;
-    if (mOutputStreams.IsEmpty()) {
-      mTracksCaptured = nullptr;
-    }
   }
 }
 
 void HTMLMediaElement::NotifyCueDisplayStatesChanged() {
   if (!mTextTrackManager) {
     return;
   }
 
@@ -7318,20 +7236,22 @@ bool HTMLMediaElement::IsAudible() const
   if (mMuted || (std::fabs(Volume()) <= 1e-7)) {
     return false;
   }
 
   return mIsAudioTrackAudible;
 }
 
 void HTMLMediaElement::ConstructMediaTracks(const MediaInfo* aInfo) {
-  if (!aInfo) {
+  if (mMediaTracksConstructed || !aInfo) {
     return;
   }
 
+  mMediaTracksConstructed = true;
+
   AudioTrackList* audioList = AudioTracks();
   if (audioList && aInfo->HasAudio()) {
     const TrackInfo& info = aInfo->mAudio;
     RefPtr<AudioTrack> track = MediaTrackList::CreateAudioTrack(
         audioList->GetOwnerGlobal(), info.mId, info.mKind, info.mLabel,
         info.mLanguage, info.mEnabled);
 
     audioList->AddTrack(track);
@@ -7348,19 +7268,22 @@ void HTMLMediaElement::ConstructMediaTra
     track->SetEnabledInternal(info.mEnabled, MediaTrack::FIRE_NO_EVENTS);
   }
 }
 
 void HTMLMediaElement::RemoveMediaTracks() {
   if (mAudioTrackList) {
     mAudioTrackList->RemoveTracks();
   }
+
   if (mVideoTrackList) {
     mVideoTrackList->RemoveTracks();
   }
+
+  mMediaTracksConstructed = false;
 }
 
 class MediaElementGMPCrashHelper : public GMPCrashHelper {
  public:
   explicit MediaElementGMPCrashHelper(HTMLMediaElement* aElement)
       : mElement(aElement) {
     MOZ_ASSERT(NS_IsMainThread());  // WeakPtr isn't thread safe.
   }
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -108,36 +108,16 @@ class HTMLMediaElement : public nsGeneri
  public:
   typedef mozilla::TimeStamp TimeStamp;
   typedef mozilla::layers::ImageContainer ImageContainer;
   typedef mozilla::VideoFrameContainer VideoFrameContainer;
   typedef mozilla::MediaResource MediaResource;
   typedef mozilla::MediaDecoderOwner MediaDecoderOwner;
   typedef mozilla::MetadataTags MetadataTags;
 
-  // Helper struct to keep track of the MediaStreams returned by
-  // mozCaptureStream(). For each OutputMediaStream, dom::MediaTracks get
-  // captured into MediaStreamTracks which get added to
-  // OutputMediaStream::mStream.
-  struct OutputMediaStream {
-    OutputMediaStream(RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
-                      bool aFinishWhenEnded);
-    ~OutputMediaStream();
-
-    RefPtr<DOMMediaStream> mStream;
-    const bool mCapturingAudioOnly;
-    const bool mFinishWhenEnded;
-    // If mFinishWhenEnded is true, this is the URI of the first resource
-    // mStream got tracks for, if not a MediaStream.
-    nsCOMPtr<nsIURI> mFinishWhenEndedLoadingSrc;
-    // If mFinishWhenEnded is true, this is the first MediaStream mStream got
-    // tracks for, if not a resource.
-    RefPtr<DOMMediaStream> mFinishWhenEndedAttrStream;
-  };
-
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(HTMLMediaElement)
   NS_DECL_NSIMUTATIONOBSERVER_CONTENTREMOVED
 
   CORSMode GetCORSMode() { return mCORSMode; }
 
   explicit HTMLMediaElement(
       already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo);
   void Init();
@@ -266,19 +246,17 @@ class HTMLMediaElement : public nsGeneri
   void PrincipalHandleChangedForVideoFrameContainer(
       VideoFrameContainer* aContainer,
       const PrincipalHandle& aNewPrincipalHandle) override;
 
   // Dispatch events
   void DispatchAsyncEvent(const nsAString& aName) final;
 
   // Triggers a recomputation of readyState.
-  void UpdateReadyState() override {
-    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
-  }
+  void UpdateReadyState() override { UpdateReadyStateInternal(); }
 
   // Dispatch events that were raised while in the bfcache
   nsresult DispatchPendingMediaEvents();
 
   // Return true if we can activate autoplay assuming enough data has arrived.
   bool CanActivateAutoplay();
 
   // Notify that state has changed that might cause an autoplay element to
@@ -710,16 +688,20 @@ class HTMLMediaElement : public nsGeneri
     CREATE_PATTERN,
     CREATE_IMAGEBITMAP,
     CAPTURE_STREAM,
   };
   void MarkAsContentSource(CallerAPI aAPI);
 
   Document* GetDocument() const override;
 
+  void ConstructMediaTracks(const MediaInfo* aInfo) override;
+
+  void RemoveMediaTracks() override;
+
   already_AddRefed<GMPCrashHelper> CreateGMPCrashHelper() override;
 
   nsISerialEventTarget* MainThreadEventTarget() {
     return mMainThreadEventTarget;
   }
 
   // Set the sink id (of the output device) that the audio will play. If aSinkId
   // is empty the default device will be set.
@@ -742,27 +724,47 @@ class HTMLMediaElement : public nsGeneri
   bool IsAudible() const;
 
  protected:
   virtual ~HTMLMediaElement();
 
   class AudioChannelAgentCallback;
   class ChannelLoader;
   class ErrorSink;
-  class MediaElementTrackSource;
   class MediaLoadListener;
   class MediaStreamRenderer;
   class MediaStreamTrackListener;
   class FirstFrameListener;
   class ShutdownObserver;
+  class StreamCaptureTrackSource;
 
   MediaDecoderOwner::NextFrameStatus NextFrameStatus();
 
   void SetDecoder(MediaDecoder* aDecoder);
 
+  // Holds references to the DOM wrappers for the MediaStreams that we're
+  // writing to.
+  struct OutputMediaStream {
+    OutputMediaStream();
+    ~OutputMediaStream();
+
+    RefPtr<DOMMediaStream> mStream;
+    // Dummy stream to keep mGraph from shutting down when MediaDecoder shuts
+    // down. Shared across all OutputMediaStreams as one stream is enough to
+    // keep the graph alive.
+    RefPtr<SharedDummyTrack> mGraphKeepAliveDummyStream;
+    bool mFinishWhenEnded;
+    bool mCapturingAudioOnly;
+    bool mCapturingDecoder;
+    bool mCapturingMediaStream;
+
+    // The following members are keeping state for a captured MediaStream.
+    nsTArray<Pair<nsString, RefPtr<MediaStreamTrackSource>>> mTracks;
+  };
+
   void PlayInternal(bool aHandlingUserInput);
 
   /** Use this method to change the mReadyState member, so required
    * events can be fired.
    */
   void ChangeReadyState(nsMediaReadyState aState);
 
   /**
@@ -848,59 +850,52 @@ class HTMLMediaElement : public nsGeneri
 
   /**
    * Called by our DOMMediaStream::TrackListener when a MediaStreamTrack in
    * |mSrcStream|'s playback stream has ended.
    */
   void NotifyMediaStreamTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack);
 
   /**
-   * Convenience method to get in a single list all enabled AudioTracks and, if
-   * this is a video element, the selected VideoTrack.
-   */
-  void GetAllEnabledMediaTracks(nsTArray<RefPtr<MediaTrack>>& aTracks);
-
-  /**
    * Enables or disables all tracks forwarded from mSrcStream to all
    * OutputMediaStreams. We do this for muting the tracks when pausing,
    * and unmuting when playing the media element again.
+   *
+   * If mSrcStream is unset, this does nothing.
    */
   void SetCapturedOutputStreamsEnabled(bool aEnabled);
 
   /**
-   * Create a new MediaStreamTrack for the TrackSource corresponding to aTrack
-   * and add it to the DOMMediaStream in aOutputStream. This automatically sets
-   * the output track to enabled or disabled depending on our current playing
-   * state.
+   * Create a new MediaStreamTrack for aTrack and add it to the DOMMediaStream
+   * in aOutputStream. This automatically sets the output track to enabled or
+   * disabled depending on our current playing state.
    */
-  enum class AddTrackMode { ASYNC, SYNC };
-  void AddOutputTrackSourceToOutputStream(
-      MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
-      AddTrackMode aMode = AddTrackMode::ASYNC);
+  void AddCaptureMediaTrackToOutputStream(dom::MediaTrack* aTrack,
+                                          OutputMediaStream& aOutputStream,
+                                          bool aAsyncAddtrack = true);
 
   /**
-   * Creates output track sources when this media element is captured, tracks
-   * exist, playback is not ended and readyState is >= HAVE_METADATA.
+   * Discard all output streams that are flagged to finish when playback ends.
    */
-  void UpdateOutputTrackSources();
+  void DiscardFinishWhenEndedOutputStreams();
 
   /**
    * Returns an DOMMediaStream containing the played contents of this
    * element. When aBehavior is FINISH_WHEN_ENDED, when this element ends
    * playback we will finish the stream and not play any more into it.  When
    * aType is CONTINUE_WHEN_ENDED, ending playback does not finish the stream.
    * The stream will never finish.
    *
    * When aType is CAPTURE_AUDIO, we stop playout of audio and instead route it
    * to the DOMMediaStream. Volume and mute state will be applied to the audio
    * reaching the stream. No video tracks will be captured in this case.
    */
   already_AddRefed<DOMMediaStream> CaptureStreamInternal(
-      StreamCaptureBehavior aFinishBehavior,
-      StreamCaptureType aStreamCaptureType, MediaTrackGraph* aGraph);
+      StreamCaptureBehavior aBehavior, StreamCaptureType aType,
+      MediaTrackGraph* aGraph);
 
   /**
    * Initialize a decoder as a clone of an existing decoder in another
    * element.
    * mLoadingSrc must already be set.
    */
   nsresult InitializeDecoderAsClone(ChannelMediaDecoder* aOriginal);
 
@@ -1249,28 +1244,16 @@ class HTMLMediaElement : public nsGeneri
   // and queues a task to resolve them also to dispatch a "playing" event.
   void NotifyAboutPlaying();
 
   already_AddRefed<Promise> CreateDOMPromise(ErrorResult& aRv) const;
 
   // Pass information for deciding the video decode mode to decoder.
   void NotifyDecoderActivityChanges() const;
 
-  // Constructs an AudioTrack in mAudioTrackList if aInfo reports that audio is
-  // available, and a VideoTrack in mVideoTrackList if aInfo reports that video
-  // is available.
-  void ConstructMediaTracks(const MediaInfo* aInfo);
-
-  // Removes all MediaTracks from mAudioTrackList and mVideoTrackList and fires
-  // "removetrack" on the lists accordingly.
-  // Note that by spec, this should not fire "removetrack". However, it appears
-  // other user agents do, per
-  // https://wpt.fyi/results/media-source/mediasource-avtracks.html.
-  void RemoveMediaTracks();
-
   // Mark the decoder owned by the element as tainted so that the
   // suspend-video-decoder is disabled.
   void MarkAsTainted();
 
   virtual nsresult AfterSetAttr(int32_t aNameSpaceID, nsAtom* aName,
                                 const nsAttrValue* aValue,
                                 const nsAttrValue* aOldValue,
                                 nsIPrincipal* aMaybeScriptedPrincipal,
@@ -1342,16 +1325,19 @@ class HTMLMediaElement : public nsGeneri
   // Holds a reference to the MediaStream that we're actually playing.
   // At most one of mDecoder and mSrcStream can be non-null.
   RefPtr<DOMMediaStream> mSrcStream;
 
   // The MediaStreamRenderer handles rendering of our selected video track, and
   // enabled audio tracks, while mSrcStream is set.
   RefPtr<MediaStreamRenderer> mMediaStreamRenderer;
 
+  // True once mSrcStream's initial set of tracks are known.
+  bool mSrcStreamTracksAvailable = false;
+
   // True once PlaybackEnded() is called and we're playing a MediaStream.
   // Reset to false if we start playing mSrcStream again.
   Watchable<bool> mSrcStreamPlaybackEnded = {
       false, "HTMLMediaElement::mSrcStreamPlaybackEnded"};
 
   // Mirrors mSrcStreamPlaybackEnded after a tail dispatch when set to true,
   // but may be be forced to false directly. To accomodate when an application
   // ends playback synchronously by manipulating mSrcStream or its tracks,
@@ -1361,22 +1347,16 @@ class HTMLMediaElement : public nsGeneri
   // Holds a reference to the stream connecting this stream to the window
   // capture sink.
   UniquePtr<MediaStreamWindowCapturer> mStreamWindowCapturer;
 
   // Holds references to the DOM wrappers for the MediaStreams that we're
   // writing to.
   nsTArray<OutputMediaStream> mOutputStreams;
 
-  // Mapping for output tracks, from dom::MediaTrack ids to the
-  // MediaElementTrackSource that represents the source of all corresponding
-  // MediaStreamTracks captured from this element.
-  nsRefPtrHashtable<nsStringHashKey, MediaElementTrackSource>
-      mOutputTrackSources;
-
   // Holds a reference to the first-frame-getting track listener attached to
   // mSelectedVideoStreamTrack.
   RefPtr<FirstFrameListener> mFirstFrameListener;
   // The currently selected video stream track.
   RefPtr<VideoStreamTrack> mSelectedVideoStreamTrack;
 
   const RefPtr<ShutdownObserver> mShutdownObserver;
 
@@ -1558,34 +1538,26 @@ class HTMLMediaElement : public nsGeneri
   // start playing when loaded. The 'autoplay' attribute of the object
   // is a mirror of the HTML attribute. These are different from this
   // 'mAutoplaying' flag, which indicates whether the current playback
   // is a result of the autoplay attribute.
   bool mAutoplaying = true;
 
   // Playback of the video is paused either due to calling the
   // 'Pause' method, or playback not yet having started.
-  Watchable<bool> mPaused = {true, "HTMLMediaElement::mPaused"};
+  Watchable<bool> mPaused;
 
   // The following two fields are here for the private storage of the builtin
   // video controls, and control 'casting' of the video to external devices
   // (TVs, projectors etc.)
   // True if casting is currently allowed
   bool mAllowCasting = false;
   // True if currently casting this video
   bool mIsCasting = false;
 
-  // Set while there are some OutputMediaStreams this media element's enabled
-  // and selected tracks are captured into. When set, all tracks are captured
-  // into the graph of this dummy track.
-  // NB: This is a SharedDummyTrack to allow non-default graphs (AudioContexts
-  // with an explicit sampleRate defined) to capture this element. When
-  // cross-graph tracks are supported, this can become a bool.
-  Watchable<RefPtr<SharedDummyTrack>> mTracksCaptured;
-
   // True if the sound is being captured.
   bool mAudioCaptured = false;
 
   // If TRUE then the media element was actively playing before the currently
   // in progress seeking. If FALSE then the media element is either not seeking
   // or was not actively playing before the current seek. Used to decide whether
   // to raise the 'waiting' event as per 4.7.1.8 in HTML 5 specification.
   bool mPlayingBeforeSeek = false;
@@ -1677,18 +1649,17 @@ class HTMLMediaElement : public nsGeneri
 
   // Listens for waitingForKey events from the owned decoder.
   MediaEventListener mWaitingForKeyListener;
 
   // Init Data that needs to be sent in 'encrypted' events in MetadataLoaded().
   EncryptionInfo mPendingEncryptedInitData;
 
   // True if the media's channel's download has been suspended.
-  Watchable<bool> mDownloadSuspendedByCache = {
-      false, "HTMLMediaElement::mDownloadSuspendedByCache"};
+  bool mDownloadSuspendedByCache = false;
 
   // Disable the video playback by track selection. This flag might not be
   // enough if we ever expand the ability of supporting multi-tracks video
   // playback.
   bool mDisableVideo = false;
 
   RefPtr<TextTrackManager> mTextTrackManager;
 
@@ -1816,32 +1787,35 @@ class HTMLMediaElement : public nsGeneri
   // True if Init() has been called after construction
   bool mInitialized = false;
 
   // True if user has called load(), seek() or element has started playing
   // before. It's *only* use for checking autoplay policy
   bool mIsBlessed = false;
 
   // True if the first frame has been successfully loaded.
-  Watchable<bool> mFirstFrameLoaded = {false,
-                                       "HTMLMediaElement::mFirstFrameLoaded"};
+  bool mFirstFrameLoaded = false;
 
   // Media elements also have a default playback start position, which must
   // initially be set to zero seconds. This time is used to allow the element to
   // be seeked even before the media is loaded.
   double mDefaultPlaybackStartPosition = 0.0;
 
   // True if media element has been marked as 'tainted' and can't
   // participate in video decoder suspending.
   bool mHasSuspendTaint = false;
 
   // True if media element has been forced into being considered 'hidden'.
   // For use by mochitests. Enabling pref "media.test.video-suspend"
   bool mForcedHidden = false;
 
+  // True if audio tracks and video tracks are constructed and added into the
+  // track list, false if all tracks are removed from the track list.
+  bool mMediaTracksConstructed = false;
+
   Visibility mVisibilityState = Visibility::Untracked;
 
   UniquePtr<ErrorSink> mErrorSink;
 
   // This wrapper will handle all audio channel related stuffs, eg. the
   // operations of tab audio indicator, Fennec's media control. Note:
   // mAudioChannelWrapper might be null after GC happened.
   RefPtr<AudioChannelAgentCallback> mAudioChannelWrapper;
--- a/dom/media/ChannelMediaDecoder.cpp
+++ b/dom/media/ChannelMediaDecoder.cpp
@@ -216,37 +216,23 @@ MediaDecoderStateMachine* ChannelMediaDe
   mReader = DecoderTraits::CreateReader(ContainerType(), init);
   return new MediaDecoderStateMachine(this, mReader);
 }
 
 void ChannelMediaDecoder::Shutdown() {
   mResourceCallback->Disconnect();
   MediaDecoder::Shutdown();
 
+  // Force any outstanding seek and byterange requests to complete
+  // to prevent shutdown from deadlocking.
   if (mResource) {
-    // Force any outstanding seek and byterange requests to complete
-    // to prevent shutdown from deadlocking.
-    mResourceClosePromise = mResource->Close();
+    mResource->Close();
   }
 }
 
-void ChannelMediaDecoder::ShutdownInternal() {
-  if (!mResourceClosePromise) {
-    MediaShutdownManager::Instance().Unregister(this);
-    return;
-  }
-
-  mResourceClosePromise->Then(
-      AbstractMainThread(), __func__,
-      [self = RefPtr<ChannelMediaDecoder>(this)] {
-        MediaShutdownManager::Instance().Unregister(self);
-      });
-  return;
-}
-
 nsresult ChannelMediaDecoder::Load(nsIChannel* aChannel,
                                    bool aIsPrivateBrowsing,
                                    nsIStreamListener** aStreamListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(!mResource);
   MOZ_ASSERT(aStreamListener);
   AbstractThread::AutoEnter context(AbstractMainThread());
 
--- a/dom/media/ChannelMediaDecoder.h
+++ b/dom/media/ChannelMediaDecoder.h
@@ -54,17 +54,16 @@ class ChannelMediaDecoder
     // The decoder to send notifications. Main-thread only.
     ChannelMediaDecoder* mDecoder = nullptr;
     nsCOMPtr<nsITimer> mTimer;
     bool mTimerArmed = false;
     const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
  protected:
-  void ShutdownInternal() override;
   void OnPlaybackEvent(MediaPlaybackEvent&& aEvent) override;
   void DurationChanged() override;
   void MetadataLoaded(UniquePtr<MediaInfo> aInfo, UniquePtr<MetadataTags> aTags,
                       MediaDecoderEventVisibility aEventVisibility) override;
   void NotifyPrincipalChanged() override;
 
   RefPtr<ResourceCallback> mResourceCallback;
   RefPtr<BaseMediaResource> mResource;
@@ -152,17 +151,13 @@ class ChannelMediaDecoder
   // start playing back again.
   int64_t mPlaybackPosition = 0;
 
   bool mCanPlayThrough = false;
 
   // True if we've been notified that the ChannelMediaResource has
   // a principal.
   bool mInitialChannelPrincipalKnown = false;
-
-  // Set in Shutdown() when we start closing mResource, if mResource is set.
-  // Must resolve before we unregister the shutdown blocker.
-  RefPtr<GenericPromise> mResourceClosePromise;
 };
 
 }  // namespace mozilla
 
 #endif  // ChannelMediaDecoder_h_
--- a/dom/media/ChannelMediaResource.cpp
+++ b/dom/media/ChannelMediaResource.cpp
@@ -584,25 +584,25 @@ nsresult ChannelMediaResource::SetupChan
     element->SetRequestHeaders(hc);
   } else {
     NS_ASSERTION(aOffset == 0, "Don't know how to seek on this channel type");
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
-RefPtr<GenericPromise> ChannelMediaResource::Close() {
+nsresult ChannelMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   if (!mClosed) {
     CloseChannel();
+    mCacheStream.Close();
     mClosed = true;
-    return mCacheStream.Close();
   }
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 already_AddRefed<nsIPrincipal> ChannelMediaResource::GetCurrentPrincipal() {
   MOZ_ASSERT(NS_IsMainThread());
   return do_AddRef(mSharedInfo->mPrincipal);
 }
 
 bool ChannelMediaResource::HadCrossOriginRedirects() {
--- a/dom/media/ChannelMediaResource.h
+++ b/dom/media/ChannelMediaResource.h
@@ -112,17 +112,17 @@ class ChannelMediaResource
   void CacheClientResume();
 
   bool IsSuspended();
 
   void ThrottleReadahead(bool bThrottle) override;
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override;
   void Resume() override;
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   bool CanClone() override;
   already_AddRefed<BaseMediaResource> CloneData(
       MediaResourceCallback* aDecoder) override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
--- a/dom/media/CloneableWithRangeMediaResource.cpp
+++ b/dom/media/CloneableWithRangeMediaResource.cpp
@@ -143,19 +143,17 @@ nsresult CloneableWithRangeMediaResource
     nsIStreamListener** aStreamListener) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aStreamListener);
 
   *aStreamListener = nullptr;
   return NS_OK;
 }
 
-RefPtr<GenericPromise> CloneableWithRangeMediaResource::Close() {
-  return GenericPromise::CreateAndResolve(true, __func__);
-}
+nsresult CloneableWithRangeMediaResource::Close() { return NS_OK; }
 
 already_AddRefed<nsIPrincipal>
 CloneableWithRangeMediaResource::GetCurrentPrincipal() {
   MOZ_ASSERT(NS_IsMainThread());
 
   nsCOMPtr<nsIPrincipal> principal;
   nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager();
   if (!secMan || !mChannel) {
--- a/dom/media/CloneableWithRangeMediaResource.h
+++ b/dom/media/CloneableWithRangeMediaResource.h
@@ -22,17 +22,17 @@ class CloneableWithRangeMediaResource : 
         mInitialized(false) {
     MOZ_ASSERT(mStream);
   }
 
   ~CloneableWithRangeMediaResource() {}
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
                          uint32_t aCount) override;
 
   // These methods are called off the main thread.
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -368,17 +368,16 @@ already_AddRefed<DOMMediaStream> DOMMedi
     RefPtr<MediaStreamTrack> clone = track->Clone();
     newStream->AddTrack(*clone);
   }
 
   return newStream.forget();
 }
 
 bool DOMMediaStream::Active() const { return mActive; }
-bool DOMMediaStream::Audible() const { return mAudible; }
 
 MediaStreamTrack* DOMMediaStream::GetTrackById(const nsAString& aId) const {
   for (const auto& track : mTracks) {
     nsString id;
     track->GetId(id);
     if (id == aId) {
       return track;
     }
@@ -451,16 +450,30 @@ void DOMMediaStream::RegisterTrackListen
   mTrackListeners.AppendElement(aListener);
 }
 
 void DOMMediaStream::UnregisterTrackListener(TrackListener* aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   mTrackListeners.RemoveElement(aListener);
 }
 
+void DOMMediaStream::SetFinishedOnInactive(bool aFinishedOnInactive) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (mFinishedOnInactive == aFinishedOnInactive) {
+    return;
+  }
+
+  mFinishedOnInactive = aFinishedOnInactive;
+
+  if (mFinishedOnInactive && !ContainsLiveTracks(mTracks)) {
+    NotifyTrackRemoved(nullptr);
+  }
+}
+
 void DOMMediaStream::NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
 
   aTrack->AddConsumer(mPlaybackTrackListener);
 
   for (int32_t i = mTrackListeners.Length() - 1; i >= 0; --i) {
     mTrackListeners[i]->NotifyTrackAdded(aTrack);
   }
@@ -499,16 +512,20 @@ void DOMMediaStream::NotifyTrackRemoved(
     }
 
     if (!mActive) {
       NS_ASSERTION(false, "Shouldn't remove a live track if already inactive");
       return;
     }
   }
 
+  if (!mFinishedOnInactive) {
+    return;
+  }
+
   if (mAudible) {
     // Check if we became inaudible.
     if (!ContainsLiveAudioTracks(mTracks)) {
       mAudible = false;
       NotifyInaudible();
     }
   }
 
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -139,19 +139,16 @@ class DOMMediaStream : public DOMEventTa
 
   bool Active() const;
 
   IMPL_EVENT_HANDLER(addtrack)
   IMPL_EVENT_HANDLER(removetrack)
 
   // NON-WebIDL
 
-  // Returns true if this stream contains a live audio track.
-  bool Audible() const;
-
   /**
    * Returns true if this DOMMediaStream has aTrack in mTracks.
    */
   bool HasTrack(const MediaStreamTrack& aTrack) const;
 
   /**
    * Returns a principal indicating who may access this stream. The stream
    * contents can only be accessed by principals subsuming this principal.
@@ -184,16 +181,20 @@ class DOMMediaStream : public DOMEventTa
   // being destroyed, so we don't hold on to a dead pointer. Main thread only.
   void RegisterTrackListener(TrackListener* aListener);
 
   // Unregisters a track listener from this MediaStream. The caller must call
   // UnregisterTrackListener before being destroyed, so we don't hold on to
   // a dead pointer. Main thread only.
   void UnregisterTrackListener(TrackListener* aListener);
 
+  // Tells this MediaStream whether it can go inactive as soon as no tracks
+  // are live anymore.
+  void SetFinishedOnInactive(bool aFinishedOnInactive);
+
  protected:
   virtual ~DOMMediaStream();
 
   void Destroy();
 
   // Dispatches NotifyActive() to all registered track listeners.
   void NotifyActive();
 
@@ -234,15 +235,19 @@ class DOMMediaStream : public DOMEventTa
   // The track listeners subscribe to changes in this stream's track set.
   nsTArray<TrackListener*> mTrackListeners;
 
   // True if this stream has live tracks.
   bool mActive = false;
 
   // True if this stream has live audio tracks.
   bool mAudible = false;
+
+  // For compatibility with mozCaptureStream, we in some cases do not go
+  // inactive until the MediaDecoder lets us. (Remove this in Bug 1302379)
+  bool mFinishedOnInactive = true;
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(DOMMediaStream, NS_DOMMEDIASTREAM_IID)
 
 }  // namespace mozilla
 
 #endif /* NSDOMMEDIASTREAM_H_ */
--- a/dom/media/FileMediaResource.cpp
+++ b/dom/media/FileMediaResource.cpp
@@ -90,27 +90,27 @@ nsresult FileMediaResource::Open(nsIStre
     // doing an async open and waiting until we locate the real resource,
     // then using that (if it's still a file!).
     return NS_ERROR_FAILURE;
   }
 
   return NS_OK;
 }
 
-RefPtr<GenericPromise> FileMediaResource::Close() {
+nsresult FileMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   // Since mChennel is only accessed by main thread, there is no necessary to
   // take the lock.
   if (mChannel) {
     mChannel->Cancel(NS_ERROR_PARSED_DATA_CACHED);
     mChannel = nullptr;
   }
 
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 already_AddRefed<nsIPrincipal> FileMediaResource::GetCurrentPrincipal() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   nsCOMPtr<nsIPrincipal> principal;
   nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager();
   if (!secMan || !mChannel) return nullptr;
--- a/dom/media/FileMediaResource.h
+++ b/dom/media/FileMediaResource.h
@@ -18,17 +18,17 @@ class FileMediaResource : public BaseMed
       : BaseMediaResource(aCallback, aChannel, aURI),
         mSize(aSize),
         mLock("FileMediaResource.mLock"),
         mSizeInitialized(aSize != -1) {}
   ~FileMediaResource() {}
 
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;
   bool HadCrossOriginRedirects() override;
   nsresult ReadFromCache(char* aBuffer, int64_t aOffset,
                          uint32_t aCount) override;
 
   // These methods are called off the main thread.
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -156,17 +156,17 @@ class MediaCache {
 
   // Get an instance of a MediaCache (or nullptr if initialization failed).
   // aContentLength is the content length if known already, otherwise -1.
   // If the length is known and considered small enough, a discrete MediaCache
   // with memory backing will be given. Otherwise the one MediaCache with
   // file backing will be provided.
   static RefPtr<MediaCache> GetMediaCache(int64_t aContentLength);
 
-  nsISerialEventTarget* OwnerThread() const { return sThread; }
+  nsIEventTarget* OwnerThread() const { return sThread; }
 
   // Brutally flush the cache contents. Main thread only.
   void Flush();
 
   // Close all streams associated with private browsing windows. This will
   // also remove the blocks from the cache since we don't want to leave any
   // traces when PB is done.
   void CloseStreamsForPrivateBrowsing();
@@ -2191,28 +2191,27 @@ bool MediaCacheStream::AreAllStreamsForR
       continue;
     }
     return false;
   }
 
   return true;
 }
 
-RefPtr<GenericPromise> MediaCacheStream::Close() {
+void MediaCacheStream::Close() {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mMediaCache) {
-    return GenericPromise::CreateAndResolve(true, __func__);
+    return;
   }
-
-  return InvokeAsync(OwnerThread(), "MediaCacheStream::Close",
-                     [this, client = RefPtr<ChannelMediaResource>(mClient)] {
-                       AutoLock lock(mMediaCache->Monitor());
-                       CloseInternal(lock);
-                       return GenericPromise::CreateAndResolve(true, __func__);
-                     });
+  OwnerThread()->Dispatch(NS_NewRunnableFunction(
+      "MediaCacheStream::Close",
+      [this, client = RefPtr<ChannelMediaResource>(mClient)]() {
+        AutoLock lock(mMediaCache->Monitor());
+        CloseInternal(lock);
+      }));
 }
 
 void MediaCacheStream::CloseInternal(AutoLock& aLock) {
   MOZ_ASSERT(OwnerThread()->IsOnCurrentThread());
 
   if (mClosed) {
     return;
   }
@@ -2730,17 +2729,17 @@ void MediaCacheStream::InitAsCloneIntern
   mClient->CacheClientSuspend();
 
   // Step 5: add the stream to be managed by the cache.
   mMediaCache->OpenStream(lock, this, true /* aIsClone */);
   // Wake up the reader which is waiting for the cloned data.
   lock.NotifyAll();
 }
 
-nsISerialEventTarget* MediaCacheStream::OwnerThread() const {
+nsIEventTarget* MediaCacheStream::OwnerThread() const {
   return mMediaCache->OwnerThread();
 }
 
 nsresult MediaCacheStream::GetCachedRanges(MediaByteRangeSet& aRanges) {
   MOZ_ASSERT(!NS_IsMainThread());
   // Take the monitor, so that the cached data ranges can't grow while we're
   // trying to loop over them.
   AutoLock lock(mMediaCache->Monitor());
--- a/dom/media/MediaCache.h
+++ b/dom/media/MediaCache.h
@@ -212,22 +212,22 @@ class MediaCacheStream : public DecoderD
   nsresult Init(int64_t aContentLength);
 
   // Set up this stream with the cache, assuming it's for the same data
   // as the aOriginal stream.
   // Exactly one of InitAsClone or Init must be called before any other method
   // on this class.
   void InitAsClone(MediaCacheStream* aOriginal);
 
-  nsISerialEventTarget* OwnerThread() const;
+  nsIEventTarget* OwnerThread() const;
 
   // These are called on the main thread.
-  // This must be called (and resolve) before the ChannelMediaResource
+  // This must be called (and return) before the ChannelMediaResource
   // used to create this MediaCacheStream is deleted.
-  RefPtr<GenericPromise> Close();
+  void Close();
   // This returns true when the stream has been closed.
   bool IsClosed(AutoLock&) const { return mClosed; }
   // Returns true when this stream is can be shared by a new resource load.
   // Called on the main thread only.
   bool IsAvailableForSharing() const { return !mIsPrivateBrowsing; }
 
   // These callbacks are called on the main thread by the client
   // when data has been received via the channel.
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaDecoder.h"
 
-#include "AudioDeviceInfo.h"
 #include "DOMMediaStream.h"
 #include "DecoderBenchmark.h"
 #include "ImageContainer.h"
 #include "Layers.h"
 #include "MediaDecoderStateMachine.h"
 #include "MediaFormatReader.h"
 #include "MediaResource.h"
 #include "MediaShutdownManager.h"
@@ -221,56 +220,46 @@ void MediaDecoder::Pause() {
 }
 
 void MediaDecoder::SetVolume(double aVolume) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   mVolume = aVolume;
 }
 
-RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
+RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mSinkDevice = aSinkDevice;
-  return GetStateMachine()->InvokeSetSink(aSinkDevice);
+  return GetStateMachine()->InvokeSetSink(aSink);
 }
 
-void MediaDecoder::SetOutputCaptured(bool aCaptured) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
-  AbstractThread::AutoEnter context(AbstractMainThread());
-  mOutputCaptured = aCaptured;
-}
-
-void MediaDecoder::AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack) {
+void MediaDecoder::AddOutputStream(DOMMediaStream* aStream,
+                                   SharedDummyTrack* aDummyStream) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
-  tracks.AppendElement(std::move(aTrack));
-  mOutputTracks = tracks;
+  mDecoderStateMachine->EnsureOutputStreamManager(aDummyStream);
+  if (mInfo) {
+    mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
+  }
+  mDecoderStateMachine->AddOutputStream(aStream);
 }
 
-void MediaDecoder::RemoveOutputTrack(
-    const RefPtr<ProcessedMediaTrack>& aTrack) {
+void MediaDecoder::RemoveOutputStream(DOMMediaStream* aStream) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
-  if (tracks.RemoveElement(aTrack)) {
-    mOutputTracks = tracks;
-  }
+  mDecoderStateMachine->RemoveOutputStream(aStream);
 }
 
-void MediaDecoder::SetOutputTracksPrincipal(
-    const RefPtr<nsIPrincipal>& aPrincipal) {
+void MediaDecoder::SetOutputStreamPrincipal(nsIPrincipal* aPrincipal) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mOutputPrincipal = MakePrincipalHandle(aPrincipal);
+  mDecoderStateMachine->SetOutputStreamPrincipal(aPrincipal);
 }
 
 double MediaDecoder::GetDuration() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return mDuration;
 }
 
@@ -306,20 +295,16 @@ MediaDecoder::MediaDecoder(MediaDecoderI
       mLogicallySeeking(false, "MediaDecoder::mLogicallySeeking"),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mCurrentPosition, TimeUnit::Zero()),
       INIT_MIRROR(mStateMachineDuration, NullableTimeUnit()),
       INIT_MIRROR(mIsAudioDataAudible, false),
       INIT_CANONICAL(mVolume, aInit.mVolume),
       INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
       INIT_CANONICAL(mLooping, aInit.mLooping),
-      INIT_CANONICAL(mSinkDevice, nullptr),
-      INIT_CANONICAL(mOutputCaptured, false),
-      INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
       mSameOriginMedia(false),
       mVideoDecodingOberver(
           new BackgroundVideoDecodingPermissionObserver(this)),
       mIsBackgroundVideoDecodingAllowed(false),
       mTelemetryReported(false),
       mContainerType(aInit.mContainerType) {
   MOZ_ASSERT(NS_IsMainThread());
@@ -385,21 +370,24 @@ void MediaDecoder::Shutdown() {
         &MediaDecoder::FinishShutdown);
   } else {
     // Ensure we always unregister asynchronously in order not to disrupt
     // the hashtable iterating in MediaShutdownManager::Shutdown().
     RefPtr<MediaDecoder> self = this;
     nsCOMPtr<nsIRunnable> r =
         NS_NewRunnableFunction("MediaDecoder::Shutdown", [self]() {
           self->mVideoFrameContainer = nullptr;
-          self->ShutdownInternal();
+          MediaShutdownManager::Instance().Unregister(self);
         });
     mAbstractMainThread->Dispatch(r.forget());
   }
 
+  // Ask the owner to remove its audio/video tracks.
+  GetOwner()->RemoveMediaTracks();
+
   ChangeState(PLAY_STATE_SHUTDOWN);
   mVideoDecodingOberver->UnregisterEvent();
   mVideoDecodingOberver = nullptr;
   mOwner = nullptr;
 }
 
 void MediaDecoder::NotifyXPCOMShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
@@ -526,26 +514,21 @@ void MediaDecoder::OnStoreDecoderBenchma
         "type = %s\n",
         benchmarkInfo.mWidth, benchmarkInfo.mHeight, benchmarkInfo.mFrameRate,
         benchmarkInfo.mContentType.BeginReading());
 
     mDecoderBenchmark->Store(benchmarkInfo, mFrameStats);
   }
 }
 
-void MediaDecoder::ShutdownInternal() {
-  MOZ_ASSERT(NS_IsMainThread());
-  MediaShutdownManager::Instance().Unregister(this);
-}
-
 void MediaDecoder::FinishShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
   SetStateMachine(nullptr);
   mVideoFrameContainer = nullptr;
-  ShutdownInternal();
+  MediaShutdownManager::Instance().Unregister(this);
 }
 
 nsresult MediaDecoder::InitializeStateMachine() {
   MOZ_ASSERT(NS_IsMainThread());
   NS_ASSERTION(mDecoderStateMachine, "Cannot initialize null state machine!");
   AbstractThread::AutoEnter context(AbstractMainThread());
 
   nsresult rv = mDecoderStateMachine->Init(this);
@@ -654,16 +637,17 @@ double MediaDecoder::GetCurrentTime() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return mLogicalPosition;
 }
 
 void MediaDecoder::OnMetadataUpdate(TimedMetadata&& aMetadata) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
+  GetOwner()->RemoveMediaTracks();
   MetadataLoaded(MakeUnique<MediaInfo>(*aMetadata.mInfo),
                  UniquePtr<MetadataTags>(std::move(aMetadata.mTags)),
                  MediaDecoderEventVisibility::Observable);
   FirstFrameLoaded(std::move(aMetadata.mInfo),
                    MediaDecoderEventVisibility::Observable);
 }
 
 void MediaDecoder::MetadataLoaded(
@@ -676,16 +660,18 @@ void MediaDecoder::MetadataLoaded(
   LOG("MetadataLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d",
       aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
       aInfo->HasVideo());
 
   mMediaSeekable = aInfo->mMediaSeekable;
   mMediaSeekableOnlyInBufferedRanges =
       aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.release();
+  GetOwner()->ConstructMediaTracks(mInfo);
+  mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
     GetOwner()->MetadataLoaded(mInfo, std::move(aTags));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
@@ -866,16 +852,22 @@ void MediaDecoder::ChangeState(PlayState
   if (mNextState == aState) {
     mNextState = PLAY_STATE_PAUSED;
   }
 
   if (mPlayState != aState) {
     DDLOG(DDLogCategory::Property, "play_state", ToPlayStateStr(aState));
   }
   mPlayState = aState;
+
+  if (mPlayState == PLAY_STATE_PLAYING) {
+    GetOwner()->ConstructMediaTracks(mInfo);
+  } else if (IsEnded()) {
+    GetOwner()->RemoveMediaTracks();
+  }
 }
 
 bool MediaDecoder::IsLoopingBack(double aPrevPos, double aCurPos) const {
   // If current position is early than previous position and we didn't do seek,
   // that means we looped back to the start position.
   return mLooping && !mSeekRequest.Exists() && aCurPos < aPrevPos;
 }
 
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -38,22 +38,22 @@ namespace mozilla {
 
 namespace dom {
 class MediaMemoryInfo;
 }
 
 class AbstractThread;
 class DOMMediaStream;
 class DecoderBenchmark;
-class ProcessedMediaTrack;
 class FrameStatistics;
 class VideoFrameContainer;
 class MediaFormatReader;
 class MediaDecoderStateMachine;
 struct MediaPlaybackEvent;
+struct SharedDummyTrack;
 
 enum class Visibility : uint8_t;
 
 struct MOZ_STACK_CLASS MediaDecoderInit {
   MediaDecoderOwner* const mOwner;
   const double mVolume;
   const bool mPreservesPitch;
   const double mPlaybackRate;
@@ -150,44 +150,36 @@ class MediaDecoder : public DecoderDocto
   // Adjust the speed of the playback, optionally with pitch correction,
   void SetVolume(double aVolume);
 
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetLooping(bool aLooping);
 
   // Set the given device as the output device.
-  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
+  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
 
   bool GetMinimizePreroll() const { return mMinimizePreroll; }
 
   // All MediaStream-related data is protected by mReentrantMonitor.
   // We have at most one DecodedStreamData per MediaDecoder. Its stream
   // is used as the input for each ProcessedMediaTrack created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
-  // Turn output capturing of this decoder on or off. If it is on, the
-  // MediaDecoderStateMachine's media sink will only play after output tracks
-  // have been set. This is to ensure that it doesn't skip over any data
-  // while the owner has intended to capture the full output, thus missing to
-  // capture some of it. The owner of the MediaDecoder is responsible for adding
-  // output tracks in a timely fashion while the output is captured.
-  void SetOutputCaptured(bool aCaptured);
-  // Add an output track. All decoder output for the track's media type will be
-  // sent to the track.
-  // Note that only one audio track and one video track is supported by
-  // MediaDecoder at this time. Passing in more of one type, or passing in a
-  // type that metadata says we are not decoding, is an error.
-  void AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack);
-  // Remove an output track added with AddOutputTrack.
-  void RemoveOutputTrack(const RefPtr<ProcessedMediaTrack>& aTrack);
-  // Update the principal for any output tracks.
-  void SetOutputTracksPrincipal(const RefPtr<nsIPrincipal>& aPrincipal);
+  // Add an output stream. All decoder output will be sent to the stream.
+  // The stream is initially blocked. The decoder is responsible for unblocking
+  // it while it is playing back.
+  void AddOutputStream(DOMMediaStream* aStream, SharedDummyTrack* aDummyStream);
+  // Remove an output stream added with AddOutputStream.
+  void RemoveOutputStream(DOMMediaStream* aStream);
+
+  // Update the principal for any output streams and their tracks.
+  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
 
   // Return true if the stream is infinite.
   bool IsInfinite() const;
 
   // Return true if we are currently seeking in the media resource.
@@ -398,21 +390,16 @@ class MediaDecoder : public DecoderDocto
 
   // Called when the first audio and/or video from the media file has been
   // loaded by the state machine. Call on the main thread only.
   virtual void FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
                                 MediaDecoderEventVisibility aEventVisibility);
 
   void SetStateMachineParameters();
 
-  // Called when MediaDecoder shutdown is finished. Subclasses use this to clean
-  // up internal structures, and unregister potential shutdown blockers when
-  // they're done.
-  virtual void ShutdownInternal();
-
   bool IsShutdown() const;
 
   // Called to notify the decoder that the duration has changed.
   virtual void DurationChanged();
 
   // State-watching manager.
   WatchManager<MediaDecoder> mWatchManager;
 
@@ -614,30 +601,16 @@ class MediaDecoder : public DecoderDocto
 
   // Volume of playback.  0.0 = muted. 1.0 = full volume.
   Canonical<double> mVolume;
 
   Canonical<bool> mPreservesPitch;
 
   Canonical<bool> mLooping;
 
-  // The device used with SetSink, or nullptr if no explicit device has been
-  // set.
-  Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
-
-  // Whether this MediaDecoder's output is captured. When captured, all decoded
-  // data must be played out through mOutputTracks.
-  Canonical<bool> mOutputCaptured;
-
-  // Tracks that, if set, will get data routed through them.
-  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
-
-  // PrincipalHandle to be used when feeding data into mOutputTracks.
-  Canonical<PrincipalHandle> mOutputPrincipal;
-
   // Media duration set explicitly by JS. At present, this is only ever present
   // for MSE.
   Maybe<double> mExplicitDuration;
 
   // Set to one of the valid play states.
   // This can only be changed on the main thread while holding the decoder
   // monitor. Thus, it can be safely read while holding the decoder monitor
   // OR on the main thread.
@@ -660,29 +633,16 @@ class MediaDecoder : public DecoderDocto
   bool mIsBackgroundVideoDecodingAllowed;
 
  public:
   AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
   AbstractCanonical<bool>* CanonicalPreservesPitch() {
     return &mPreservesPitch;
   }
   AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
-  AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
-    return &mSinkDevice;
-  }
-  AbstractCanonical<bool>* CanonicalOutputCaptured() {
-    return &mOutputCaptured;
-  }
-  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
-  CanonicalOutputTracks() {
-    return &mOutputTracks;
-  }
-  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
-    return &mOutputPrincipal;
-  }
   AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
 
  private:
   // Notify owner when the audible state changed
   void NotifyAudibleStateChanged();
 
   bool mTelemetryReported;
   const MediaContainerType mContainerType;
--- a/dom/media/MediaDecoderOwner.h
+++ b/dom/media/MediaDecoderOwner.h
@@ -134,16 +134,24 @@ class MediaDecoderOwner {
   virtual void NotifyXPCOMShutdown() = 0;
 
   // Dispatches a "encrypted" event to the HTMLMediaElement, with the
   // provided init data. Actual dispatch may be delayed until HAVE_METADATA.
   // Main thread only.
   virtual void DispatchEncrypted(const nsTArray<uint8_t>& aInitData,
                                  const nsAString& aInitDataType) = 0;
 
+  // Called by the media decoder to create audio/video tracks and add to its
+  // owner's track list.
+  virtual void ConstructMediaTracks(const MediaInfo* aInfo) = 0;
+
+  // Called by the media decoder to remove all audio/video tracks from its
+  // owner's track list.
+  virtual void RemoveMediaTracks() = 0;
+
   // Notified by the decoder that a decryption key is required before emitting
   // further output.
   virtual void NotifyWaitingForKey() {}
 
   /*
    * Methods that are used only in Gecko go here. We provide defaul
    * implementations so they can compile in Servo without modification.
    */
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -6,16 +6,17 @@
 
 #include <algorithm>
 #include <stdint.h>
 #include <utility>
 
 #include "mediasink/AudioSink.h"
 #include "mediasink/AudioSinkWrapper.h"
 #include "mediasink/DecodedStream.h"
+#include "mediasink/OutputStreamManager.h"
 #include "mediasink/VideoSink.h"
 #include "mozilla/Logging.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/Telemetry.h"
@@ -2586,20 +2587,16 @@ RefPtr<ShutdownPromise> MediaDecoderStat
   master->mOnMediaNotSeekable.Disconnect();
 
   // Disconnect canonicals and mirrors before shutting down our task queue.
   master->mBuffered.DisconnectIfConnected();
   master->mPlayState.DisconnectIfConnected();
   master->mVolume.DisconnectIfConnected();
   master->mPreservesPitch.DisconnectIfConnected();
   master->mLooping.DisconnectIfConnected();
-  master->mSinkDevice.DisconnectIfConnected();
-  master->mOutputCaptured.DisconnectIfConnected();
-  master->mOutputTracks.DisconnectIfConnected();
-  master->mOutputPrincipal.DisconnectIfConnected();
 
   master->mDuration.DisconnectAll();
   master->mCurrentPosition.DisconnectAll();
   master->mIsAudioDataAudible.DisconnectAll();
 
   // Shut down the watch manager to stop further notifications.
   master->mWatchManager.Shutdown();
 
@@ -2625,38 +2622,34 @@ MediaDecoderStateMachine::MediaDecoderSt
                                /* aSupportsTailDispatch = */ true)),
       mWatchManager(this, mTaskQueue),
       mDispatchedStateMachine(false),
       mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
       mCurrentFrameID(0),
       mReader(new ReaderProxy(mTaskQueue, aReader)),
       mPlaybackRate(1.0),
       mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
+      mAudioCaptured(false),
       mMinimizePreroll(aDecoder->GetMinimizePreroll()),
       mSentFirstFrameLoadedEvent(false),
       mVideoDecodeSuspended(false),
       mVideoDecodeSuspendTimer(mTaskQueue),
+      mOutputStreamManager(nullptr),
       mVideoDecodeMode(VideoDecodeMode::Normal),
       mIsMSE(aDecoder->IsMSE()),
       mSeamlessLoopingAllowed(false),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
       INIT_MIRROR(mVolume, 1.0),
       INIT_MIRROR(mPreservesPitch, true),
       INIT_MIRROR(mLooping, false),
-      INIT_MIRROR(mSinkDevice, nullptr),
-      INIT_MIRROR(mOutputCaptured, false),
-      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
-      INIT_CANONICAL(mCanonicalOutputTracks,
-                     nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mDuration, NullableTimeUnit()),
       INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
-      INIT_CANONICAL(mIsAudioDataAudible, false) {
+      INIT_CANONICAL(mIsAudioDataAudible, false),
+      mSetSinkRequestsCount(0) {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
 
   DDLINKCHILD("reader", aReader);
 }
 
@@ -2673,39 +2666,25 @@ void MediaDecoderStateMachine::Initializ
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
   mBuffered.Connect(mReader->CanonicalBuffered());
   mPlayState.Connect(aDecoder->CanonicalPlayState());
   mVolume.Connect(aDecoder->CanonicalVolume());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mLooping.Connect(aDecoder->CanonicalLooping());
-  mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
-  mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
-  mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
-  mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
 
   // Initialize watchers.
   mWatchManager.Watch(mBuffered,
                       &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
   mWatchManager.Watch(mPreservesPitch,
                       &MediaDecoderStateMachine::PreservesPitchChanged);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
   mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);
-  mWatchManager.Watch(mOutputCaptured,
-                      &MediaDecoderStateMachine::UpdateOutputCaptured);
-  mWatchManager.Watch(mOutputTracks,
-                      &MediaDecoderStateMachine::UpdateOutputCaptured);
-  mWatchManager.Watch(mOutputTracks,
-                      &MediaDecoderStateMachine::OutputTracksChanged);
-  mWatchManager.Watch(mOutputPrincipal,
-                      &MediaDecoderStateMachine::OutputPrincipalChanged);
-
-  mMediaSink = CreateMediaSink();
 
   MOZ_ASSERT(!mStateObj);
   auto* s = new DecodeMetadataState(this);
   mStateObj.reset(s);
   s->Enter();
 }
 
 void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
@@ -2713,34 +2692,33 @@ void MediaDecoderStateMachine::AudioAudi
 }
 
 MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self]() {
     MOZ_ASSERT(self->OnTaskQueue());
     AudioSink* audioSink =
         new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
-                      self->Info().mAudio, self->mSinkDevice.Ref());
+                      self->Info().mAudio);
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
         self->mTaskQueue, self.get(),
         &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
-  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
-                              mVolume, mPlaybackRate, mPreservesPitch);
+  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
 }
 
-already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
-  MOZ_ASSERT(OnTaskQueue());
+already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
+    bool aAudioCaptured, OutputStreamManager* aManager) {
+  MOZ_ASSERT_IF(aAudioCaptured, aManager);
   RefPtr<MediaSink> audioSink =
-      mOutputCaptured
-          ? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
-                              mPreservesPitch, mAudioQueue, mVideoQueue)
-          : CreateAudioSink();
+      aAudioCaptured ? new DecodedStream(mTaskQueue, mAbstractMainThread,
+                                         mAudioQueue, mVideoQueue, aManager)
+                     : CreateAudioSink();
 
   RefPtr<MediaSink> mediaSink =
       new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
                     *mFrameStats, sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
 
 TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() {
@@ -2820,16 +2798,18 @@ nsresult MediaDecoderStateMachine::Init(
   mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
       mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
 
   mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());
 
   mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
       OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
 
+  mMediaSink = CreateMediaSink(mAudioCaptured, mOutputStreamManager);
+
   nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mReader->SetCanonicalDuration(&mDuration);
 
   return NS_OK;
 }
 
@@ -3355,16 +3335,19 @@ void MediaDecoderStateMachine::FinishDec
   // Get potentially updated metadata
   mReader->ReadUpdatedMetadata(mInfo.ptr());
 
   EnqueueFirstFrameLoadedEvent();
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::BeginShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
+  if (mOutputStreamManager) {
+    mOutputStreamManager->Disconnect();
+  }
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::Shutdown);
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::FinishShutdown() {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Shutting down state machine task queue");
   return OwnerThread()->BeginShutdown();
@@ -3443,17 +3426,17 @@ void MediaDecoderStateMachine::UpdatePla
     auto t = std::min(clockTime, maxEndTime);
     // FIXME: Bug 1091422 - chained ogg files hit this assertion.
     // MOZ_ASSERT(t >= GetMediaTime());
     if (loopback || t > GetMediaTime()) {
       UpdatePlaybackPosition(t);
     }
   }
   // Note we have to update playback position before releasing the monitor.
-  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
+  // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
 
   int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
   ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay));
 
   // Notify the listener as we progress in the playback offset. Note it would
   // be too intensive to send notifications for each popped audio/video sample.
@@ -3529,85 +3512,57 @@ void MediaDecoderStateMachine::Preserves
 void MediaDecoderStateMachine::LoopingChanged() {
   MOZ_ASSERT(OnTaskQueue());
   LOGV("LoopingChanged, looping=%d", mLooping.Ref());
   if (mSeamlessLoopingAllowed) {
     mStateObj->HandleLoopingChanged();
   }
 }
 
-void MediaDecoderStateMachine::UpdateOutputCaptured() {
-  MOZ_ASSERT(OnTaskQueue());
-
-  // Reset these flags so they are consistent with the status of the sink.
-  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
-  // to reset these flags when switching MediaSinks.
-  mAudioCompleted = false;
-  mVideoCompleted = false;
-
-  // Stop and shut down the existing sink.
-  StopMediaSink();
-  mMediaSink->Shutdown();
-
-  // Create a new sink according to whether output is captured.
-  mMediaSink = CreateMediaSink();
-
-  // Don't buffer as much when audio is captured because we don't need to worry
-  // about high latency audio devices.
-  mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
-                                         : detail::AMPLE_AUDIO_THRESHOLD;
-
-  mStateObj->HandleAudioCaptured();
-}
-
-void MediaDecoderStateMachine::OutputTracksChanged() {
-  MOZ_ASSERT(OnTaskQueue());
-  LOG("OutputTracksChanged, tracks=%zu", mOutputTracks.Ref().Length());
-  mCanonicalOutputTracks = mOutputTracks;
-}
-
-void MediaDecoderStateMachine::OutputPrincipalChanged() {
-  MOZ_ASSERT(OnTaskQueue());
-  mCanonicalOutputPrincipal = mOutputPrincipal;
-}
-
 RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aSink);
 
+  Unused << ++mSetSinkRequestsCount;
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::SetSink, aSink);
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
-    RefPtr<AudioDeviceInfo> aSinkDevice) {
+    RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(OnTaskQueue());
-  if (mOutputCaptured) {
+  if (mAudioCaptured) {
     // Not supported yet.
     return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
   }
 
-  if (mSinkDevice.Ref() != aSinkDevice) {
-    // A new sink was set before this ran.
-    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
+  // Backup current playback parameters.
+  bool wasPlaying = mMediaSink->IsPlaying();
+
+  if (--mSetSinkRequestsCount > 0) {
+    MOZ_ASSERT(mSetSinkRequestsCount > 0);
+    return GenericPromise::CreateAndResolve(wasPlaying, __func__);
   }
 
-  if (mMediaSink->AudioDevice() == aSinkDevice) {
-    // The sink has not changed.
-    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
+  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
+  params.mSink = std::move(aSink);
+
+  if (!mMediaSink->IsStarted()) {
+    mMediaSink->SetPlaybackParams(params);
+    return GenericPromise::CreateAndResolve(false, __func__);
   }
 
-  const bool wasPlaying = IsPlaying();
-
   // Stop and shutdown the existing sink.
   StopMediaSink();
   mMediaSink->Shutdown();
   // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink();
+  mMediaSink = CreateMediaSink(false);
+  // Restore playback parameters.
+  mMediaSink->SetPlaybackParams(params);
   // Start the new sink
   if (wasPlaying) {
     nsresult rv = StartMediaSink();
     if (NS_FAILED(rv)) {
       return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
     }
   }
   return GenericPromise::CreateAndResolve(wasPlaying, __func__);
@@ -3696,16 +3651,53 @@ void MediaDecoderStateMachine::OnMediaSi
     return;
   }
 
   // Otherwise notify media decoder/element about this error for it makes
   // no sense to play an audio-only file without sound output.
   DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
 }
 
+void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured,
+                                                OutputStreamManager* aManager) {
+  MOZ_ASSERT(OnTaskQueue());
+
+  if (aCaptured == mAudioCaptured) {
+    return;
+  }
+
+  // Reset these flags so they are consistent with the status of the sink.
+  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
+  // to reset these flags when switching MediaSinks.
+  mAudioCompleted = false;
+  mVideoCompleted = false;
+
+  // Backup current playback parameters.
+  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
+
+  // Stop and shut down the existing sink.
+  StopMediaSink();
+  mMediaSink->Shutdown();
+
+  // Create a new sink according to whether audio is captured.
+  mMediaSink = CreateMediaSink(aCaptured, aManager);
+
+  // Restore playback parameters.
+  mMediaSink->SetPlaybackParams(params);
+
+  mAudioCaptured = aCaptured;
+
+  // Don't buffer as much when audio is captured because we don't need to worry
+  // about high latency audio devices.
+  mAmpleAudioThreshold = mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
+                                        : detail::AMPLE_AUDIO_THRESHOLD;
+
+  mStateObj->HandleAudioCaptured();
+}
+
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
   MOZ_ASSERT(OnTaskQueue());
   return mReader->VideoIsHardwareAccelerated()
              ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
              : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 void MediaDecoderStateMachine::GetDebugInfo(
@@ -3739,16 +3731,96 @@ RefPtr<GenericPromise> MediaDecoderState
                                p->Resolve(true, __func__);
                              }),
       AbstractThread::TailDispatch);
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   Unused << rv;
   return p.forget();
 }
 
+void MediaDecoderStateMachine::SetOutputStreamPrincipal(
+    nsIPrincipal* aPrincipal) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mOutputStreamPrincipal = aPrincipal;
+  if (mOutputStreamManager) {
+    mOutputStreamManager->SetPrincipal(mOutputStreamPrincipal);
+  }
+}
+
+void MediaDecoderStateMachine::AddOutputStream(DOMMediaStream* aStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  LOG("AddOutputStream aStream=%p!", aStream);
+  mOutputStreamManager->Add(aStream);
+  nsCOMPtr<nsIRunnable> r =
+      NS_NewRunnableFunction("MediaDecoderStateMachine::SetAudioCaptured",
+                             [self = RefPtr<MediaDecoderStateMachine>(this),
+                              manager = mOutputStreamManager]() {
+                               self->SetAudioCaptured(true, manager);
+                             });
+  nsresult rv = OwnerThread()->Dispatch(r.forget());
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+  Unused << rv;
+}
+
+void MediaDecoderStateMachine::RemoveOutputStream(DOMMediaStream* aStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  LOG("RemoveOutputStream=%p!", aStream);
+  mOutputStreamManager->Remove(aStream);
+  if (mOutputStreamManager->IsEmpty()) {
+    mOutputStreamManager->Disconnect();
+    mOutputStreamManager = nullptr;
+    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
+        "MediaDecoderStateMachine::SetAudioCaptured",
+        [self = RefPtr<MediaDecoderStateMachine>(this)]() {
+          self->SetAudioCaptured(false);
+        });
+    nsresult rv = OwnerThread()->Dispatch(r.forget());
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+    Unused << rv;
+  }
+}
+
+void MediaDecoderStateMachine::EnsureOutputStreamManager(
+    SharedDummyTrack* aDummyStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mOutputStreamManager) {
+    return;
+  }
+  mOutputStreamManager = new OutputStreamManager(
+      aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
+}
+
+void MediaDecoderStateMachine::EnsureOutputStreamManagerHasTracks(
+    const MediaInfo& aLoadedInfo) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (!mOutputStreamManager) {
+    return;
+  }
+  if ((!aLoadedInfo.HasAudio() ||
+       mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
+      (!aLoadedInfo.HasVideo() ||
+       mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
+    return;
+  }
+  if (aLoadedInfo.HasAudio()) {
+    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
+    RefPtr<SourceMediaTrack> dummy =
+        mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+    LOG("Pre-created audio track with underlying track %p", dummy.get());
+    Unused << dummy;
+  }
+  if (aLoadedInfo.HasVideo()) {
+    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
+    RefPtr<SourceMediaTrack> dummy =
+        mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+    LOG("Pre-created video track with underlying track %p", dummy.get());
+    Unused << dummy;
+  }
+}
+
 class VideoQueueMemoryFunctor : public nsDequeFunctor {
  public:
   VideoQueueMemoryFunctor() : mSize(0) {}
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
 
   virtual void operator()(void* aObject) override {
     const VideoData* v = static_cast<const VideoData*>(aObject);
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -101,16 +101,17 @@ hardware (via AudioStream).
 #  include "nsThreadUtils.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class AudioSegment;
 class DecodedStream;
 class DOMMediaStream;
+class OutputStreamManager;
 class ReaderProxy;
 class TaskQueue;
 
 extern LazyLogModule gMediaDecoderLog;
 
 struct MediaPlaybackEvent {
   enum EventType {
     PlaybackStarted,
@@ -180,16 +181,29 @@ class MediaDecoderStateMachine
   };
 
   // Returns the state machine task queue.
   TaskQueue* OwnerThread() const { return mTaskQueue; }
 
   RefPtr<GenericPromise> RequestDebugInfo(
       dom::MediaDecoderStateMachineDebugInfo& aInfo);
 
+  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
+  // If an OutputStreamManager does not exist, one will be created.
+  void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
+  // If an OutputStreamManager exists, tracks matching aLoadedInfo will be
+  // created unless they already exist in the manager.
+  void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
+  // Add an output stream to the output stream manager. The manager must have
+  // been created through EnsureOutputStreamManager() before this.
+  void AddOutputStream(DOMMediaStream* aStream);
+  // Remove an output stream added with AddOutputStream. If the last output
+  // stream was removed, we will also tear down the OutputStreamManager.
+  void RemoveOutputStream(DOMMediaStream* aStream);
+
   // Seeks to the decoder to aTarget asynchronously.
   RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
 
   void DispatchSetPlaybackRate(double aPlaybackRate) {
     OwnerThread()->DispatchStateChange(NewRunnableMethod<double>(
         "MediaDecoderStateMachine::SetPlaybackRate", this,
         &MediaDecoderStateMachine::SetPlaybackRate, aPlaybackRate));
   }
@@ -297,16 +311,21 @@ class MediaDecoderStateMachine
   // on the appropriate threads.
   bool OnTaskQueue() const;
 
   // Initialization that needs to happen on the task queue. This is the first
   // task that gets run on the task queue, and is dispatched from the MDSM
   // constructor immediately after the task queue is created.
   void InitializationTask(MediaDecoder* aDecoder);
 
+  // Sets the audio-captured state and recreates the media sink if needed.
+  // A manager must be passed in if setting the audio-captured state to true.
+  void SetAudioCaptured(bool aCaptured,
+                        OutputStreamManager* aManager = nullptr);
+
   RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);
 
   RefPtr<ShutdownPromise> Shutdown();
 
   RefPtr<ShutdownPromise> FinishShutdown();
 
   // Update the playback position. This can result in a timeupdate event
   // and an invalidate of the frame being dispatched asynchronously if
@@ -370,19 +389,16 @@ class MediaDecoderStateMachine
   void OnVideoPopped(const RefPtr<VideoData>& aSample);
 
   void AudioAudibleChanged(bool aAudible);
 
   void VolumeChanged();
   void SetPlaybackRate(double aPlaybackRate);
   void PreservesPitchChanged();
   void LoopingChanged();
-  void UpdateOutputCaptured();
-  void OutputTracksChanged();
-  void OutputPrincipalChanged();
 
   MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
   MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
 
   // True if we are low in decoded audio/video data.
   // May not be invoked when mReader->UseBufferingHeuristics() is false.
   bool HasLowDecodedData();
 
@@ -417,19 +433,20 @@ class MediaDecoderStateMachine
   void UpdatePlaybackPositionInternal(const media::TimeUnit& aTime);
 
   // Update playback position and trigger next update by default time period.
   // Called on the state machine thread.
   void UpdatePlaybackPositionPeriodically();
 
   MediaSink* CreateAudioSink();
 
-  // Always create mediasink which contains an AudioSink or DecodedStream
-  // inside.
-  already_AddRefed<MediaSink> CreateMediaSink();
+  // Always create mediasink which contains an AudioSink or StreamSink inside.
+  // A manager must be passed in if aAudioCaptured is true.
+  already_AddRefed<MediaSink> CreateMediaSink(
+      bool aAudioCaptured, OutputStreamManager* aManager = nullptr);
 
   // Stops the media sink and shut it down.
   // The decoder monitor must be held with exactly one lock count.
   // Called on the state machine thread.
   void StopMediaSink();
 
   // Create and start the media sink.
   // The decoder monitor must be held with exactly one lock count.
@@ -604,16 +621,21 @@ class MediaDecoderStateMachine
   void CancelSuspendTimer();
 
   bool IsInSeamlessLooping() const;
 
   bool mCanPlayThrough = false;
 
   bool mIsLiveStream = false;
 
+  // True if we shouldn't play our audio (but still write it to any capturing
+  // streams). When this is true, the audio thread will never start again after
+  // it has stopped.
+  bool mAudioCaptured;
+
   // True if all audio frames are already rendered.
   bool mAudioCompleted = false;
 
   // True if all video frames are already rendered.
   bool mVideoCompleted = false;
 
   // True if we should not decode/preroll unnecessary samples, unless we're
   // played. "Prerolling" in this context refers to when we decode and
@@ -645,16 +667,23 @@ class MediaDecoderStateMachine
   bool mMediaSeekable = true;
 
   // True if the media is seekable only in buffered ranges.
   bool mMediaSeekableOnlyInBufferedRanges = false;
 
   // Track enabling video decode suspension via timer
   DelayedScheduler mVideoDecodeSuspendTimer;
 
+  // Data about MediaStreams that are being fed by the decoder.
+  // Main thread only.
+  RefPtr<OutputStreamManager> mOutputStreamManager;
+
+  // Principal used by output streams. Main thread only.
+  nsCOMPtr<nsIPrincipal> mOutputStreamPrincipal;
+
   // Track the current video decode mode.
   VideoDecodeMode mVideoDecodeMode;
 
   // Track the complete & error for audio/video separately
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkAudioEndedPromise;
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkVideoEndedPromise;
 
   MediaEventListener mAudioQueueListener;
@@ -699,55 +728,34 @@ class MediaDecoderStateMachine
 
   // Pitch preservation for the playback rate.
   Mirror<bool> mPreservesPitch;
 
   // Whether to seek back to the start of the media resource
   // upon reaching the end.
   Mirror<bool> mLooping;
 
-  // The device used with SetSink, or nullptr if no explicit device has been
-  // set.
-  Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
-
-  // Whether all output should be captured into mOutputTracks. While true, the
-  // media sink will only play if there are output tracks.
-  Mirror<bool> mOutputCaptured;
-
-  // Tracks to capture data into.
-  Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
-
-  // PrincipalHandle to feed with data captured into mOutputTracks.
-  Mirror<PrincipalHandle> mOutputPrincipal;
-
-  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mCanonicalOutputTracks;
-  Canonical<PrincipalHandle> mCanonicalOutputPrincipal;
-
   // Duration of the media. This is guaranteed to be non-null after we finish
   // decoding the first frame.
   Canonical<media::NullableTimeUnit> mDuration;
 
   // The time of the current frame, corresponding to the "current
   // playback position" in HTML5. This is referenced from 0, which is the
   // initial playback position.
   Canonical<media::TimeUnit> mCurrentPosition;
 
   // Used to distinguish whether the audio is producing sound.
   Canonical<bool> mIsAudioDataAudible;
 
+  // Used to count the number of pending requests to set a new sink.
+  Atomic<int> mSetSinkRequestsCount;
+
  public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
-  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
-  CanonicalOutputTracks() {
-    return &mCanonicalOutputTracks;
-  }
-  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
-    return &mCanonicalOutputPrincipal;
-  }
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
     return &mDuration;
   }
   AbstractCanonical<media::TimeUnit>* CanonicalCurrentPosition() {
     return &mCurrentPosition;
   }
   AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
     return &mIsAudioDataAudible;
--- a/dom/media/MediaResource.h
+++ b/dom/media/MediaResource.h
@@ -55,21 +55,18 @@ class MediaResource : public DecoderDoct
   // Note that this means it's safe for references to this object to be
   // released on a non main thread, but the destructor will always run on
   // the main thread.
   NS_METHOD_(MozExternalRefCountType) AddRef(void);
   NS_METHOD_(MozExternalRefCountType) Release(void);
 
   // Close the resource, stop any listeners, channels, etc.
   // Cancels any currently blocking Read request and forces that request to
-  // return an error. This must be called (and resolve) before the MediaResource
-  // is deleted.
-  virtual RefPtr<GenericPromise> Close() {
-    return GenericPromise::CreateAndResolve(true, __func__);
-  }
+  // return an error.
+  virtual nsresult Close() { return NS_OK; }
 
   // These methods are called off the main thread.
   // Read up to aCount bytes from the stream. The read starts at
   // aOffset in the stream, seeking to that location initially if
   // it is not the current stream offset. The remaining arguments,
   // results and requirements are the same as per the Read method.
   virtual nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
                           uint32_t* aBytes) = 0;
--- a/dom/media/MediaStreamTrack.h
+++ b/dom/media/MediaStreamTrack.h
@@ -303,17 +303,17 @@ class MediaStreamTrackSource : public ns
         mSinks.RemoveElement(sink);
         continue;
       }
       sink->OverrideEnded();
     }
   }
 
   // Principal identifying who may access the contents of this source.
-  RefPtr<nsIPrincipal> mPrincipal;
+  nsCOMPtr<nsIPrincipal> mPrincipal;
 
   // Currently registered sinks.
   nsTArray<WeakPtr<Sink>> mSinks;
 
   // The label of the track we are the source of per the MediaStreamTrack spec.
   const nsString mLabel;
 
   // True if all MediaStreamTrack users have unregistered from this source and
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSink.h"
 #include "AudioConverter.h"
-#include "AudioDeviceInfo.h"
 #include "MediaQueue.h"
 #include "VideoUtils.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "nsPrintfCString.h"
 
@@ -30,21 +29,19 @@ static const int64_t AUDIO_FUZZ_FRAMES =
 
 // Amount of audio frames we will be processing ahead of use
 static const int32_t LOW_AUDIO_USECS = 300000;
 
 using media::TimeUnit;
 
 AudioSink::AudioSink(AbstractThread* aThread,
                      MediaQueue<AudioData>& aAudioQueue,
-                     const TimeUnit& aStartTime, const AudioInfo& aInfo,
-                     AudioDeviceInfo* aAudioDevice)
+                     const TimeUnit& aStartTime, const AudioInfo& aInfo)
     : mStartTime(aStartTime),
       mInfo(aInfo),
-      mAudioDevice(aAudioDevice),
       mPlaying(true),
       mMonitor("AudioSink"),
       mWritten(0),
       mErrored(false),
       mPlaybackComplete(false),
       mOwnerThread(aThread),
       mProcessedQueueLength(0),
       mFramesParsed(0),
@@ -181,17 +178,17 @@ nsresult AudioSink::InitializeAudioStrea
   AudioConfig::ChannelLayout::ChannelMap channelMap =
       mConverter ? mConverter->OutputConfig().Layout().Map()
                  : AudioConfig::ChannelLayout(mOutputChannels).Map();
   // The layout map used here is already processed by mConverter with
   // mOutputChannels into SMPTE format, so there is no need to worry if
   // StaticPrefs::accessibility_monoaudio_enable() or
   // StaticPrefs::media_forcestereo_enabled() is applied.
   nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
-                                   mAudioDevice);
+                                   aParams.mSink);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }
 
   // Set playback params before calling Start() so they can take effect
   // as soon as the 1st DataCallback of the AudioStream fires.
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -18,30 +18,21 @@
 #include "mozilla/RefPtr.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
 class AudioConverter;
 
 class AudioSink : private AudioStream::DataSource {
+  using PlaybackParams = MediaSink::PlaybackParams;
+
  public:
-  struct PlaybackParams {
-    PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
-        : mVolume(aVolume),
-          mPlaybackRate(aPlaybackRate),
-          mPreservesPitch(aPreservesPitch) {}
-    double mVolume;
-    double mPlaybackRate;
-    bool mPreservesPitch;
-  };
-
   AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
-            const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
-            AudioDeviceInfo* aAudioDevice);
+            const media::TimeUnit& aStartTime, const AudioInfo& aInfo);
 
   ~AudioSink();
 
   // Return a promise which will be resolved when AudioSink
   // finishes playing, or rejected if any error.
   nsresult Init(const PlaybackParams& aParams,
                 RefPtr<MediaSink::EndedPromise>& aEndedPromise);
 
@@ -63,18 +54,16 @@ class AudioSink : private AudioStream::D
   void SetPlaybackRate(double aPlaybackRate);
   void SetPreservesPitch(bool aPreservesPitch);
   void SetPlaying(bool aPlaying);
 
   MediaEventSource<bool>& AudibleEvent() { return mAudibleEvent; }
 
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);
 
-  const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
-
  private:
   // Allocate and initialize mAudioStream. Returns NS_OK on success.
   nsresult InitializeAudioStream(const PlaybackParams& aParams);
 
   // Interface of AudioStream::DataSource.
   // Called on the callback thread of cubeb.
   UniquePtr<AudioStream::Chunk> PopFrames(uint32_t aFrames) override;
   bool Ended() const override;
@@ -93,20 +82,16 @@ class AudioSink : private AudioStream::D
 
   // Keep the last good position returned from the audio stream. Used to ensure
   // position returned by GetPosition() is mono-increasing in spite of audio
   // stream error. Used on the task queue of MDSM only.
   media::TimeUnit mLastGoodPosition;
 
   const AudioInfo mInfo;
 
-  // The output device this AudioSink is playing data to. The system's default
-  // device is used if this is null.
-  const RefPtr<AudioDeviceInfo> mAudioDevice;
-
   // Used on the task queue of MDSM only.
   bool mPlaying;
 
   MozPromiseHolder<MediaSink::EndedPromise> mEndedPromise;
 
   /*
    * Members to implement AudioStream::DataSource.
    * Used on the callback thread of cubeb.
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -16,16 +16,31 @@ using media::TimeUnit;
 AudioSinkWrapper::~AudioSinkWrapper() {}
 
 void AudioSinkWrapper::Shutdown() {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "Must be called after playback stopped.");
   mCreator = nullptr;
 }
 
+const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
+  AssertOwnerThread();
+  return mParams;
+}
+
+void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+  if (mAudioSink) {
+    mAudioSink->SetVolume(aParams.mVolume);
+    mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
+    mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
+  }
+  mParams = aParams;
+}
+
 RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
   if (aType == TrackInfo::kAudioTrack) {
     return mEndedPromise;
   }
   return nullptr;
 }
@@ -134,21 +149,16 @@ void AudioSinkWrapper::SetPlaying(bool a
     // Remember how long we've played.
     mPlayDuration = GetPosition();
     // mPlayStartTime must be updated later since GetPosition()
     // depends on the value of mPlayStartTime.
     mPlayStartTime = TimeStamp();
   }
 }
 
-double AudioSinkWrapper::PlaybackRate() const {
-  AssertOwnerThread();
-  return mParams.mPlaybackRate;
-}
-
 nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
                                  const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(!mIsStarted, "playback already started.");
 
   mIsStarted = true;
   mPlayDuration = aStartTime;
   mPlayStartTime = TimeStamp::Now();
--- a/dom/media/mediasink/AudioSinkWrapper.h
+++ b/dom/media/mediasink/AudioSinkWrapper.h
@@ -19,18 +19,16 @@ class AudioSink;
 class MediaData;
 template <class T>
 class MediaQueue;
 
 /**
  * A wrapper around AudioSink to provide the interface of MediaSink.
  */
 class AudioSinkWrapper : public MediaSink {
-  using PlaybackParams = AudioSink::PlaybackParams;
-
   // An AudioSink factory.
   class Creator {
    public:
     virtual ~Creator() {}
     virtual AudioSink* Create() = 0;
   };
 
   // Wrap around a function object which creates AudioSinks.
@@ -43,40 +41,39 @@ class AudioSinkWrapper : public MediaSin
    private:
     Function mFunction;
   };
 
  public:
   template <typename Function>
   AudioSinkWrapper(AbstractThread* aOwnerThread,
                    const MediaQueue<AudioData>& aAudioQueue,
-                   const Function& aFunc, double aVolume, double aPlaybackRate,
-                   bool aPreservesPitch)
+                   const Function& aFunc)
       : mOwnerThread(aOwnerThread),
         mCreator(new CreatorImpl<Function>(aFunc)),
         mIsStarted(false),
-        mParams(aVolume, aPlaybackRate, aPreservesPitch),
         // Give an invalid value to facilitate debug if used before playback
         // starts.
         mPlayDuration(media::TimeUnit::Invalid()),
         mAudioEnded(true),
         mAudioQueue(aAudioQueue) {}
 
+  const PlaybackParams& GetPlaybackParams() const override;
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
+
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
-  double PlaybackRate() const override;
-
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
 
   void Shutdown() override;
 
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -2,20 +2,20 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DecodedStream.h"
 #include "AudioSegment.h"
 #include "MediaData.h"
-#include "MediaDecoderStateMachine.h"
 #include "MediaQueue.h"
 #include "MediaTrackGraph.h"
 #include "MediaTrackListener.h"
+#include "OutputStreamManager.h"
 #include "SharedBuffer.h"
 #include "VideoSegment.h"
 #include "VideoUtils.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/SyncRunnable.h"
 #include "mozilla/gfx/Point.h"
 #include "nsProxyRelease.h"
@@ -49,73 +49,75 @@ class DecodedStreamTrackListener : publi
   const RefPtr<DecodedStreamGraphListener> mGraphListener;
   const RefPtr<SourceMediaTrack> mTrack;
 };
 
 class DecodedStreamGraphListener {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
  public:
   DecodedStreamGraphListener(
-      SourceMediaTrack* aAudioTrack,
+      SourceMediaTrack* aAudioStream,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
-      SourceMediaTrack* aVideoTrack,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
+      SourceMediaTrack* aVideoStream,
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
+      AbstractThread* aMainThread)
       : mAudioTrackListener(
-            aAudioTrack
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
+            aAudioStream
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
                 : nullptr),
         mAudioEndedHolder(std::move(aAudioEndedHolder)),
         mVideoTrackListener(
-            aVideoTrack
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
+            aVideoStream
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
                 : nullptr),
         mVideoEndedHolder(std::move(aVideoEndedHolder)),
-        mAudioTrack(aAudioTrack),
-        mVideoTrack(aVideoTrack) {
+        mAudioStream(aAudioStream),
+        mVideoStream(aVideoStream),
+        mAbstractMainThread(aMainThread) {
     MOZ_ASSERT(NS_IsMainThread());
     if (mAudioTrackListener) {
-      mAudioTrack->AddListener(mAudioTrackListener);
+      mAudioStream->AddListener(mAudioTrackListener);
     } else {
       mAudioEnded = true;
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     }
 
     if (mVideoTrackListener) {
-      mVideoTrack->AddListener(mVideoTrackListener);
+      mVideoStream->AddListener(mVideoTrackListener);
     } else {
       mVideoEnded = true;
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     }
   }
 
   void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
       if (aCurrentTrackTime >= mAudioEnd) {
-        mAudioTrack->End();
+        mAudioStream->End();
       }
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
       if (aCurrentTrackTime >= mVideoEnd) {
-        mVideoTrack->End();
+        mVideoStream->End();
       }
     } else {
       MOZ_CRASH("Unexpected source track");
     }
-    if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
+    if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
       // Only audio playout drives the clock forward, if present and live.
       return;
     }
-    MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
-    MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
+    MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
+    MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
     mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
   }
 
   void NotifyEnded(SourceMediaTrack* aTrack) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
       mAudioEnded = true;
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
       mVideoEnded = true;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
     aTrack->Graph()->DispatchToMainThreadStableState(
         NewRunnableMethod<RefPtr<SourceMediaTrack>>(
             "DecodedStreamGraphListener::DoNotifyTrackEnded", this,
             &DecodedStreamGraphListener::DoNotifyTrackEnded, aTrack));
@@ -138,49 +140,49 @@ class DecodedStreamGraphListener {
    * to a MediaStreamTrack ending on main thread (it uses another listener)
    * before the listeners to render the track get added, potentially meaning a
    * media element doesn't progress before reaching the end although data was
    * available.
    *
    * Callable from any thread.
    */
   void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
       mAudioEnd = aEnd;
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
       mVideoEnd = aEnd;
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
     MOZ_ASSERT(NS_IsMainThread());
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
       mAudioEndedHolder.ResolveIfExists(true, __func__);
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
       mVideoEndedHolder.ResolveIfExists(true, __func__);
     } else {
       MOZ_CRASH("Unexpected source track");
     }
   }
 
   void Forget() {
     MOZ_ASSERT(NS_IsMainThread());
 
-    if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
-      mAudioTrack->End();
-      mAudioTrack->RemoveListener(mAudioTrackListener);
+    if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
+      mAudioStream->End();
+      mAudioStream->RemoveListener(mAudioTrackListener);
     }
     mAudioTrackListener = nullptr;
     mAudioEndedHolder.ResolveIfExists(false, __func__);
 
-    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
-      mVideoTrack->End();
-      mVideoTrack->RemoveListener(mVideoTrackListener);
+    if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
+      mVideoStream->End();
+      mVideoStream->RemoveListener(mVideoTrackListener);
     }
     mVideoTrackListener = nullptr;
     mVideoEndedHolder.ResolveIfExists(false, __func__);
   }
 
   MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }
 
  private:
@@ -197,20 +199,21 @@ class DecodedStreamGraphListener {
   RefPtr<DecodedStreamTrackListener> mVideoTrackListener;
   MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;
 
   // Graph thread only.
   bool mAudioEnded = false;
   bool mVideoEnded = false;
 
   // Any thread.
-  const RefPtr<SourceMediaTrack> mAudioTrack;
-  const RefPtr<SourceMediaTrack> mVideoTrack;
+  const RefPtr<SourceMediaTrack> mAudioStream;
+  const RefPtr<SourceMediaTrack> mVideoStream;
   Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
   Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
+  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamTrackListener::DecodedStreamTrackListener(
     DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aTrack)
     : mGraphListener(aGraphListener), mTrack(aTrack) {}
 
 void DecodedStreamTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
                                               TrackTime aCurrentTrackTime) {
@@ -218,30 +221,31 @@ void DecodedStreamTrackListener::NotifyO
 }
 
 void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
   mGraphListener->NotifyEnded(mTrack);
 }
 
 /**
  * All MediaStream-related data is protected by the decoder's monitor. We have
- * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
+ * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
  * inputs for all output tracks created by OutputStreamManager after calls to
  * captureStream/UntilEnded. Seeking creates new source tracks, as does
  * replaying after the input as ended. In the latter case, the new sources are
  * not connected to tracks created by captureStreamUntilEnded.
  */
 class DecodedStreamData final {
  public:
   DecodedStreamData(
-      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+      OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+      RefPtr<SourceMediaTrack> aAudioStream,
+      RefPtr<SourceMediaTrack> aVideoStream,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise);
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+      AbstractThread* aMainThread);
   ~DecodedStreamData();
   MediaEventSource<int64_t>& OnOutput();
   void Forget();
   void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);
 
   void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
                            const TimeUnit& aEnd,
                            const gfx::IntSize& aIntrinsicSize,
@@ -249,19 +253,19 @@ class DecodedStreamData final {
                            const PrincipalHandle& aPrincipalHandle);
 
   /* The following group of fields are protected by the decoder's monitor
    * and can be read or written on any thread.
    */
   // Count of audio frames written to the track
   int64_t mAudioFramesWritten;
   // Count of video frames written to the track in the track's rate
-  TrackTime mVideoTrackWritten;
+  TrackTime mVideoStreamWritten;
   // Count of audio frames written to the track in the track's rate
-  TrackTime mAudioTrackWritten;
+  TrackTime mAudioStreamWritten;
   // mNextAudioTime is the end timestamp for the last packet sent to the track.
   // Therefore audio packets starting at or after this time need to be copied
   // to the output track.
   TimeUnit mNextAudioTime;
   // mLastVideoStartTime is the start timestamp for the last packet sent to the
   // track. Therefore video packets starting after this time need to be copied
   // to the output track.
   NullableTimeUnit mLastVideoStartTime;
@@ -274,121 +278,108 @@ class DecodedStreamData final {
   TimeStamp mLastVideoTimeStamp;
   // The last video image sent to the track. Useful if we need to replicate
   // the image.
   RefPtr<layers::Image> mLastVideoImage;
   gfx::IntSize mLastVideoImageDisplaySize;
   bool mHaveSentFinishAudio;
   bool mHaveSentFinishVideo;
 
-  const RefPtr<SourceMediaTrack> mAudioTrack;
-  const RefPtr<SourceMediaTrack> mVideoTrack;
-  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
-  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
-  const RefPtr<MediaInputPort> mAudioPort;
-  const RefPtr<MediaInputPort> mVideoPort;
+  const RefPtr<SourceMediaTrack> mAudioStream;
+  const RefPtr<SourceMediaTrack> mVideoStream;
   const RefPtr<DecodedStreamGraphListener> mListener;
+
+  const RefPtr<OutputStreamManager> mOutputStreamManager;
+  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamData::DecodedStreamData(
-    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+    OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+    RefPtr<SourceMediaTrack> aAudioStream,
+    RefPtr<SourceMediaTrack> aVideoStream,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise)
+    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+    AbstractThread* aMainThread)
     : mAudioFramesWritten(0),
-      mVideoTrackWritten(0),
-      mAudioTrackWritten(0),
+      mVideoStreamWritten(0),
+      mAudioStreamWritten(0),
       mNextAudioTime(aInit.mStartTime),
       mHaveSentFinishAudio(false),
       mHaveSentFinishVideo(false),
-      mAudioTrack(aInit.mInfo.HasAudio()
-                      ? aGraph->CreateSourceTrack(MediaSegment::AUDIO)
-                      : nullptr),
-      mVideoTrack(aInit.mInfo.HasVideo()
-                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
-                      : nullptr),
-      mAudioOutputTrack(std::move(aAudioOutputTrack)),
-      mVideoOutputTrack(std::move(aVideoOutputTrack)),
-      mAudioPort((mAudioOutputTrack && mAudioTrack)
-                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
-                     : nullptr),
-      mVideoPort((mVideoOutputTrack && mVideoTrack)
-                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
-                     : nullptr),
+      mAudioStream(std::move(aAudioStream)),
+      mVideoStream(std::move(aVideoStream)),
       // DecodedStreamGraphListener will resolve these promises.
       mListener(MakeRefPtr<DecodedStreamGraphListener>(
-          mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
-          std::move(aVideoEndedPromise))) {
+          mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
+          std::move(aVideoEndedPromise), aMainThread)),
+      mOutputStreamManager(aOutputStreamManager),
+      mAbstractMainThread(aMainThread) {
   MOZ_ASSERT(NS_IsMainThread());
-  if (mAudioTrack) {
-    mAudioTrack->SetAppendDataSourceRate(aInit.mInfo.mAudio.mRate);
-  }
+  MOZ_DIAGNOSTIC_ASSERT(
+      mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
+      "Tracks must be pre-created on main thread");
 }
 
-DecodedStreamData::~DecodedStreamData() {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mAudioTrack) {
-    mAudioTrack->Destroy();
-  }
-  if (mVideoTrack) {
-    mVideoTrack->Destroy();
-  }
-  if (mAudioPort) {
-    mAudioPort->Destroy();
-  }
-  if (mVideoPort) {
-    mVideoPort->Destroy();
-  }
-}
+DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }
 
 MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
   return mListener->OnOutput();
 }
 
 void DecodedStreamData::Forget() { mListener->Forget(); }
 
 void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
   aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
   aInfo.mAudioFramesWritten = mAudioFramesWritten;
-  aInfo.mStreamAudioWritten = mAudioTrackWritten;
+  aInfo.mStreamAudioWritten = mAudioStreamWritten;
   aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
   aInfo.mLastVideoStartTime =
       mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mLastVideoEndTime =
       mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
   aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
 }
 
-DecodedStream::DecodedStream(
-    MediaDecoderStateMachine* aStateMachine,
-    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
-    double aPlaybackRate, bool aPreservesPitch,
-    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
-    : mOwnerThread(aStateMachine->OwnerThread()),
+DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
+                             AbstractThread* aMainThread,
+                             MediaQueue<AudioData>& aAudioQueue,
+                             MediaQueue<VideoData>& aVideoQueue,
+                             OutputStreamManager* aOutputStreamManager)
+    : mOwnerThread(aOwnerThread),
+      mAbstractMainThread(aMainThread),
+      mOutputStreamManager(aOutputStreamManager),
       mWatchManager(this, mOwnerThread),
       mPlaying(false, "DecodedStream::mPlaying"),
-      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
+      mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
                        "DecodedStream::mPrincipalHandle (Mirror)"),
-      mOutputTracks(std::move(aOutputTracks)),
-      mVolume(aVolume),
-      mPlaybackRate(aPlaybackRate),
-      mPreservesPitch(aPreservesPitch),
       mAudioQueue(aAudioQueue),
       mVideoQueue(aVideoQueue) {
-  mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
+  mPrincipalHandle.Connect(mOutputStreamManager->CanonicalPrincipalHandle());
 
   mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
+  PlayingChanged();  // Notify of the initial state
 }
 
 DecodedStream::~DecodedStream() {
   MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
+  NS_ProxyRelease("DecodedStream::mOutputStreamManager", mAbstractMainThread,
+                  do_AddRef(mOutputStreamManager));
+}
+
+const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
+  AssertOwnerThread();
+  return mParams;
+}
+
+void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+  mParams = aParams;
 }
 
 RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isSome());
 
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
     return mAudioEndedPromise;
@@ -397,87 +388,89 @@ RefPtr<DecodedStream::EndedPromise> Deco
   }
   return nullptr;
 }
 
 nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                               const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
-  MOZ_DIAGNOSTIC_ASSERT(!mOutputTracks.IsEmpty());
 
   mStartTime.emplace(aStartTime);
   mLastOutputTime = TimeUnit::Zero();
   mInfo = aInfo;
   mPlaying = true;
   ConnectListener();
 
   class R : public Runnable {
     typedef MozPromiseHolder<MediaSink::EndedPromise> Promise;
 
    public:
-    R(PlaybackInfoInit&& aInit,
-      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-      Promise&& aAudioEndedPromise, Promise&& aVideoEndedPromise)
+    R(PlaybackInfoInit&& aInit, Promise&& aAudioEndedPromise,
+      Promise&& aVideoEndedPromise, OutputStreamManager* aManager,
+      AbstractThread* aMainThread)
         : Runnable("CreateDecodedStreamData"),
           mInit(std::move(aInit)),
-          mOutputTracks(std::move(aOutputTracks)),
           mAudioEndedPromise(std::move(aAudioEndedPromise)),
-          mVideoEndedPromise(std::move(aVideoEndedPromise)) {}
+          mVideoEndedPromise(std::move(aVideoEndedPromise)),
+          mOutputStreamManager(aManager),
+          mAbstractMainThread(aMainThread) {}
     NS_IMETHOD Run() override {
       MOZ_ASSERT(NS_IsMainThread());
-      RefPtr<ProcessedMediaTrack> audioOutputTrack;
-      RefPtr<ProcessedMediaTrack> videoOutputTrack;
-      for (const auto& track : mOutputTracks) {
-        if (track->mType == MediaSegment::AUDIO) {
-          MOZ_DIAGNOSTIC_ASSERT(
-              !audioOutputTrack,
-              "We only support capturing to one output track per kind");
-          audioOutputTrack = track;
-        } else if (track->mType == MediaSegment::VIDEO) {
-          MOZ_DIAGNOSTIC_ASSERT(
-              !videoOutputTrack,
-              "We only support capturing to one output track per kind");
-          videoOutputTrack = track;
-        } else {
-          MOZ_CRASH("Unknown media type");
-        }
-      }
-      if ((!audioOutputTrack && !videoOutputTrack) ||
-          (audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
-          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
-        // No output tracks yet, or they're going away. Halt playback by not
-        // creating DecodedStreamData. MDSM will try again with a new
-        // DecodedStream sink when tracks are available.
+      // No need to create a source track when there are no output tracks.
+      // This happens when RemoveOutput() is called immediately after
+      // StartPlayback().
+      if (mOutputStreamManager->IsEmpty()) {
+        // Resolve the promise to indicate the end of playback.
+        mAudioEndedPromise.Resolve(true, __func__);
+        mVideoEndedPromise.Resolve(true, __func__);
         return NS_OK;
       }
+      RefPtr<SourceMediaTrack> audioStream =
+          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
+      if (mInit.mInfo.HasAudio() && !audioStream) {
+        MOZ_DIAGNOSTIC_ASSERT(
+            !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
+        audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+      }
+      if (audioStream) {
+        audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
+      }
+      RefPtr<SourceMediaTrack> videoStream =
+          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
+      if (mInit.mInfo.HasVideo() && !videoStream) {
+        MOZ_DIAGNOSTIC_ASSERT(
+            !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
+        videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+      }
       mData = MakeUnique<DecodedStreamData>(
-          std::move(mInit), mOutputTracks[0]->Graph(),
-          std::move(audioOutputTrack), std::move(videoOutputTrack),
-          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise));
+          mOutputStreamManager, std::move(mInit), std::move(audioStream),
+          std::move(videoStream), std::move(mAudioEndedPromise),
+          std::move(mVideoEndedPromise), mAbstractMainThread);
       return NS_OK;
     }
     UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
 
    private:
     PlaybackInfoInit mInit;
-    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
     Promise mAudioEndedPromise;
     Promise mVideoEndedPromise;
+    RefPtr<OutputStreamManager> mOutputStreamManager;
     UniquePtr<DecodedStreamData> mData;
+    const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
   MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
   mAudioEndedPromise = audioEndedHolder.Ensure(__func__);
   MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
   mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
   PlaybackInfoInit init{aStartTime, aInfo};
-  nsCOMPtr<nsIRunnable> r = new R(
-      std::move(init), nsTArray<RefPtr<ProcessedMediaTrack>>(mOutputTracks),
-      std::move(audioEndedHolder), std::move(videoEndedHolder));
+  nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
+                                  std::move(videoEndedHolder),
+                                  mOutputStreamManager, mAbstractMainThread);
   SyncRunnable::DispatchToThread(
       SystemGroup::EventTargetFor(TaskCategory::Other), r);
   mData = static_cast<R*>(r.get())->ReleaseData();
 
   if (mData) {
     mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                 &DecodedStream::NotifyOutput);
     SendData();
@@ -520,50 +513,48 @@ void DecodedStream::DestroyData(UniquePt
   AssertOwnerThread();
 
   if (!aData) {
     return;
   }
 
   mOutputListener.Disconnect();
 
-  NS_DispatchToMainThread(
-      NS_NewRunnableFunction("DecodedStream::DestroyData",
-                             [data = std::move(aData)]() { data->Forget(); }));
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      "DecodedStream::DestroyData",
+      [data = std::move(aData), manager = mOutputStreamManager]() {
+        data->Forget();
+        manager->RemoveTracks();
+      }));
 }
 
 void DecodedStream::SetPlaying(bool aPlaying) {
   AssertOwnerThread();
 
   // Resume/pause matters only when playback started.
   if (mStartTime.isNothing()) {
     return;
   }
 
   mPlaying = aPlaying;
 }
 
 void DecodedStream::SetVolume(double aVolume) {
   AssertOwnerThread();
-  mVolume = aVolume;
+  mParams.mVolume = aVolume;
 }
 
 void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
   AssertOwnerThread();
-  mPlaybackRate = aPlaybackRate;
+  mParams.mPlaybackRate = aPlaybackRate;
 }
 
 void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
-  mPreservesPitch = aPreservesPitch;
-}
-
-double DecodedStream::PlaybackRate() const {
-  AssertOwnerThread();
-  return mPlaybackRate;
+  mParams.mPreservesPitch = aPreservesPitch;
 }
 
 static void SendStreamAudio(DecodedStreamData* aStream,
                             const TimeUnit& aStartTime, AudioData* aData,
                             AudioSegment* aOutput, uint32_t aRate,
                             const PrincipalHandle& aPrincipalHandle) {
   // The amount of audio frames that is used to fuzz rounding errors.
   static const int64_t AUDIO_FUZZ_FRAMES = 1;
@@ -632,34 +623,35 @@ void DecodedStream::SendAudio(double aVo
                     aPrincipalHandle);
   }
 
   output.ApplyVolume(aVolume);
 
   // |mNextAudioTime| is updated as we process each audio sample in
   // SendStreamAudio().
   if (output.GetDuration() > 0) {
-    mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
+    mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
   }
 
   if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-    mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
+    mData->mListener->EndTrackAt(mData->mAudioStream,
+                                 mData->mAudioStreamWritten);
     mData->mHaveSentFinishAudio = true;
   }
 }
 
 void DecodedStreamData::WriteVideoToSegment(
     layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
     const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
     VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
   RefPtr<layers::Image> image = aImage;
   auto end =
-      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
+      mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
   auto start =
-      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
+      mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
   aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                        aTimeStamp);
   // Extend this so we get accurate durations for all frames.
   // Because this track is pushed, we need durations so the graph can track
   // when playout of the track has finished.
   aOutput->ExtendLastFrameBy(end - start);
 
   mLastVideoStartTime = Some(aStart);
@@ -695,17 +687,17 @@ void DecodedStream::ResetVideo(const Pri
   // nullptr) at an earlier time than the previous, will signal to that consumer
   // to discard any frames ahead in time of the new frame. To be honest, this is
   // an ugly hack because the direct listeners of the MediaTrackGraph do not
   // have an API that supports clearing the future frames. ImageContainer and
   // VideoFrameContainer do though, and we will need to move to a similar API
   // for video tracks as part of bug 1493618.
   resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                        aPrincipalHandle, false, currentTime);
-  mData->mVideoTrack->AppendData(&resetter);
+  mData->mVideoStream->AppendData(&resetter);
 
   // Consumer buffers have been reset. We now set the next time to the start
   // time of the current frame, so that it can be displayed again on resuming.
   if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
     mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
     mData->mLastVideoEndTime = Some(v->mTime);
   } else {
     // There was no current frame in the queue. We set the next time to the
@@ -775,33 +767,33 @@ void DecodedStream::SendVideo(const Prin
       // the track's lifetime in the MTG, as rendering is based on timestamps,
       // aka frame start times.
       TimeStamp t =
           std::max(mData->mLastVideoTimeStamp,
                    currentTime + (lastEnd - currentPosition).ToTimeDuration());
       TimeUnit end = std::max(
           v->GetEndTime(),
           lastEnd + TimeUnit::FromMicroseconds(
-                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
+                        mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
       mData->mLastVideoImage = v->mImage;
       mData->mLastVideoImageDisplaySize = v->mDisplay;
       mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                  &output, aPrincipalHandle);
     }
   }
 
   // Check the output is not empty.
   bool compensateEOS = false;
   bool forceBlack = false;
   if (output.GetLastFrame()) {
     compensateEOS = ZeroDurationAtLastChunk(output);
   }
 
   if (output.GetDuration() > 0) {
-    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
+    mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
   }
 
   if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (!mData->mLastVideoImage) {
       // We have video, but the video queue finished before we received any
       // frame. We insert a black frame to progress any consuming
       // HTMLMediaElement. This mirrors the behavior of VideoSink.
 
@@ -813,47 +805,49 @@ void DecodedStream::SendVideo(const Prin
       mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
     }
     if (compensateEOS) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       // We round the nr of microseconds up, because WriteVideoToSegment
       // will round the conversion from microseconds to TrackTime down.
       auto deviation = TimeUnit::FromMicroseconds(
-          mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
+          mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
       auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
       mData->WriteVideoToSegment(
           mData->mLastVideoImage, start, start + deviation,
           mData->mLastVideoImageDisplaySize,
           currentTime + (start + deviation - currentPosition).ToTimeDuration(),
           &endSegment, aPrincipalHandle);
       MOZ_ASSERT(endSegment.GetDuration() > 0);
       if (forceBlack) {
         endSegment.ReplaceWithDisabled();
       }
-      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
+      mData->mVideoStreamWritten +=
+          mData->mVideoStream->AppendData(&endSegment);
     }
-    mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
+    mData->mListener->EndTrackAt(mData->mVideoStream,
+                                 mData->mVideoStreamWritten);
     mData->mHaveSentFinishVideo = true;
   }
 }
 
 void DecodedStream::SendData() {
   AssertOwnerThread();
 
   // Not yet created on the main thread. MDSM will try again later.
   if (!mData) {
     return;
   }
 
   if (!mPlaying) {
     return;
   }
 
-  SendAudio(mVolume, mPrincipalHandle);
+  SendAudio(mParams.mVolume, mPrincipalHandle);
   SendVideo(mPrincipalHandle);
 }
 
 TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
     auto t = mStartTime.ref() +
              FramesToTimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
@@ -897,16 +891,20 @@ void DecodedStream::NotifyOutput(int64_t
 
 void DecodedStream::PlayingChanged() {
   AssertOwnerThread();
 
   if (!mPlaying) {
     // On seek or pause we discard future frames.
     ResetVideo(mPrincipalHandle);
   }
+
+  mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
+      "OutputStreamManager::SetPlaying", mOutputStreamManager,
+      &OutputStreamManager::SetPlaying, mPlaying));
 }
 
 void DecodedStream::ConnectListener() {
   AssertOwnerThread();
 
   mAudioPushListener = mAudioQueue.PushEvent().Connect(
       mOwnerThread, this, &DecodedStream::SendData);
   mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -17,49 +17,52 @@
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/StateMirroring.h"
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 
 class DecodedStreamData;
-class MediaDecoderStateMachine;
 class AudioData;
 class VideoData;
+class OutputStreamManager;
 struct PlaybackInfoInit;
 class ProcessedMediaTrack;
 class TimeStamp;
 
 template <class T>
 class MediaQueue;
 
 class DecodedStream : public MediaSink {
+  using MediaSink::PlaybackParams;
+
  public:
-  DecodedStream(MediaDecoderStateMachine* aStateMachine,
-                nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-                double aVolume, double aPlaybackRate, bool aPreservesPitch,
+  DecodedStream(AbstractThread* aOwnerThread, AbstractThread* aMainThread,
                 MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue);
+                MediaQueue<VideoData>& aVideoQueue,
+                OutputStreamManager* aOutputStreamManager);
+
+  // MediaSink functions.
+  const PlaybackParams& GetPlaybackParams() const override;
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
 
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
   bool HasUnplayedFrames(TrackType aType) const override {
     // TODO: implement this.
     return false;
   }
 
   void SetVolume(double aVolume) override;
   void SetPlaybackRate(double aPlaybackRate) override;
   void SetPreservesPitch(bool aPreservesPitch) override;
   void SetPlaying(bool aPlaying) override;
 
-  double PlaybackRate() const override;
-
   nsresult Start(const media::TimeUnit& aStartTime,
                  const MediaInfo& aInfo) override;
   void Stop() override;
   bool IsStarted() const override;
   bool IsPlaying() const override;
   void Shutdown() override;
   void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) override;
 
@@ -80,31 +83,36 @@ class DecodedStream : public MediaSink {
 
   void PlayingChanged();
 
   void ConnectListener();
   void DisconnectListener();
 
   const RefPtr<AbstractThread> mOwnerThread;
 
+  const RefPtr<AbstractThread> mAbstractMainThread;
+
+  /*
+   * Main thread only members.
+   */
+  // Data about MediaStreams that are being fed by the decoder.
+  const RefPtr<OutputStreamManager> mOutputStreamManager;
+
   /*
    * Worker thread only members.
    */
   WatchManager<DecodedStream> mWatchManager;
   UniquePtr<DecodedStreamData> mData;
   RefPtr<EndedPromise> mAudioEndedPromise;
   RefPtr<EndedPromise> mVideoEndedPromise;
 
   Watchable<bool> mPlaying;
   Mirror<PrincipalHandle> mPrincipalHandle;
-  const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
 
-  double mVolume;
-  double mPlaybackRate;
-  bool mPreservesPitch;
+  PlaybackParams mParams;
 
   media::NullableTimeUnit mStartTime;
   media::TimeUnit mLastOutputTime;
   MediaInfo mInfo;
 
   MediaQueue<AudioData>& mAudioQueue;
   MediaQueue<VideoData>& mVideoQueue;
 
--- a/dom/media/mediasink/MediaSink.h
+++ b/dom/media/mediasink/MediaSink.h
@@ -2,16 +2,17 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaSink_h_
 #define MediaSink_h_
 
+#include "AudioDeviceInfo.h"
 #include "MediaInfo.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/dom/MediaDebugInfoBinding.h"
 #include "nsISupportsImpl.h"
 
 namespace mozilla {
 
@@ -33,16 +34,33 @@ class VideoFrameContainer;
  * Note this class is not thread-safe and should be called from the state
  * machine thread only.
  */
 class MediaSink {
  public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
   typedef mozilla::TrackInfo::TrackType TrackType;
 
+  struct PlaybackParams {
+    PlaybackParams()
+        : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
+    double mVolume;
+    double mPlaybackRate;
+    bool mPreservesPitch;
+    RefPtr<AudioDeviceInfo> mSink;
+  };
+
+  // Return the playback parameters of this sink.
+  // Can be called in any state.
+  virtual const PlaybackParams& GetPlaybackParams() const = 0;
+
+  // Set the playback parameters of this sink.
+  // Can be called in any state.
+  virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
+
   // EndedPromise needs to be a non-exclusive promise as it is shared between
   // both the AudioSink and VideoSink.
   typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
 
   // Return a promise which is resolved when the track finishes
   // or null if no such track.
   // Must be called after playback starts.
   virtual RefPtr<EndedPromise> OnEnded(TrackType aType) = 0;
@@ -77,20 +95,16 @@ class MediaSink {
   // Whether to preserve pitch of the audio track.
   // Do nothing if this sink has no audio track.
   // Can be called in any state.
   virtual void SetPreservesPitch(bool aPreservesPitch) {}
 
   // Pause/resume the playback. Only work after playback starts.
   virtual void SetPlaying(bool aPlaying) = 0;
 
-  // Get the playback rate.
-  // Can be called in any state.
-  virtual double PlaybackRate() const = 0;
-
   // Single frame rendering operation may need to be done before playback
   // started (1st frame) or right after seek completed or playback stopped.
   // Do nothing if this sink has no video track. Can be called in any state.
   virtual void Redraw(const VideoInfo& aInfo){};
 
   // Begin a playback session with the provided start time and media info.
   // Must be called when playback is stopped.
   virtual nsresult Start(const media::TimeUnit& aStartTime,
@@ -103,20 +117,16 @@ class MediaSink {
   // Return true if playback has started.
   // Can be called in any state.
   virtual bool IsStarted() const = 0;
 
   // Return true if playback is started and not paused otherwise false.
   // Can be called in any state.
   virtual bool IsPlaying() const = 0;
 
-  // The audio output device this MediaSink is playing audio data to. The
-  // default device is used if this returns null.
-  virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
-
   // Called on the state machine thread to shut down the sink. All resources
   // allocated by this sink should be released.
   // Must be called after playback stopped.
   virtual void Shutdown() {}
 
   virtual void SetSecondaryVideoContainer(VideoFrameContainer* aSecondary) {}
   virtual void ClearSecondaryVideoContainer() {}
 
new file mode 100644
--- /dev/null
+++ b/dom/media/mediasink/OutputStreamManager.cpp
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OutputStreamManager.h"
+
+#include "DOMMediaStream.h"
+#include "../MediaTrackGraph.h"
+#include "mozilla/dom/MediaStreamTrack.h"
+#include "mozilla/dom/AudioStreamTrack.h"
+#include "mozilla/dom/VideoStreamTrack.h"
+#include "nsContentUtils.h"
+
+namespace mozilla {
+
+#define LOG(level, msg, ...) \
+  MOZ_LOG(gMediaDecoderLog, level, (msg, ##__VA_ARGS__))
+
+class DecodedStreamTrackSource : public dom::MediaStreamTrackSource {
+ public:
+  NS_DECL_ISUPPORTS_INHERITED
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DecodedStreamTrackSource,
+                                           dom::MediaStreamTrackSource)
+
+  explicit DecodedStreamTrackSource(SourceMediaTrack* aSourceStream,
+                                    nsIPrincipal* aPrincipal)
+      : dom::MediaStreamTrackSource(aPrincipal, nsString()),
+        mTrack(aSourceStream->Graph()->CreateForwardedInputTrack(
+            aSourceStream->mType)),
+        mPort(mTrack->AllocateInputPort(aSourceStream)) {
+    MOZ_ASSERT(NS_IsMainThread());
+  }
+
+  dom::MediaSourceEnum GetMediaSource() const override {
+    return dom::MediaSourceEnum::Other;
+  }
+
+  void Stop() override {
+    MOZ_ASSERT(NS_IsMainThread());
+
+    // We don't notify the source that a track was stopped since it will keep
+    // producing tracks until the element ends. The decoder also needs the
+    // tracks it created to be live at the source since the decoder's clock is
+    // based on MediaStreams during capture. We do however, disconnect this
+    // track's underlying track.
+    if (!mTrack->IsDestroyed()) {
+      mTrack->Destroy();
+      mPort->Destroy();
+    }
+  }
+
+  void Disable() override {}
+
+  void Enable() override {}
+
+  void SetPrincipal(nsIPrincipal* aPrincipal) {
+    MOZ_ASSERT(NS_IsMainThread());
+    mPrincipal = aPrincipal;
+    PrincipalChanged();
+  }
+
+  void ForceEnded() { OverrideEnded(); }
+
+  const RefPtr<ProcessedMediaTrack> mTrack;
+  const RefPtr<MediaInputPort> mPort;
+
+ protected:
+  virtual ~DecodedStreamTrackSource() {
+    MOZ_ASSERT(NS_IsMainThread());
+    MOZ_ASSERT(mTrack->IsDestroyed());
+  }
+};
+
+NS_IMPL_ADDREF_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
+NS_IMPL_RELEASE_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DecodedStreamTrackSource)
+NS_INTERFACE_MAP_END_INHERITING(dom::MediaStreamTrackSource)
+NS_IMPL_CYCLE_COLLECTION_INHERITED(DecodedStreamTrackSource,
+                                   dom::MediaStreamTrackSource)
+
+OutputStreamData::OutputStreamData(OutputStreamManager* aManager,
+                                   AbstractThread* aAbstractMainThread,
+                                   DOMMediaStream* aDOMStream)
+    : mManager(aManager),
+      mAbstractMainThread(aAbstractMainThread),
+      mDOMStream(aDOMStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+}
+
+OutputStreamData::~OutputStreamData() = default;
+
+void OutputStreamData::AddTrack(SourceMediaTrack* aTrack,
+                                MediaSegment::Type aType,
+                                nsIPrincipal* aPrincipal, bool aAsyncAddTrack) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
+
+  LOG(LogLevel::Debug,
+      "Adding output %s track sourced from track %p to MediaStream %p%s",
+      aType == MediaSegment::AUDIO ? "audio" : "video", aTrack,
+      mDOMStream.get(), aAsyncAddTrack ? " (async)" : "");
+
+  auto source = MakeRefPtr<DecodedStreamTrackSource>(aTrack, aPrincipal);
+  RefPtr<dom::MediaStreamTrack> track;
+  if (aType == MediaSegment::AUDIO) {
+    track = new dom::AudioStreamTrack(mDOMStream->GetParentObject(),
+                                      source->mTrack, source);
+  } else {
+    MOZ_ASSERT(aType == MediaSegment::VIDEO);
+    track = new dom::VideoStreamTrack(mDOMStream->GetParentObject(),
+                                      source->mTrack, source);
+  }
+  mTracks.AppendElement(track.get());
+  if (aAsyncAddTrack) {
+    GetMainThreadEventTarget()->Dispatch(
+        NewRunnableMethod<RefPtr<dom::MediaStreamTrack>>(
+            "DOMMediaStream::AddTrackInternal", mDOMStream.get(),
+            &DOMMediaStream::AddTrackInternal, track));
+  } else {
+    mDOMStream->AddTrackInternal(track);
+  }
+}
+
+void OutputStreamData::RemoveTrack(SourceMediaTrack* aTrack) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
+
+  LOG(LogLevel::Debug,
+      "Removing output track sourced by track %p from MediaStream %p", aTrack,
+      mDOMStream.get());
+
+  for (const auto& t : nsTArray<WeakPtr<dom::MediaStreamTrack>>(mTracks)) {
+    mTracks.RemoveElement(t);
+    if (!t || t->Ended()) {
+      continue;
+    }
+    DecodedStreamTrackSource& source =
+        static_cast<DecodedStreamTrackSource&>(t->GetSource());
+    GetMainThreadEventTarget()->Dispatch(
+        NewRunnableMethod("DecodedStreamTrackSource::ForceEnded", &source,
+                          &DecodedStreamTrackSource::ForceEnded));
+  }
+}
+
+void OutputStreamData::SetPrincipal(nsIPrincipal* aPrincipal) {
+  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
+  for (const WeakPtr<dom::MediaStreamTrack>& track : mTracks) {
+    if (!track || track->Ended()) {
+      continue;
+    }
+    DecodedStreamTrackSource& source =
+        static_cast<DecodedStreamTrackSource&>(track->GetSource());
+    source.SetPrincipal(aPrincipal);
+  }
+}
+
+OutputStreamManager::OutputStreamManager(SharedDummyTrack* aDummyStream,
+                                         nsIPrincipal* aPrincipal,
+                                         AbstractThread* aAbstractMainThread)
+    : mAbstractMainThread(aAbstractMainThread),
+      mDummyStream(aDummyStream),
+      mPrincipalHandle(
+          aAbstractMainThread,
+          aPrincipal ? MakePrincipalHandle(aPrincipal) : PRINCIPAL_HANDLE_NONE,
+          "OutputStreamManager::mPrincipalHandle (Canonical)") {
+  MOZ_ASSERT(NS_IsMainThread());
+}
+
+void OutputStreamManager::Add(DOMMediaStream* aDOMStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  LOG(LogLevel::Info, "Adding MediaStream %p", aDOMStream);
+
+  OutputStreamData* p = mStreams
+                            .AppendElement(new OutputStreamData(
+                                this, mAbstractMainThread, aDOMStream))
+                            ->get();
+  for (const auto& lt : mLiveTracks) {
+    p->AddTrack(lt->mSourceTrack, lt->mType, mPrincipalHandle.Ref(), false);
+  }
+}
+
+void OutputStreamManager::Remove(DOMMediaStream* aDOMStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  LOG(LogLevel::Info, "Removing MediaStream %p", aDOMStream);
+
+  AutoRemoveDestroyedStreams();
+  mStreams.ApplyIf(
+      aDOMStream, 0, StreamComparator(),
+      [&](const UniquePtr<OutputStreamData>& aData) {
+        for (const auto& lt : mLiveTracks) {
+          aData->RemoveTrack(lt->mSourceTrack);
+        }
+      },
+      []() { MOZ_ASSERT_UNREACHABLE("Didn't exist"); });
+  DebugOnly<bool> rv = mStreams.RemoveElement(aDOMStream, StreamComparator());
+  MOZ_ASSERT(rv);
+}
+
+bool OutputStreamManager::HasTrackType(MediaSegment::Type aType) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  return mLiveTracks.Contains(aType, TrackTypeComparator());
+}
+
+bool OutputStreamManager::HasTracks(SourceMediaTrack* aAudioStream,
+                                    SourceMediaTrack* aVideoStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  size_t nrExpectedTracks = 0;
+  bool asExpected = true;
+  if (aAudioStream) {
+    Unused << ++nrExpectedTracks;
+    asExpected = asExpected && mLiveTracks.Contains(
+                                   MakePair(aAudioStream, MediaSegment::AUDIO),
+                                   TrackComparator());
+  }
+  if (aVideoStream) {
+    Unused << ++nrExpectedTracks;
+    asExpected = asExpected && mLiveTracks.Contains(
+                                   MakePair(aVideoStream, MediaSegment::VIDEO),
+                                   TrackComparator());
+  }
+  asExpected = asExpected && mLiveTracks.Length() == nrExpectedTracks;
+  return asExpected;
+}
+
+SourceMediaTrack* OutputStreamManager::GetPrecreatedTrackOfType(
+    MediaSegment::Type aType) const {
+  auto i = mLiveTracks.IndexOf(aType, 0, PrecreatedTrackTypeComparator());
+  return i == nsTArray<UniquePtr<LiveTrack>>::NoIndex
+             ? nullptr
+             : mLiveTracks[i]->mSourceTrack.get();
+}
+
+size_t OutputStreamManager::NumberOfTracks() {
+  MOZ_ASSERT(NS_IsMainThread());
+  return mLiveTracks.Length();
+}
+
+already_AddRefed<SourceMediaTrack> OutputStreamManager::AddTrack(
+    MediaSegment::Type aType) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(!HasTrackType(aType),
+             "Cannot have two tracks of the same type at the same time");
+
+  RefPtr<SourceMediaTrack> track =
+      mDummyStream->mTrack->Graph()->CreateSourceTrack(aType);
+  if (!mPlaying) {
+    track->Suspend();
+  }
+
+  LOG(LogLevel::Info, "Adding %s track sourced by track %p",
+      aType == MediaSegment::AUDIO ? "audio" : "video", track.get());
+
+  mLiveTracks.AppendElement(MakeUnique<LiveTrack>(track, aType));
+  AutoRemoveDestroyedStreams();
+  for (const auto& data : mStreams) {
+    data->AddTrack(track, aType, mPrincipalHandle.Ref(), true);
+  }
+
+  return track.forget();
+}
+
+OutputStreamManager::LiveTrack::LiveTrack(SourceMediaTrack* aSourceTrack,
+                                          MediaSegment::Type aType)
+    : mSourceTrack(aSourceTrack), mType(aType) {}
+
+OutputStreamManager::LiveTrack::~LiveTrack() { mSourceTrack->Destroy(); }
+
+void OutputStreamManager::AutoRemoveDestroyedStreams() {
+  MOZ_ASSERT(NS_IsMainThread());
+  for (size_t i = mStreams.Length(); i > 0; --i) {
+    const auto& data = mStreams[i - 1];
+    if (!data->mDOMStream) {
+      // If the mDOMStream WeakPtr is now null, mDOMStream has been destructed.
+      mStreams.RemoveElementAt(i - 1);
+    }
+  }
+}
+
+void OutputStreamManager::RemoveTrack(SourceMediaTrack* aTrack) {
+  MOZ_ASSERT(NS_IsMainThread());
+  LOG(LogLevel::Info, "Removing track with source track %p", aTrack);
+  DebugOnly<bool> rv =
+      mLiveTracks.RemoveElement(aTrack, TrackStreamComparator());
+  MOZ_ASSERT(rv);
+  AutoRemoveDestroyedStreams();
+  for (const auto& data : mStreams) {
+    data->RemoveTrack(aTrack);
+  }
+}
+
+void OutputStreamManager::RemoveTracks() {
+  MOZ_ASSERT(NS_IsMainThread());
+  for (size_t i = mLiveTracks.Length(); i > 0; --i) {
+    RemoveTrack(mLiveTracks[i - 1]->mSourceTrack);
+  }
+}
+
+void OutputStreamManager::Disconnect() {
+  MOZ_ASSERT(NS_IsMainThread());
+  RemoveTracks();
+  MOZ_ASSERT(mLiveTracks.IsEmpty());
+  AutoRemoveDestroyedStreams();
+  nsTArray<RefPtr<DOMMediaStream>> domStreams(mStreams.Length());
+  for (const auto& data : mStreams) {
+    domStreams.AppendElement(data->mDOMStream);
+  }
+  for (auto& domStream : domStreams) {
+    Remove(domStream);
+  }
+  MOZ_ASSERT(mStreams.IsEmpty());
+}
+
+AbstractCanonical<PrincipalHandle>*
+OutputStreamManager::CanonicalPrincipalHandle() {
+  return &mPrincipalHandle;
+}
+
+void OutputStreamManager::SetPrincipal(nsIPrincipal* aPrincipal) {
+  MOZ_ASSERT(NS_IsMainThread());
+  nsCOMPtr<nsIPrincipal> principal = GetPrincipalFromHandle(mPrincipalHandle);
+  if (nsContentUtils::CombineResourcePrincipals(&principal, aPrincipal)) {
+    AutoRemoveDestroyedStreams();
+    for (const UniquePtr<OutputStreamData>& data : mStreams) {
+      data->SetPrincipal(principal);
+    }
+    mPrincipalHandle = MakePrincipalHandle(principal);
+  }
+}
+
+void OutputStreamManager::SetPlaying(bool aPlaying) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mPlaying == aPlaying) {
+    return;
+  }
+
+  mPlaying = aPlaying;
+  for (auto& lt : mLiveTracks) {
+    if (mPlaying) {
+      lt->mSourceTrack->Resume();
+      lt->mEverPlayed = true;
+    } else {
+      lt->mSourceTrack->Suspend();
+    }
+  }
+}
+
+OutputStreamManager::~OutputStreamManager() = default;
+
+#undef LOG
+
+}  // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/dom/media/mediasink/OutputStreamManager.h
@@ -0,0 +1,161 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef OutputStreamManager_h
+#define OutputStreamManager_h
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/StateMirroring.h"
+#include "mozilla/WeakPtr.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+
+class DOMMediaStream;
+class MediaInputPort;
+class OutputStreamManager;
+class ProcessedMediaTrack;
+class SourceMediaTrack;
+
+namespace dom {
+class MediaStreamTrack;
+}
+
+class OutputStreamData {
+ public:
+  OutputStreamData(OutputStreamManager* aManager,
+                   AbstractThread* aAbstractMainThread,
+                   DOMMediaStream* aDOMStream);
+  OutputStreamData(const OutputStreamData& aOther) = delete;
+  OutputStreamData(OutputStreamData&& aOther) = delete;
+  ~OutputStreamData();
+
+  // Creates and adds a MediaStreamTrack to mDOMStream so that we can feed data
+  // to it. For a true aAsyncAddTrack we will dispatch a task to add the
+  // created track to mDOMStream, as is required by spec for the "addtrack"
+  // event.
+  void AddTrack(SourceMediaTrack* aTrack, MediaSegment::Type aType,
+                nsIPrincipal* aPrincipal, bool aAsyncAddTrack);
+  // Ends any MediaStreamTracks sourced from aTrack.
+  void RemoveTrack(SourceMediaTrack* aTrack);
+
+  void SetPrincipal(nsIPrincipal* aPrincipal);
+
+  const RefPtr<OutputStreamManager> mManager;
+  const RefPtr<AbstractThread> mAbstractMainThread;
+  // The DOMMediaStream we add tracks to and represent.
+  const WeakPtr<DOMMediaStream> mDOMStream;
+
+ private:
+  // Tracks that have been added and not yet removed.
+  nsTArray<WeakPtr<dom::MediaStreamTrack>> mTracks;
+};
+
+class OutputStreamManager {
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
+
+ public:
+  OutputStreamManager(SharedDummyTrack* aDummyStream, nsIPrincipal* aPrincipal,
+                      AbstractThread* aAbstractMainThread);
+  // Add the output stream to the collection.
+  void Add(DOMMediaStream* aDOMStream);
+  // Remove the output stream from the collection.
+  void Remove(DOMMediaStream* aDOMStream);
+  // Returns true if there's a live track of the given type.
+  bool HasTrackType(MediaSegment::Type aType);
+  // Returns true if the given tracks are sourcing all currently live tracks.
+  // Use nullptr to make it ignored for that type.
+  bool HasTracks(SourceMediaTrack* aAudioStream,
+                 SourceMediaTrack* aVideoStream);
+  // Gets the underlying track for the given type if it has never been played,
+  // or nullptr if there is none.
+  SourceMediaTrack* GetPrecreatedTrackOfType(MediaSegment::Type aType) const;
+  // Returns the number of live tracks.
+  size_t NumberOfTracks();
+  // Add a track sourced to all output tracks and return the MediaTrack that
+  // sources it.
+  already_AddRefed<SourceMediaTrack> AddTrack(MediaSegment::Type aType);
+  // Remove all currently live tracks.
+  void RemoveTracks();
+  // Remove all currently live tracks and all output streams.
+  void Disconnect();
+  // The principal handle for the underlying decoder.
+  AbstractCanonical<PrincipalHandle>* CanonicalPrincipalHandle();
+  // Called when the underlying decoder's principal has changed.
+  void SetPrincipal(nsIPrincipal* aPrincipal);
+  // Called by DecodedStream when its playing state changes. While not playing
+  // we suspend mSourceTrack.
+  void SetPlaying(bool aPlaying);
+  // Return true if the collection of output streams is empty.
+  bool IsEmpty() const {
+    MOZ_ASSERT(NS_IsMainThread());
+    return mStreams.IsEmpty();
+  }
+
+  const RefPtr<AbstractThread> mAbstractMainThread;
+
+ private:
+  ~OutputStreamManager();
+
+  class LiveTrack {
+   public:
+    LiveTrack(SourceMediaTrack* aSourceTrack, MediaSegment::Type aType);
+    ~LiveTrack();
+
+    const RefPtr<SourceMediaTrack> mSourceTrack;
+    const MediaSegment::Type mType;
+    bool mEverPlayed = false;
+  };
+
+  struct StreamComparator {
+    static bool Equals(const UniquePtr<OutputStreamData>& aData,
+                       DOMMediaStream* aStream) {
+      return aData->mDOMStream == aStream;
+    }
+  };
+  struct TrackStreamComparator {
+    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
+                       SourceMediaTrack* aTrack) {
+      return aLiveTrack->mSourceTrack == aTrack;
+    }
+  };
+  struct TrackTypeComparator {
+    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
+                       MediaSegment::Type aType) {
+      return aLiveTrack->mType == aType;
+    }
+  };
+  struct PrecreatedTrackTypeComparator {
+    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
+                       MediaSegment::Type aType) {
+      return !aLiveTrack->mEverPlayed && aLiveTrack->mType == aType;
+    }
+  };
+  struct TrackComparator {
+    static bool Equals(
+        const UniquePtr<LiveTrack>& aLiveTrack,
+        const Pair<SourceMediaTrack*, MediaSegment::Type>& aOther) {
+      return aLiveTrack->mSourceTrack == aOther.first() &&
+             aLiveTrack->mType == aOther.second();
+    }
+  };
+
+  // Goes through mStreams and removes any entries that have been destroyed.
+  void AutoRemoveDestroyedStreams();
+
+  // Remove tracks sourced from aTrack from all output tracks.
+  void RemoveTrack(SourceMediaTrack* aTrack);
+
+  const RefPtr<SharedDummyTrack> mDummyStream;
+  nsTArray<UniquePtr<OutputStreamData>> mStreams;
+  nsTArray<UniquePtr<LiveTrack>> mLiveTracks;
+  Canonical<PrincipalHandle> mPrincipalHandle;
+  bool mPlaying = false;
+};
+
+}  // namespace mozilla
+
+#endif  // OutputStreamManager_h
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -151,16 +151,28 @@ VideoSink::VideoSink(AbstractThread* aTh
 }
 
 VideoSink::~VideoSink() {
 #ifdef XP_WIN
   MOZ_ASSERT(!mHiResTimersRequested);
 #endif
 }
 
+const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
+  AssertOwnerThread();
+
+  return mAudioSink->GetPlaybackParams();
+}
+
+void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+
+  mAudioSink->SetPlaybackParams(aParams);
+}
+
 RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
 
   if (aType == TrackInfo::kAudioTrack) {
     return mAudioSink->OnEnded(aType);
   } else if (aType == TrackInfo::kVideoTrack) {
     return mEndPromise;
@@ -206,22 +218,16 @@ void VideoSink::SetVolume(double aVolume
 }
 
 void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
   AssertOwnerThread();
 
   mAudioSink->SetPreservesPitch(aPreservesPitch);
 }
 
-double VideoSink::PlaybackRate() const {
-  AssertOwnerThread();
-
-  return mAudioSink->PlaybackRate();
-}
-
 void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
 #ifdef XP_WIN
   const bool needed = IsPlaying();
   if (needed == mHiResTimersRequested) {
     return;
   }
   if (needed) {
     // Ensure high precision timers are enabled on Windows, otherwise the
@@ -429,18 +435,18 @@ void VideoSink::TryUpdateRenderedVideoFr
     // Time to render this frame.
     UpdateRenderedVideoFrames();
     return;
   }
 
   // If we send this future frame to the compositor now, it will be rendered
   // immediately and break A/V sync. Instead, we schedule a timer to send it
   // later.
-  int64_t delta =
-      (v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
+  int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
+                  mAudioSink->GetPlaybackParams().mPlaybackRate;
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::UpdateRenderedVideoFramesByTimer() {
@@ -470,17 +476,17 @@ void VideoSink::RenderVideoFrames(int32_
   AutoTArray<RefPtr<VideoData>, 16> frames;
   VideoQueue().GetFirstElements(aMaxFrames, &frames);
   if (frames.IsEmpty() || !mContainer) {
     return;
   }
 
   AutoTArray<ImageContainer::NonOwningImage, 16> images;
   TimeStamp lastFrameTime;
-  double playbackRate = mAudioSink->PlaybackRate();
+  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
   for (uint32_t i = 0; i < frames.Length(); ++i) {
     VideoData* frame = frames[i];
     bool wasSent = frame->IsSentToCompositor();
     frame->MarkSentToCompositor();
 
     if (!frame->mImage || !frame->mImage->IsValid() ||
         !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
       continue;
@@ -488,18 +494,18 @@ void VideoSink::RenderVideoFrames(int32_
 
     if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
 
     MOZ_ASSERT(!aClockTimeStamp.IsNull());
     int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
-    TimeStamp t =
-        aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
+    TimeStamp t = aClockTimeStamp +
+                  TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
     if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
       // Timestamps out of order; drop the new frame. In theory we should
       // probably replace the previous frame with the new frame if the
       // timestamps are equal, but this is a corrupt video file already so
       // never mind.
       continue;
     }
     MOZ_ASSERT(!t.IsNull());
@@ -602,18 +608,19 @@ void VideoSink::UpdateRenderedVideoFrame
   VideoQueue().GetFirstElements(2, &frames);
   if (frames.Length() < 2) {
     return;
   }
 
   int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
                            MIN_UPDATE_INTERVAL_US);
-  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
-                                   delta / mAudioSink->PlaybackRate());
+  TimeStamp target =
+      nowTime + TimeDuration::FromMicroseconds(
+                    delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
 
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
       target, [self]() { self->UpdateRenderedVideoFramesByTimer(); },
       [self]() { self->UpdateRenderedVideoFramesByTimer(); });
 }
 
 void VideoSink::MaybeResolveEndPromise() {
@@ -635,17 +642,17 @@ void VideoSink::MaybeResolveEndPromise()
     TimeStamp nowTime;
     const auto clockTime = mAudioSink->GetPosition(&nowTime);
     if (clockTime < mVideoFrameEndTime) {
       VSINK_LOG_V(
           "Not reach video end time yet, reschedule timer to resolve "
           "end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
           clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
       int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
-                      mAudioSink->PlaybackRate();
+                      mAudioSink->GetPlaybackParams().mPlaybackRate;
       TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
       auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
         self->mEndPromiseHolder.ResolveIfExists(true, __func__);
         self->mUpdateScheduler.CompleteRequest();
       };
       mUpdateScheduler.Ensure(target, std::move(resolveEndPromise),
                               std::move(resolveEndPromise));
     } else {
--- a/dom/media/mediasink/VideoSink.h
+++ b/dom/media/mediasink/VideoSink.h
@@ -27,34 +27,36 @@ class MediaQueue;
 class VideoSink : public MediaSink {
   typedef mozilla::layers::ImageContainer::ProducerID ProducerID;
 
  public:
   VideoSink(AbstractThread* aThread, MediaSink* aAudioSink,
             MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
             FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);
 
+  const PlaybackParams& GetPlaybackParams() const override;
+
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
+
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
 
   TimeUnit GetEndTime(TrackType aType) const override;
 
   TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
 
   bool HasUnplayedFrames(TrackType aType) const override;
 
   void SetPlaybackRate(double aPlaybackRate) override;
 
   void SetVolume(double aVolume) override;
 
   void SetPreservesPitch(bool aPreservesPitch) override;
 
   void SetPlaying(bool aPlaying) override;
 
-  double PlaybackRate() const override;
-
   void Redraw(const VideoInfo& aInfo) override;
 
   nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;
 
   void Stop() override;
 
   bool IsStarted() const override;
 
--- a/dom/media/mediasink/moz.build
+++ b/dom/media/mediasink/moz.build
@@ -3,16 +3,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     'AudioSink.cpp',
     'AudioSinkWrapper.cpp',
     'DecodedStream.cpp',
+    'OutputStreamManager.cpp',
     'VideoSink.cpp',
 ]
 
 EXPORTS += [
     'MediaSink.h'
 ]
 
 include('/ipc/chromium/chromium-config.mozbuild')
--- a/dom/media/mediasource/SourceBufferResource.cpp
+++ b/dom/media/mediasource/SourceBufferResource.cpp
@@ -19,21 +19,21 @@ mozilla::LogModule* GetSourceBufferResou
   DDMOZ_LOG(GetSourceBufferResourceLog(), mozilla::LogLevel::Debug, \
             "::%s: " arg, __func__, ##__VA_ARGS__)
 #define SBR_DEBUGV(arg, ...)                                          \
   DDMOZ_LOG(GetSourceBufferResourceLog(), mozilla::LogLevel::Verbose, \
             "::%s: " arg, __func__, ##__VA_ARGS__)
 
 namespace mozilla {
 
-RefPtr<GenericPromise> SourceBufferResource::Close() {
+nsresult SourceBufferResource::Close() {
   MOZ_ASSERT(OnThread());
   SBR_DEBUG("Close");
   mClosed = true;
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 nsresult SourceBufferResource::ReadAt(int64_t aOffset, char* aBuffer,
                                       uint32_t aCount, uint32_t* aBytes) {
   SBR_DEBUG("ReadAt(aOffset=%" PRId64 ", aBuffer=%p, aCount=%u, aBytes=%p)",
             aOffset, aBytes, aCount, aBytes);
   return ReadAtInternal(aOffset, aBuffer, aCount, aBytes);
 }
--- a/dom/media/mediasource/SourceBufferResource.h
+++ b/dom/media/mediasource/SourceBufferResource.h
@@ -31,17 +31,17 @@ class SourceBuffer;
 DDLoggedTypeDeclNameAndBase(SourceBufferResource, MediaResource);
 
 // SourceBufferResource is not thread safe.
 class SourceBufferResource final
     : public MediaResource,
       public DecoderDoctorLifeLogger<SourceBufferResource> {
  public:
   SourceBufferResource();
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
                   uint32_t* aBytes) override;
   // Memory-based and no locks, caching discouraged.
   bool ShouldCacheReads() override { return false; }
   void Pin() override { UNIMPLEMENTED(); }
   void Unpin() override { UNIMPLEMENTED(); }
   int64_t GetLength() override { return mInputBuffer.GetLength(); }
   int64_t GetNextCachedData(int64_t aOffset) override {
--- a/dom/media/test/test_mediatrack_consuming_mediaresource.html
+++ b/dom/media/test/test_mediatrack_consuming_mediaresource.html
@@ -5,29 +5,29 @@
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-const manager = new MediaTestManager;
+var manager = new MediaTestManager;
 
 function startTest(test, token) {
-  const elemType = getMajorMimeType(test.type);
-  const element = document.createElement(elemType);
+  var elemType = getMajorMimeType(test.type);
+  var element = document.createElement(elemType);
 
-  let audioOnchange = 0;
-  let audioOnaddtrack = 0;
-  let audioOnremovetrack = 0;
-  let videoOnchange = 0;
-  let videoOnaddtrack = 0;
-  let videoOnremovetrack = 0;
-  let isPlaying = false;
+  var audioOnchange = 0;
+  var audioOnaddtrack = 0;
+  var audioOnremovetrack = 0;
+  var videoOnchange = 0;
+  var videoOnaddtrack = 0;
+  var videoOnremovetrack = 0;
+  var isPlaying = false;
 
   isnot(element.audioTracks, undefined,
         'HTMLMediaElement::AudioTracks() property should be available.');
   isnot(element.videoTracks, undefined,
         'HTMLMediaElement::VideoTracks() property should be available.');
 
   element.audioTracks.onaddtrack = function(e) {
     audioOnaddtrack++;
@@ -48,53 +48,36 @@ function startTest(test, token) {
   element.videoTracks.onremovetrack = function(e) {
     videoOnremovetrack++;
   }
 
   element.videoTracks.onchange = function(e) {
     videoOnchange++;
   }
 
-  function checkTrackNotRemoved() {
-    is(audioOnremovetrack, 0, 'Should have no calls of onremovetrack on audioTracks.');
-    is(videoOnremovetrack, 0, 'Should have no calls of onremovetrack on videoTracks.');
+  function checkTrackRemoved() {
     if (isPlaying) {
-      is(element.audioTracks.length, test.hasAudio ? 1 : 0,
-        'Expected length of audioTracks.');
-      is(element.videoTracks.length, test.hasVideo ? 1 : 0,
-        'Expected length of videoTracks.');
-    }
-  }
-
-  function checkTrackRemoved() {
-    is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
-    is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
-    if (isPlaying) {
-      is(audioOnremovetrack, test.hasAudio ? 1 : 0,
-        'Expected calls of onremovetrack on audioTracks.');
-      is(videoOnremovetrack, test.hasVideo ? 1 : 0,
-        'Expected calls of onremovetrack on videoTracks.');
+      if (test.hasAudio) {
+        is(audioOnremovetrack, 1, 'Calls of onremovetrack on audioTracks should be 1.');
+        is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
+      }
+      if (test.hasVideo) {
+        is(videoOnremovetrack, 1, 'Calls of onremovetrack on videoTracks should be 1.');
+        is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
+      }
     }
   }
 
   function onended() {
     ok(true, 'Event ended is expected to be fired on element.');
-    checkTrackNotRemoved();
+    checkTrackRemoved();
     element.onended = null;
     element.onplaying = null;
     element.onpause = null;
-    element.src = "";
-    is(element.audioTracks.length, 0, 'audioTracks have been forgotten');
-    is(element.videoTracks.length, 0, 'videoTracks have been forgotten');
-    is(audioOnremovetrack, 0, 'No audio removetrack events yet');
-    is(videoOnremovetrack, 0, 'No video removetrack events yet');
-    setTimeout(() => {
-      checkTrackRemoved();
-      manager.finished(element.token);
-    }, 100);
+    manager.finished(element.token);
   }
 
   function checkTrackAdded() {
     isPlaying = true;
     if (test.hasAudio) {
       is(audioOnaddtrack, 1, 'Calls of onaddtrack on audioTracks should be 1.');
       is(element.audioTracks.length, 1, 'The length of audioTracks should be 1.');
       ok(element.audioTracks[0].enabled, 'Audio track should be enabled as default.');
--- a/dom/media/test/test_mediatrack_replay_from_end.html
+++ b/dom/media/test/test_mediatrack_replay_from_end.html
@@ -5,39 +5,38 @@
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
   <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-const manager = new MediaTestManager;
+var manager = new MediaTestManager;
 
 function startTest(test, token) {
   // Scenario to test:
   // 1. Audio tracks and video tracks should be added to the track list when
-  //    metadata has loaded, and all tracks should remain even after we seek to
-  //    the end.
-  // 2. No tracks should be added back to the list if we replay from the end,
-  //    and no tracks should be removed from the list after we seek to the end.
-  // 3. After seek to the middle from end of playback, all tracks should remain
-  //    in the list if we play from here, and no tracks should be removed from
-  //    the list after we seek to the end.
-  // 4. Unsetting the media element's src attribute should remove all tracks.
+  //    playing, and all tracks should be removed from the list after we seek
+  //    to the end.
+  // 2. All tracks should be added back to the list if we replay from the end,
+  //    and all tracks should be removed from the list after we seek to the end.
+  // 3. After seek to the middle from end of playback, all tracks should be
+  //    added back to the list if we play from here, and all tracks should be
+  //    removed from the list after we seek to the end.
 
-  const elemType = getMajorMimeType(test.type);
-  const element = document.createElement(elemType);
+  var elemType = getMajorMimeType(test.type);
+  var element = document.createElement(elemType);
 
-  let audioOnaddtrack = 0;
-  let audioOnremovetrack = 0;
-  let videoOnaddtrack = 0;
-  let videoOnremovetrack = 0;
-  let isPlaying = false;
-  let steps = 0;
+  var audioOnaddtrack = 0;
+  var audioOnremovetrack = 0;
+  var videoOnaddtrack = 0;
+  var videoOnremovetrack = 0;
+  var isPlaying = false;
+  var steps = 0;
 
   element.audioTracks.onaddtrack = function(e) {
     audioOnaddtrack++;
   }
 
   element.audioTracks.onremovetrack = function(e) {
     audioOnremovetrack++;
   }
@@ -45,33 +44,26 @@ function startTest(test, token) {
   element.videoTracks.onaddtrack = function(e) {
     videoOnaddtrack++;
   }
 
   element.videoTracks.onremovetrack = function(e) {
     videoOnremovetrack++;
   }
 
-  function testExpectedAddtrack(expectedCalls) {
+  function testTrackEventCalls(expectedCalls) {
     if (test.hasAudio) {
       is(audioOnaddtrack, expectedCalls,
          'Calls of onaddtrack on audioTracks should be '+expectedCalls+' times.');
+      is(audioOnremovetrack, expectedCalls,
+         'Calls of onremovetrack on audioTracks should be '+expectedCalls+' times.');
     }
     if (test.hasVideo) {
       is(videoOnaddtrack, expectedCalls,
          'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
-    }
-  }
-
-  function testExpectedRemovetrack(expectedCalls) {
-    if (test.hasAudio) {
-      is(audioOnremovetrack, expectedCalls,
-         'Calls of onremovetrack on audioTracks should be '+expectedCalls+' times.');
-    }
-    if (test.hasVideo) {
       is(videoOnremovetrack, expectedCalls,
          'Calls of onremovetrack on videoTracks should be '+expectedCalls+' times.');
     }
   }
 
   function finishTesting() {
     element.onpause = null;
     element.onseeked = null;
@@ -79,39 +71,31 @@ function startTest(test, token) {
     element.onended = null;
     manager.finished(element.token);
   }
 
   function onended() {
     if (isPlaying) {
       switch(steps) {
         case 1:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
+          testTrackEventCalls(1);
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
         case 2:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
+          testTrackEventCalls(2);
           element.currentTime = element.duration * 0.5;
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
         case 3:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
-          element.src = "";
-          setTimeout(() => {
-            testExpectedAddtrack(1);
-            testExpectedRemovetrack(1);
-            finishTesting();
-          }, 0);
+          testTrackEventCalls(3);
+          finishTesting();
           break;
       }
     } else {
       ok(true, 'Finish the test anyway if ended is fired before other events.');
       finishTesting();
     }
   }
 
--- a/dom/media/test/test_streams_element_capture_reset.html
+++ b/dom/media/test/test_streams_element_capture_reset.html
@@ -1,137 +1,134 @@
 <!DOCTYPE HTML>
 <html>
 <head>
-  <title>Test that reloading and seeking in a media element that's being captured behaves as expected</title>
+  <title>Test that reloading and seeking in a media element that's being captured doesn't crash</title>
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-  <script src="manifest.js"></script>
+  <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <video id="v"></video>
 <video id="vout"></video>
 <video id="vout_untilended"></video>
 <pre id="test">
-<script>
-const v = document.getElementById('v');
-const vout = document.getElementById('vout');
-const vout_untilended = document.getElementById('vout_untilended');
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var v = document.getElementById('v');
+var vout = document.getElementById('vout');
+var vout_untilended = document.getElementById('vout_untilended');
 
 function dumpEvent(event) {
-  const video = event.target;
-  info(
-    `${video.name}:${video.id} GOT EVENT ${event.type} ` +
-    `currentTime=${video.currentTime} paused=${video.paused} ` +
-    `ended=${video.ended} readyState=${video.readyState}`
-  );
+  var video = event.target;
+  info(video.name + " GOT EVENT " + event.type +
+       " currentTime=" + video.currentTime +
+       " paused=" + video.paused +
+       " ended=" + video.ended +
+       " readyState=" + video.readyState);
 }
 
-function unexpected(event) {
-  ok(false, `${event.type} event received on ${event.target.id} unexpectedly`);
-};
-
-const events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
-for (const e of events) {
-  v.addEventListener(e, dumpEvent);
-  vout.addEventListener(e, dumpEvent);
-  vout_untilended.addEventListener(e, dumpEvent);
+var events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
+for (var i = 0; i < events.length; ++i) {
+  v.addEventListener(events[i], dumpEvent);
 }
 
 function isWithinEps(a, b, msg) {
   ok(Math.abs(a - b) < 0.01,
      "Got " + a + ", expected " + b + "; " + msg);
 }
 
 function isGreaterThanOrEqualEps(a, b, msg) {
   ok(a >= b - 0.01,
      "Got " + a + ", expected at least " + b + "; " + msg);
 }
 
-async function startTest(test) {
-  const seekTime = test.duration/2;
+function startTest(test) {
+  var seekTime = test.duration/2;
+
+  function endedAfterReplay() {
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at third 'ended' event");
+    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration*2,
+	            "checking vout.currentTime after seeking, playing through and reloading");
+    SimpleTest.finish();
+  };
+
+  function endedAfterSeek() {
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at second 'ended' event");
+    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration,
+                "checking vout.currentTime after seeking and playing through again");
+    v.removeEventListener("ended", endedAfterSeek);
+    v.addEventListener("ended", endedAfterReplay);
+    v.src = test.name + "?1";
+    v.play();
+  };
+
+  function seeked() {
+    isGreaterThanOrEqualEps(v.currentTime, seekTime, "Finished seeking");
+    isGreaterThanOrEqualEps(vout.currentTime, test.duration,
+                "checking vout.currentTime has not changed after seeking");
+    v.removeEventListener("seeked", seeked);
+    function dontPlayAgain() {
+      ok(false, "vout_untilended should not play again");
+    }
+    vout_untilended.addEventListener("playing", dontPlayAgain);
+    vout_untilended.addEventListener("ended", dontPlayAgain);
+    v.addEventListener("ended", endedAfterSeek);
+    v.play();
+  };
+
+  function ended() {
+    // Don't compare current time until both v and vout_untilended are ended,
+    // otherwise, current time could be smaller than the duration.
+    if (!v.ended || !vout_untilended.ended) {
+      return;
+    }
+
+    isGreaterThanOrEqualEps(vout.currentTime, test.duration, "checking vout.currentTime at first 'ended' event");
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at first 'ended' event");
+    is(vout.ended, false, "checking vout has not ended");
+    is(vout_untilended.ended, true, "checking vout_untilended has actually ended");
+
+    v.removeEventListener("ended", ended);
+    vout_untilended.removeEventListener("ended", ended);
+
+    v.pause();
+    v.currentTime = seekTime;
+    v.addEventListener("seeked", seeked);
+  };
+
+  v.addEventListener("ended", ended);
+  vout_untilended.addEventListener("ended", ended);
+
+  function checkNoEnded() {
+    ok(false, "ended event received unexpectedly");
+  };
+
+  vout.addEventListener("ended", checkNoEnded);
 
   v.src = test.name;
   v.name = test.name;
-  vout.name = test.name;
-  vout_untilended.name = test.name;
   v.preload = "metadata";
-  await new Promise(r => v.onloadedmetadata = r);
-
-  vout.srcObject = v.mozCaptureStream();
-  vout.play();
-
-  vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
-  vout_untilended.play();
-
-  v.play();
 
-  await new Promise(r => v.onended = r);
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at first 'ended' event");
-
-  await Promise.all([
-    new Promise(r => vout.onended = r),
-    new Promise(r => vout_untilended.onended = r),
-  ]);
-
-  isGreaterThanOrEqualEps(vout.currentTime, test.duration,
-    "checking vout.currentTime at first 'ended' event");
-  ok(vout.ended, "checking vout has actually ended");
-  ok(vout_untilended.ended, "checking vout_untilended has actually ended");
-
-  vout_untilended.srcObject.onaddtrack = unexpected;
-  vout_untilended.onplaying = unexpected;
-  vout_untilended.onended = unexpected;
+  function loadedmetadata() {
+    vout.srcObject = v.mozCaptureStream();
+    vout.play();
 
-  const voutPreSeekCurrentTime = vout.currentTime;
-  v.currentTime = seekTime;
-  await new Promise(r => v.onseeked = r);
-
-  is(v.currentTime, seekTime, "Finished seeking");
-  is(vout.currentTime, voutPreSeekCurrentTime,
-    "checking vout.currentTime has not changed after seeking");
-
-  v.play();
-  vout.play();
-
-  await new Promise(r => v.onended = r);
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at second 'ended' event");
+    vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
+    vout_untilended.play();
 
-  await new Promise(r => vout.onended = r);
-  isGreaterThanOrEqualEps(vout.currentTime,
-    (test.duration - seekTime) + test.duration,
-    "checking vout.currentTime after seeking and playing through again");
-
-  v.src = test.name + "?1";
-  v.play();
-  vout.play();
+    v.play();
+  };
 
-  await new Promise(r => v.onended = r);
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at third 'ended' event");
-
-  await new Promise(r => vout.onended = r);
-  isGreaterThanOrEqualEps(vout.currentTime,
-    (test.duration - seekTime) + test.duration*2,
-    "checking vout.currentTime after seeking, playing through and reloading");
+  v.addEventListener("loadedmetadata", loadedmetadata, {once: true});
 }
 
-(async () => {
-  SimpleTest.waitForExplicitFinish();
-  try {
-    const testVideo = getPlayableVideo(gSmallTests);
-    if (testVideo) {
-      await startTest(testVideo);
-    } else {
-      todo(false, "No playable video");
-    }
-  } catch(e) {
-    ok(false, `Error: ${e}`);
-  } finally {
-    SimpleTest.finish();
-  }
-})();
+var testVideo = getPlayableVideo(gSmallTests);
+if (testVideo) {
+  startTest(testVideo);
+} else {
+  todo(false, "No playable video");
+}
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webaudio/MediaElementAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaElementAudioSourceNode.cpp
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaElementAudioSourceNode.h"
 #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
 #include "AudioDestinationNode.h"
 #include "nsIScriptError.h"
 #include "AudioNodeTrack.h"
-#include "MediaStreamTrack.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(MediaElementAudioSourceNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaElementAudioSourceNode)
   tmp->Destroy();
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -71,18 +71,18 @@ void MediaStreamAudioSourceNode::Init(DO
   }
 
   mInputStream = aMediaStream;
   AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
   mTrack = AudioNodeExternalInputTrack::Create(Context()->Graph(), engine);
   mInputStream->AddConsumerToKeepAlive(ToSupports(this));
 
   mInputStream->RegisterTrackListener(this);
-  if (mInputStream->Audible()) {
-    NotifyAudible();
+  if (mInputStream->Active()) {
+    NotifyActive();
   }
   AttachToRightTrack(mInputStream, aRv);
 }
 
 void MediaStreamAudioSourceNode::Destroy() {
   if (mInputStream) {
     mInputStream->UnregisterTrackListener(this);
     mInputStream = nullptr;
@@ -114,17 +114,16 @@ void MediaStreamAudioSourceNode::AttachT
   }
 
   mInputTrack = aTrack;
   ProcessedMediaTrack* outputTrack =
       static_cast<ProcessedMediaTrack*>(mTrack.get());
   mInputPort = mInputTrack->ForwardTrackContentsTo(outputTrack);
   PrincipalChanged(mInputTrack);  // trigger enabling/disabling of the connector
   mInputTrack->AddPrincipalChangeObserver(this);
-  MarkActive();
 }
 
 void MediaStreamAudioSourceNode::DetachFromTrack() {
   if (mInputTrack) {
     mInputTrack->RemovePrincipalChangeObserver(this);
     mInputTrack = nullptr;
   }
   if (mInputPort) {
@@ -161,16 +160,17 @@ void MediaStreamAudioSourceNode::AttachT
     if (mBehavior == FollowChanges) {
       if (track->Ended()) {
         continue;
       }
     }
 
     if (!track->Ended()) {
       AttachToTrack(track, aRv);
+      MarkActive();
     }
     return;
   }
 
   // There was no track available. We'll allow the node to be garbage collected.
   MarkInactive();
 }
 
@@ -197,17 +197,17 @@ void MediaStreamAudioSourceNode::NotifyT
       return;
     }
 
     DetachFromTrack();
     AttachToRightTrack(mInputStream, IgnoreErrors());
   }
 }
 
-void MediaStreamAudioSourceNode::NotifyAudible() {
+void MediaStreamAudioSourceNode::NotifyActive() {
   MOZ_ASSERT(mInputStream);
   Context()->StartBlockedAudioContextIfAllowed();
 }
 
 /**
  * Changes the principal. Note that this will be called on the main thread, but
  * changes will be enacted on the MediaTrackGraph thread. If the principal
  * change results in the document principal losing access to the stream, then
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -86,17 +86,17 @@ class MediaStreamAudioSourceNode
   // Attaches to the first audio track in the MediaStream, when the tracks are
   // ordered by id.
   void AttachToRightTrack(const RefPtr<DOMMediaStream>& aMediaStream,
                           ErrorResult& aRv);
 
   // From DOMMediaStream::TrackListener.
   void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
-  void NotifyAudible() override;
+  void NotifyActive() override;
 
   // From PrincipalChangeObserver<MediaStreamTrack>.
   void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
 
   // This allows implementing the correct behaviour for both
   // MediaElementAudioSourceNode and MediaStreamAudioSourceNode, that have most
   // of their behaviour shared.
   enum TrackChangeBehavior {
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html.ini
@@ -0,0 +1,8 @@
+[mediaElementAudioSourceToScriptProcessorTest.html]
+  disabled:
+    if (os == "mac") and (version == "OS X 10.14"): new platform
+    if (os == "android") and debug: https://bugzilla.mozilla.org/show_bug.cgi?id=1546756
+  [All data processed correctly]
+    expected:
+      if processor == "aarch64": ["PASS", "FAIL"]
+