Bug 1172394 - Refactor how DecodedStream is set up. r=padenot
☠☠ backed out by 7272d77d4e80 ☠☠
author Andreas Pehrson <apehrson@mozilla.com>
Wed, 13 Nov 2019 08:55:39 +0000
changeset 501787 c57c41e8c39ea51e96c13af8ecdcbe1640e2a9d6
parent 501786 a796541fe5ef045738f1c10d95830b71fc5c024d
child 501788 1c45b135318d8cb519b8d58af72ab864fe6bfd62
push id 114172
push user dluca@mozilla.com
push date Tue, 19 Nov 2019 11:31:10 +0000
treeherder mozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers padenot
bugs 1172394
milestone 72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1172394 - Refactor how DecodedStream is set up. r=padenot

This patch removes the responsibility of js-facing MediaStreamTracks from the
MediaDecoder stack, and moves the machinery for setting up DecodedStream to
higher order functions like state mirroring and watchables.

OutputStreamManager is completely gone, since it was designed to manage
MediaStreamTracks across multiple output streams for a single decoder, on main
thread. HTMLMediaElement took over its task in the previous patch.

The MediaDecoderStateMachine now has three control points for capturing:

- mOutputCaptured, which, if true, will capture all decoded data into
  mOutputTracks. If this is set, but mOutputTracks is empty, we are still
  waiting for tracks, and DecodedStream will not play any data. When tracks
  are set, a new DecodedStream is created that will play data through
  SourceMediaTracks piped into mOutputTracks.

- mOutputTracks, which is the set of tracks data is captured into, for
  forwarding to all the output tracks the media element is managing. This set
  of tracks is managed by the MediaDecoder owner, and must contain one audio
  track if the decoder is decoding audio, and one video track if the decoder
  is decoding video. It may be empty since output can be captured before
  metadata is loaded, or playback has ended.

- mOutputPrincipal, which is the principal of the decoded data. All data sent
  into SourceMediaTracks is tagged with this principal.

Differential Revision: https://phabricator.services.mozilla.com/D52042
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/OutputStreamManager.cpp
dom/media/mediasink/OutputStreamManager.h
dom/media/mediasink/moz.build
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -230,40 +230,45 @@ RefPtr<GenericPromise> MediaDecoder::Set
   AbstractThread::AutoEnter context(AbstractMainThread());
   return GetStateMachine()->InvokeSetSink(aSink);
 }
 
 void MediaDecoder::SetOutputCaptured(bool aCaptured) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  MOZ_CRASH("Not implemented");
+  mOutputCaptured = aCaptured;
 }
 
 void MediaDecoder::AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  MOZ_CRASH("Not implemented");
+  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
+  tracks.AppendElement(std::move(aTrack));
+  mOutputTracks = tracks;
 }
 
 void MediaDecoder::RemoveOutputTrack(
     const RefPtr<ProcessedMediaTrack>& aTrack) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  MOZ_CRASH("Not implemented");
+  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
+  if (tracks.RemoveElement(aTrack)) {
+    mOutputTracks = tracks;
+  }
 }
 
 void MediaDecoder::SetOutputTracksPrincipal(
     const RefPtr<nsIPrincipal>& aPrincipal) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  MOZ_CRASH("Not implemented");
+  mOutputPrincipal = MakePrincipalHandle(aPrincipal);
 }
 
 double MediaDecoder::GetDuration() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return mDuration;
 }
 
@@ -299,16 +304,19 @@ MediaDecoder::MediaDecoder(MediaDecoderI
       mLogicallySeeking(false, "MediaDecoder::mLogicallySeeking"),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mCurrentPosition, TimeUnit::Zero()),
       INIT_MIRROR(mStateMachineDuration, NullableTimeUnit()),
       INIT_MIRROR(mIsAudioDataAudible, false),
       INIT_CANONICAL(mVolume, aInit.mVolume),
       INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
       INIT_CANONICAL(mLooping, aInit.mLooping),
+      INIT_CANONICAL(mOutputCaptured, false),
+      INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
       mSameOriginMedia(false),
       mVideoDecodingOberver(
           new BackgroundVideoDecodingPermissionObserver(this)),
       mIsBackgroundVideoDecodingAllowed(false),
       mTelemetryReported(false),
       mContainerType(aInit.mContainerType) {
   MOZ_ASSERT(NS_IsMainThread());
@@ -660,17 +668,16 @@ void MediaDecoder::MetadataLoaded(
   LOG("MetadataLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d",
       aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
       aInfo->HasVideo());
 
   mMediaSeekable = aInfo->mMediaSeekable;
   mMediaSeekableOnlyInBufferedRanges =
       aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.release();
-  mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
     GetOwner()->MetadataLoaded(mInfo, std::move(aTags));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -44,17 +44,16 @@ class AbstractThread;
 class DOMMediaStream;
 class DecoderBenchmark;
 class ProcessedMediaTrack;
 class FrameStatistics;
 class VideoFrameContainer;
 class MediaFormatReader;
 class MediaDecoderStateMachine;
 struct MediaPlaybackEvent;
-struct SharedDummyTrack;
 
 enum class Visibility : uint8_t;
 
 struct MOZ_STACK_CLASS MediaDecoderInit {
   MediaDecoderOwner* const mOwner;
   const double mVolume;
   const bool mPreservesPitch;
   const double mPlaybackRate;
@@ -163,21 +162,21 @@ class MediaDecoder : public DecoderDocto
   // All MediaStream-related data is protected by mReentrantMonitor.
   // We have at most one DecodedStreamData per MediaDecoder. Its stream
   // is used as the input for each ProcessedMediaTrack created by calls to
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
   // Turn output capturing of this decoder on or off. If it is on, the
-  // MediaDecoderStateMachine will only create a MediaSink after output tracks
-  // have been set. This is to ensure that it doesn't create a regular MediaSink
+  // MediaDecoderStateMachine's media sink will only play after output tracks
+  // have been set. This is to ensure that it doesn't skip over any data
   // while the owner has intended to capture the full output, thus missing to
   // capture some of it. The owner of the MediaDecoder is responsible for adding
-  // output tracks while the output is captured.
+  // output tracks in a timely fashion while the output is captured.
   void SetOutputCaptured(bool aCaptured);
   // Add an output track. All decoder output for the track's media type will be
   // sent to the track.
   // Note that only one audio track and one video track is supported by
   // MediaDecoder at this time. Passing in more of one type, or passing in a
   // type that metadata says we are not decoding, is an error.
   void AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack);
   // Remove an output track added with AddOutputTrack.
@@ -610,16 +609,26 @@ class MediaDecoder : public DecoderDocto
 
   // Volume of playback.  0.0 = muted. 1.0 = full volume.
   Canonical<double> mVolume;
 
   Canonical<bool> mPreservesPitch;
 
   Canonical<bool> mLooping;
 
+  // Whether this MediaDecoder's output is captured. When captured, all decoded
+  // data must be played out through mOutputTracks.
+  Canonical<bool> mOutputCaptured;
+
+  // Tracks that, if set, will get data routed through them.
+  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
+
+  // PrincipalHandle to be used when feeding data into mOutputTracks.
+  Canonical<PrincipalHandle> mOutputPrincipal;
+
   // Media duration set explicitly by JS. At present, this is only ever present
   // for MSE.
   Maybe<double> mExplicitDuration;
 
   // Set to one of the valid play states.
   // This can only be changed on the main thread while holding the decoder
   // monitor. Thus, it can be safely read while holding the decoder monitor
   // OR on the main thread.
@@ -642,16 +651,26 @@ class MediaDecoder : public DecoderDocto
   bool mIsBackgroundVideoDecodingAllowed;
 
  public:
   AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
   AbstractCanonical<bool>* CanonicalPreservesPitch() {
     return &mPreservesPitch;
   }
   AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
+  AbstractCanonical<bool>* CanonicalOutputCaptured() {
+    return &mOutputCaptured;
+  }
+  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
+  CanonicalOutputTracks() {
+    return &mOutputTracks;
+  }
+  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
+    return &mOutputPrincipal;
+  }
   AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
 
  private:
   // Notify owner when the audible state changed
   void NotifyAudibleStateChanged();
 
   bool mTelemetryReported;
   const MediaContainerType mContainerType;
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -6,17 +6,16 @@
 
 #include <algorithm>
 #include <stdint.h>
 #include <utility>
 
 #include "mediasink/AudioSink.h"
 #include "mediasink/AudioSinkWrapper.h"
 #include "mediasink/DecodedStream.h"
-#include "mediasink/OutputStreamManager.h"
 #include "mediasink/VideoSink.h"
 #include "mozilla/Logging.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/Telemetry.h"
@@ -2587,16 +2586,19 @@ RefPtr<ShutdownPromise> MediaDecoderStat
   master->mOnMediaNotSeekable.Disconnect();
 
   // Disconnect canonicals and mirrors before shutting down our task queue.
   master->mBuffered.DisconnectIfConnected();
   master->mPlayState.DisconnectIfConnected();
   master->mVolume.DisconnectIfConnected();
   master->mPreservesPitch.DisconnectIfConnected();
   master->mLooping.DisconnectIfConnected();
+  master->mOutputCaptured.DisconnectIfConnected();
+  master->mOutputTracks.DisconnectIfConnected();
+  master->mOutputPrincipal.DisconnectIfConnected();
 
   master->mDuration.DisconnectAll();
   master->mCurrentPosition.DisconnectAll();
   master->mIsAudioDataAudible.DisconnectAll();
 
   // Shut down the watch manager to stop further notifications.
   master->mWatchManager.Shutdown();
 
@@ -2622,30 +2624,34 @@ MediaDecoderStateMachine::MediaDecoderSt
                                /* aSupportsTailDispatch = */ true)),
       mWatchManager(this, mTaskQueue),
       mDispatchedStateMachine(false),
       mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
       mCurrentFrameID(0),
       mReader(new ReaderProxy(mTaskQueue, aReader)),
       mPlaybackRate(1.0),
       mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
-      mAudioCaptured(false),
       mMinimizePreroll(aDecoder->GetMinimizePreroll()),
       mSentFirstFrameLoadedEvent(false),
       mVideoDecodeSuspended(false),
       mVideoDecodeSuspendTimer(mTaskQueue),
-      mOutputStreamManager(nullptr),
       mVideoDecodeMode(VideoDecodeMode::Normal),
       mIsMSE(aDecoder->IsMSE()),
       mSeamlessLoopingAllowed(false),
       INIT_MIRROR(mBuffered, TimeIntervals()),
       INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
       INIT_MIRROR(mVolume, 1.0),
       INIT_MIRROR(mPreservesPitch, true),
       INIT_MIRROR(mLooping, false),
+      INIT_MIRROR(mOutputCaptured, false),
+      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
+      INIT_CANONICAL(mCanonicalOutputTracks,
+                     nsTArray<RefPtr<ProcessedMediaTrack>>()),
+      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mDuration, NullableTimeUnit()),
       INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
       INIT_CANONICAL(mIsAudioDataAudible, false),
       mSetSinkRequestsCount(0) {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   InitVideoQueuePrefs();
@@ -2666,25 +2672,36 @@ void MediaDecoderStateMachine::Initializ
   MOZ_ASSERT(OnTaskQueue());
 
   // Connect mirrors.
   mBuffered.Connect(mReader->CanonicalBuffered());
   mPlayState.Connect(aDecoder->CanonicalPlayState());
   mVolume.Connect(aDecoder->CanonicalVolume());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mLooping.Connect(aDecoder->CanonicalLooping());
+  mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
+  mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
+  mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
 
   // Initialize watchers.
   mWatchManager.Watch(mBuffered,
                       &MediaDecoderStateMachine::BufferedRangeUpdated);
   mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
   mWatchManager.Watch(mPreservesPitch,
                       &MediaDecoderStateMachine::PreservesPitchChanged);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
   mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);
+  mWatchManager.Watch(mOutputCaptured,
+                      &MediaDecoderStateMachine::UpdateOutputCaptured);
+  mWatchManager.Watch(mOutputTracks,
+                      &MediaDecoderStateMachine::UpdateOutputCaptured);
+  mWatchManager.Watch(mOutputTracks,
+                      &MediaDecoderStateMachine::OutputTracksChanged);
+  mWatchManager.Watch(mOutputPrincipal,
+                      &MediaDecoderStateMachine::OutputPrincipalChanged);
 
   MOZ_ASSERT(!mStateObj);
   auto* s = new DecodeMetadataState(this);
   mStateObj.reset(s);
   s->Enter();
 }
 
 void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
@@ -2703,22 +2720,21 @@ MediaSink* MediaDecoderStateMachine::Cre
         self->mTaskQueue, self.get(),
         &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
   return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
 }
 
 already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
-    bool aAudioCaptured, OutputStreamManager* aManager) {
-  MOZ_ASSERT_IF(aAudioCaptured, aManager);
+    bool aOutputCaptured) {
   RefPtr<MediaSink> audioSink =
-      aAudioCaptured ? new DecodedStream(mTaskQueue, mAbstractMainThread,
-                                         mAudioQueue, mVideoQueue, aManager)
-                     : CreateAudioSink();
+      aOutputCaptured
+          ? new DecodedStream(this, mOutputTracks, mAudioQueue, mVideoQueue)
+          : CreateAudioSink();
 
   RefPtr<MediaSink> mediaSink =
       new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
                     *mFrameStats, sVideoQueueSendToCompositorSize);
   return mediaSink.forget();
 }
 
 TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() {
@@ -2798,17 +2814,17 @@ nsresult MediaDecoderStateMachine::Init(
   mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
       mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
 
   mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());
 
   mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
       OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
 
-  mMediaSink = CreateMediaSink(mAudioCaptured, mOutputStreamManager);
+  mMediaSink = CreateMediaSink(mOutputCaptured);
 
   nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mReader->SetCanonicalDuration(&mDuration);
 
   return NS_OK;
 }
@@ -3335,19 +3351,16 @@ void MediaDecoderStateMachine::FinishDec
   // Get potentially updated metadata
   mReader->ReadUpdatedMetadata(mInfo.ptr());
 
   EnqueueFirstFrameLoadedEvent();
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::BeginShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
-  if (mOutputStreamManager) {
-    mOutputStreamManager->Disconnect();
-  }
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::Shutdown);
 }
 
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::FinishShutdown() {
   MOZ_ASSERT(OnTaskQueue());
   LOG("Shutting down state machine task queue");
   return OwnerThread()->BeginShutdown();
@@ -3426,17 +3439,17 @@ void MediaDecoderStateMachine::UpdatePla
     auto t = std::min(clockTime, maxEndTime);
     // FIXME: Bug 1091422 - chained ogg files hit this assertion.
     // MOZ_ASSERT(t >= GetMediaTime());
     if (loopback || t > GetMediaTime()) {
       UpdatePlaybackPosition(t);
     }
   }
   // Note we have to update playback position before releasing the monitor.
-  // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
+  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().
 
   int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate);
   ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay));
 
   // Notify the listener as we progress in the playback offset. Note it would
   // be too intensive to send notifications for each popped audio/video sample.
@@ -3512,30 +3525,65 @@ void MediaDecoderStateMachine::Preserves
 void MediaDecoderStateMachine::LoopingChanged() {
   MOZ_ASSERT(OnTaskQueue());
   LOGV("LoopingChanged, looping=%d", mLooping.Ref());
   if (mSeamlessLoopingAllowed) {
     mStateObj->HandleLoopingChanged();
   }
 }
 
+void MediaDecoderStateMachine::UpdateOutputCaptured() {
+  MOZ_ASSERT(OnTaskQueue());
+
+  // Reset these flags so they are consistent with the status of the sink.
+  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
+  // to reset these flags when switching MediaSinks.
+  mAudioCompleted = false;
+  mVideoCompleted = false;
+
+  // Stop and shut down the existing sink.
+  StopMediaSink();
+  mMediaSink->Shutdown();
+
+  // Create a new sink according to whether output is captured.
+  mMediaSink = CreateMediaSink(mOutputCaptured);
+
+  // Don't buffer as much when audio is captured because we don't need to worry
+  // about high latency audio devices.
+  mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
+                                         : detail::AMPLE_AUDIO_THRESHOLD;
+
+  mStateObj->HandleAudioCaptured();
+}
+
+void MediaDecoderStateMachine::OutputTracksChanged() {
+  MOZ_ASSERT(OnTaskQueue());
+  LOG("OutputTracksChanged, tracks=%zu", mOutputTracks.Ref().Length());
+  mCanonicalOutputTracks = mOutputTracks;
+}
+
+void MediaDecoderStateMachine::OutputPrincipalChanged() {
+  MOZ_ASSERT(OnTaskQueue());
+  mCanonicalOutputPrincipal = mOutputPrincipal;
+}
+
 RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aSink);
 
   Unused << ++mSetSinkRequestsCount;
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::SetSink, aSink);
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(OnTaskQueue());
-  if (mAudioCaptured) {
+  if (mOutputCaptured) {
     // Not supported yet.
     return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
   }
 
   // Backup current playback parameters.
   bool wasPlaying = mMediaSink->IsPlaying();
 
   if (--mSetSinkRequestsCount > 0) {
@@ -3651,53 +3699,16 @@ void MediaDecoderStateMachine::OnMediaSi
     return;
   }
 
   // Otherwise notify media decoder/element about this error for it makes
   // no sense to play an audio-only file without sound output.
   DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
 }
 
-void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured,
-                                                OutputStreamManager* aManager) {
-  MOZ_ASSERT(OnTaskQueue());
-
-  if (aCaptured == mAudioCaptured) {
-    return;
-  }
-
-  // Rest these flags so they are consistent with the status of the sink.
-  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
-  // to reset these flags when switching MediaSinks.
-  mAudioCompleted = false;
-  mVideoCompleted = false;
-
-  // Backup current playback parameters.
-  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
-
-  // Stop and shut down the existing sink.
-  StopMediaSink();
-  mMediaSink->Shutdown();
-
-  // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink(aCaptured, aManager);
-
-  // Restore playback parameters.
-  mMediaSink->SetPlaybackParams(params);
-
-  mAudioCaptured = aCaptured;
-
-  // Don't buffer as much when audio is captured because we don't need to worry
-  // about high latency audio devices.
-  mAmpleAudioThreshold = mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
-                                        : detail::AMPLE_AUDIO_THRESHOLD;
-
-  mStateObj->HandleAudioCaptured();
-}
-
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
   MOZ_ASSERT(OnTaskQueue());
   return mReader->VideoIsHardwareAccelerated()
              ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
              : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
 }
 
 void MediaDecoderStateMachine::GetDebugInfo(
@@ -3731,96 +3742,16 @@ RefPtr<GenericPromise> MediaDecoderState
                                p->Resolve(true, __func__);
                              }),
       AbstractThread::TailDispatch);
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   Unused << rv;
   return p.forget();
 }
 
-void MediaDecoderStateMachine::SetOutputStreamPrincipal(
-    nsIPrincipal* aPrincipal) {
-  MOZ_ASSERT(NS_IsMainThread());
-  mOutputStreamPrincipal = aPrincipal;
-  if (mOutputStreamManager) {
-    mOutputStreamManager->SetPrincipal(mOutputStreamPrincipal);
-  }
-}
-
-void MediaDecoderStateMachine::AddOutputStream(DOMMediaStream* aStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("AddOutputStream aStream=%p!", aStream);
-  mOutputStreamManager->Add(aStream);
-  nsCOMPtr<nsIRunnable> r =
-      NS_NewRunnableFunction("MediaDecoderStateMachine::SetAudioCaptured",
-                             [self = RefPtr<MediaDecoderStateMachine>(this),
-                              manager = mOutputStreamManager]() {
-                               self->SetAudioCaptured(true, manager);
-                             });
-  nsresult rv = OwnerThread()->Dispatch(r.forget());
-  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-  Unused << rv;
-}
-
-void MediaDecoderStateMachine::RemoveOutputStream(DOMMediaStream* aStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("RemoveOutputStream=%p!", aStream);
-  mOutputStreamManager->Remove(aStream);
-  if (mOutputStreamManager->IsEmpty()) {
-    mOutputStreamManager->Disconnect();
-    mOutputStreamManager = nullptr;
-    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
-        "MediaDecoderStateMachine::SetAudioCaptured",
-        [self = RefPtr<MediaDecoderStateMachine>(this)]() {
-          self->SetAudioCaptured(false);
-        });
-    nsresult rv = OwnerThread()->Dispatch(r.forget());
-    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-    Unused << rv;
-  }
-}
-
-void MediaDecoderStateMachine::EnsureOutputStreamManager(
-    SharedDummyTrack* aDummyStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mOutputStreamManager) {
-    return;
-  }
-  mOutputStreamManager = new OutputStreamManager(
-      aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
-}
-
-void MediaDecoderStateMachine::EnsureOutputStreamManagerHasTracks(
-    const MediaInfo& aLoadedInfo) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (!mOutputStreamManager) {
-    return;
-  }
-  if ((!aLoadedInfo.HasAudio() ||
-       mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
-      (!aLoadedInfo.HasVideo() ||
-       mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
-    return;
-  }
-  if (aLoadedInfo.HasAudio()) {
-    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-    RefPtr<SourceMediaTrack> dummy =
-        mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
-    LOG("Pre-created audio track with underlying track %p", dummy.get());
-    Unused << dummy;
-  }
-  if (aLoadedInfo.HasVideo()) {
-    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-    RefPtr<SourceMediaTrack> dummy =
-        mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
-    LOG("Pre-created video track with underlying track %p", dummy.get());
-    Unused << dummy;
-  }
-}
-
 class VideoQueueMemoryFunctor : public nsDequeFunctor {
  public:
   VideoQueueMemoryFunctor() : mSize(0) {}
 
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);
 
   virtual void operator()(void* aObject) override {
     const VideoData* v = static_cast<const VideoData*>(aObject);
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -101,17 +101,16 @@ hardware (via AudioStream).
 #  include "nsThreadUtils.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class AudioSegment;
 class DecodedStream;
 class DOMMediaStream;
-class OutputStreamManager;
 class ReaderProxy;
 class TaskQueue;
 
 extern LazyLogModule gMediaDecoderLog;
 
 struct MediaPlaybackEvent {
   enum EventType {
     PlaybackStarted,
@@ -181,29 +180,16 @@ class MediaDecoderStateMachine
   };
 
   // Returns the state machine task queue.
   TaskQueue* OwnerThread() const { return mTaskQueue; }
 
   RefPtr<GenericPromise> RequestDebugInfo(
       dom::MediaDecoderStateMachineDebugInfo& aInfo);
 
-  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
-  // If an OutputStreamManager does not exist, one will be created.
-  void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
-  // If an OutputStreamManager exists, tracks matching aLoadedInfo will be
-  // created unless they already exist in the manager.
-  void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
-  // Add an output stream to the output stream manager. The manager must have
-  // been created through EnsureOutputStreamManager() before this.
-  void AddOutputStream(DOMMediaStream* aStream);
-  // Remove an output stream added with AddOutputStream. If the last output
-  // stream was removed, we will also tear down the OutputStreamManager.
-  void RemoveOutputStream(DOMMediaStream* aStream);
-
   // Seeks to the decoder to aTarget asynchronously.
   RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
 
   void DispatchSetPlaybackRate(double aPlaybackRate) {
     OwnerThread()->DispatchStateChange(NewRunnableMethod<double>(
         "MediaDecoderStateMachine::SetPlaybackRate", this,
         &MediaDecoderStateMachine::SetPlaybackRate, aPlaybackRate));
   }
@@ -311,21 +297,16 @@ class MediaDecoderStateMachine
   // on the appropriate threads.
   bool OnTaskQueue() const;
 
   // Initialization that needs to happen on the task queue. This is the first
   // task that gets run on the task queue, and is dispatched from the MDSM
   // constructor immediately after the task queue is created.
   void InitializationTask(MediaDecoder* aDecoder);
 
-  // Sets the audio-captured state and recreates the media sink if needed.
-  // A manager must be passed in if setting the audio-captured state to true.
-  void SetAudioCaptured(bool aCaptured,
-                        OutputStreamManager* aManager = nullptr);
-
   RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);
 
   RefPtr<ShutdownPromise> Shutdown();
 
   RefPtr<ShutdownPromise> FinishShutdown();
 
   // Update the playback position. This can result in a timeupdate event
   // and an invalidate of the frame being dispatched asynchronously if
@@ -389,16 +370,19 @@ class MediaDecoderStateMachine
   void OnVideoPopped(const RefPtr<VideoData>& aSample);
 
   void AudioAudibleChanged(bool aAudible);
 
   void VolumeChanged();
   void SetPlaybackRate(double aPlaybackRate);
   void PreservesPitchChanged();
   void LoopingChanged();
+  void UpdateOutputCaptured();
+  void OutputTracksChanged();
+  void OutputPrincipalChanged();
 
   MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
   MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
 
   // True if we are low in decoded audio/video data.
   // May not be invoked when mReader->UseBufferingHeuristics() is false.
   bool HasLowDecodedData();
 
@@ -433,20 +417,19 @@ class MediaDecoderStateMachine
   void UpdatePlaybackPositionInternal(const media::TimeUnit& aTime);
 
   // Update playback position and trigger next update by default time period.
   // Called on the state machine thread.
   void UpdatePlaybackPositionPeriodically();
 
   MediaSink* CreateAudioSink();
 
-  // Always create mediasink which contains an AudioSink or StreamSink inside.
-  // A manager must be passed in if aAudioCaptured is true.
-  already_AddRefed<MediaSink> CreateMediaSink(
-      bool aAudioCaptured, OutputStreamManager* aManager = nullptr);
+  // Always create mediasink which contains an AudioSink or DecodedStream
+  // inside.
+  already_AddRefed<MediaSink> CreateMediaSink(bool aOutputCaptured);
 
   // Stops the media sink and shut it down.
   // The decoder monitor must be held with exactly one lock count.
   // Called on the state machine thread.
   void StopMediaSink();
 
   // Create and start the media sink.
   // The decoder monitor must be held with exactly one lock count.
@@ -621,21 +604,16 @@ class MediaDecoderStateMachine
   void CancelSuspendTimer();
 
   bool IsInSeamlessLooping() const;
 
   bool mCanPlayThrough = false;
 
   bool mIsLiveStream = false;
 
-  // True if we shouldn't play our audio (but still write it to any capturing
-  // streams). When this is true, the audio thread will never start again after
-  // it has stopped.
-  bool mAudioCaptured;
-
   // True if all audio frames are already rendered.
   bool mAudioCompleted = false;
 
   // True if all video frames are already rendered.
   bool mVideoCompleted = false;
 
   // True if we should not decode/preroll unnecessary samples, unless we're
   // played. "Prerolling" in this context refers to when we decode and
@@ -667,23 +645,16 @@ class MediaDecoderStateMachine
   bool mMediaSeekable = true;
 
   // True if the media is seekable only in buffered ranges.
   bool mMediaSeekableOnlyInBufferedRanges = false;
 
   // Track enabling video decode suspension via timer
   DelayedScheduler mVideoDecodeSuspendTimer;
 
-  // Data about MediaStreams that are being fed by the decoder.
-  // Main thread only.
-  RefPtr<OutputStreamManager> mOutputStreamManager;
-
-  // Principal used by output streams. Main thread only.
-  nsCOMPtr<nsIPrincipal> mOutputStreamPrincipal;
-
   // Track the current video decode mode.
   VideoDecodeMode mVideoDecodeMode;
 
   // Track the complete & error for audio/video separately
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkAudioEndedPromise;
   MozPromiseRequestHolder<MediaSink::EndedPromise> mMediaSinkVideoEndedPromise;
 
   MediaEventListener mAudioQueueListener;
@@ -728,16 +699,29 @@ class MediaDecoderStateMachine
 
   // Pitch preservation for the playback rate.
   Mirror<bool> mPreservesPitch;
 
   // Whether to seek back to the start of the media resource
   // upon reaching the end.
   Mirror<bool> mLooping;
 
+  // Whether all output should be captured into mOutputTracks. While true, the
+  // media sink will only play if there are output tracks.
+  Mirror<bool> mOutputCaptured;
+
+  // Tracks to capture data into.
+  Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
+
+  // PrincipalHandle to feed with data captured into mOutputTracks.
+  Mirror<PrincipalHandle> mOutputPrincipal;
+
+  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mCanonicalOutputTracks;
+  Canonical<PrincipalHandle> mCanonicalOutputPrincipal;
+
   // Duration of the media. This is guaranteed to be non-null after we finish
   // decoding the first frame.
   Canonical<media::NullableTimeUnit> mDuration;
 
   // The time of the current frame, corresponding to the "current
   // playback position" in HTML5. This is referenced from 0, which is the
   // initial playback position.
   Canonical<media::TimeUnit> mCurrentPosition;
@@ -746,16 +730,23 @@ class MediaDecoderStateMachine
   Canonical<bool> mIsAudioDataAudible;
 
   // Used to count the number of pending requests to set a new sink.
   Atomic<int> mSetSinkRequestsCount;
 
  public:
   AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
 
+  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
+  CanonicalOutputTracks() {
+    return &mCanonicalOutputTracks;
+  }
+  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
+    return &mCanonicalOutputPrincipal;
+  }
   AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
     return &mDuration;
   }
   AbstractCanonical<media::TimeUnit>* CanonicalCurrentPosition() {
     return &mCurrentPosition;
   }
   AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
     return &mIsAudioDataAudible;
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -2,20 +2,20 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DecodedStream.h"
 #include "AudioSegment.h"
 #include "MediaData.h"
+#include "MediaDecoderStateMachine.h"
 #include "MediaQueue.h"
 #include "MediaTrackGraph.h"
 #include "MediaTrackListener.h"
-#include "OutputStreamManager.h"
 #include "SharedBuffer.h"
 #include "VideoSegment.h"
 #include "VideoUtils.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/SyncRunnable.h"
 #include "mozilla/gfx/Point.h"
 #include "nsProxyRelease.h"
@@ -52,31 +52,29 @@ class DecodedStreamTrackListener : publi
 
 class DecodedStreamGraphListener {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
  public:
   DecodedStreamGraphListener(
       SourceMediaTrack* aAudioTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
       SourceMediaTrack* aVideoTrack,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
-      AbstractThread* aMainThread)
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
       : mAudioTrackListener(
             aAudioTrack
                 ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
                 : nullptr),
         mAudioEndedHolder(std::move(aAudioEndedHolder)),
         mVideoTrackListener(
             aVideoTrack
                 ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
                 : nullptr),
         mVideoEndedHolder(std::move(aVideoEndedHolder)),
         mAudioTrack(aAudioTrack),
-        mVideoTrack(aVideoTrack),
-        mAbstractMainThread(aMainThread) {
+        mVideoTrack(aVideoTrack) {
     MOZ_ASSERT(NS_IsMainThread());
     if (mAudioTrackListener) {
       mAudioTrack->AddListener(mAudioTrackListener);
     } else {
       mAudioEnded = true;
       mAudioEndedHolder.ResolveIfExists(true, __func__);
     }
 
@@ -203,17 +201,16 @@ class DecodedStreamGraphListener {
   bool mAudioEnded = false;
   bool mVideoEnded = false;
 
   // Any thread.
   const RefPtr<SourceMediaTrack> mAudioTrack;
   const RefPtr<SourceMediaTrack> mVideoTrack;
   Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
   Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
-  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamTrackListener::DecodedStreamTrackListener(
     DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aTrack)
     : mGraphListener(aGraphListener), mTrack(aTrack) {}
 
 void DecodedStreamTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
                                               TrackTime aCurrentTrackTime) {
@@ -221,31 +218,30 @@ void DecodedStreamTrackListener::NotifyO
 }
 
 void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
   mGraphListener->NotifyEnded(mTrack);
 }
 
 /**
  * All MediaStream-related data is protected by the decoder's monitor. We have
- * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
+ * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
  * inputs for all output tracks created by OutputStreamManager after calls to
  * captureStream/UntilEnded. Seeking creates new source tracks, as does
  * replaying after the input as ended. In the latter case, the new sources are
  * not connected to tracks created by captureStreamUntilEnded.
  */
 class DecodedStreamData final {
  public:
   DecodedStreamData(
-      OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-      RefPtr<SourceMediaTrack> aAudioTrack,
-      RefPtr<SourceMediaTrack> aVideoTrack,
+      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
+      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
+      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
       MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
-      AbstractThread* aMainThread);
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise);
   ~DecodedStreamData();
   MediaEventSource<int64_t>& OnOutput();
   void Forget();
   void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);
 
   void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
                            const TimeUnit& aEnd,
                            const gfx::IntSize& aIntrinsicSize,
@@ -280,49 +276,74 @@ class DecodedStreamData final {
   // the image.
   RefPtr<layers::Image> mLastVideoImage;
   gfx::IntSize mLastVideoImageDisplaySize;
   bool mHaveSentFinishAudio;
   bool mHaveSentFinishVideo;
 
   const RefPtr<SourceMediaTrack> mAudioTrack;
   const RefPtr<SourceMediaTrack> mVideoTrack;
+  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
+  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
+  const RefPtr<MediaInputPort> mAudioPort;
+  const RefPtr<MediaInputPort> mVideoPort;
   const RefPtr<DecodedStreamGraphListener> mListener;
-
-  const RefPtr<OutputStreamManager> mOutputStreamManager;
-  const RefPtr<AbstractThread> mAbstractMainThread;
 };
 
 DecodedStreamData::DecodedStreamData(
-    OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
-    RefPtr<SourceMediaTrack> aAudioTrack, RefPtr<SourceMediaTrack> aVideoTrack,
+    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
+    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
+    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
     MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
-    AbstractThread* aMainThread)
+    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise)
     : mAudioFramesWritten(0),
       mVideoTrackWritten(0),
       mAudioTrackWritten(0),
       mNextAudioTime(aInit.mStartTime),
       mHaveSentFinishAudio(false),
       mHaveSentFinishVideo(false),
-      mAudioTrack(std::move(aAudioTrack)),
-      mVideoTrack(std::move(aVideoTrack)),
+      mAudioTrack(aInit.mInfo.HasAudio()
+                      ? aGraph->CreateSourceTrack(MediaSegment::AUDIO)
+                      : nullptr),
+      mVideoTrack(aInit.mInfo.HasVideo()
+                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
+                      : nullptr),
+      mAudioOutputTrack(std::move(aAudioOutputTrack)),
+      mVideoOutputTrack(std::move(aVideoOutputTrack)),
+      mAudioPort((mAudioOutputTrack && mAudioTrack)
+                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
+                     : nullptr),
+      mVideoPort((mVideoOutputTrack && mVideoTrack)
+                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
+                     : nullptr),
       // DecodedStreamGraphListener will resolve these promises.
       mListener(MakeRefPtr<DecodedStreamGraphListener>(
           mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
-          std::move(aVideoEndedPromise), aMainThread)),
-      mOutputStreamManager(aOutputStreamManager),
-      mAbstractMainThread(aMainThread) {
+          std::move(aVideoEndedPromise))) {
   MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(
-      mOutputStreamManager->HasTracks(mAudioTrack, mVideoTrack),
-      "Tracks must be pre-created on main thread");
+  if (mAudioTrack) {
+    mAudioTrack->SetAppendDataSourceRate(aInit.mInfo.mAudio.mRate);
+  }
 }
 
-DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }
+DecodedStreamData::~DecodedStreamData() {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mAudioTrack) {
+    mAudioTrack->Destroy();
+  }
+  if (mVideoTrack) {
+    mVideoTrack->Destroy();
+  }
+  if (mAudioPort) {
+    mAudioPort->Destroy();
+  }
+  if (mVideoPort) {
+    mVideoPort->Destroy();
+  }
+}
 
 MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
   return mListener->OnOutput();
 }
 
 void DecodedStreamData::Forget() { mListener->Forget(); }
 
 void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
@@ -335,40 +356,35 @@ void DecodedStreamData::GetDebugInfo(dom
           .ToMicroseconds();
   aInfo.mLastVideoEndTime =
       mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
           .ToMicroseconds();
   aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
   aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
 }
 
-DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
-                             AbstractThread* aMainThread,
-                             MediaQueue<AudioData>& aAudioQueue,
-                             MediaQueue<VideoData>& aVideoQueue,
-                             OutputStreamManager* aOutputStreamManager)
-    : mOwnerThread(aOwnerThread),
-      mAbstractMainThread(aMainThread),
-      mOutputStreamManager(aOutputStreamManager),
+DecodedStream::DecodedStream(
+    MediaDecoderStateMachine* aStateMachine,
+    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
+    : mOwnerThread(aStateMachine->OwnerThread()),
       mWatchManager(this, mOwnerThread),
       mPlaying(false, "DecodedStream::mPlaying"),
-      mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
+      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
                        "DecodedStream::mPrincipalHandle (Mirror)"),
+      mOutputTracks(std::move(aOutputTracks)),
       mAudioQueue(aAudioQueue),
       mVideoQueue(aVideoQueue) {
-  mPrincipalHandle.Connect(mOutputStreamManager->CanonicalPrincipalHandle());
+  mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
 
   mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
-  PlayingChanged();  // Notify of the initial state
 }
 
 DecodedStream::~DecodedStream() {
   MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
-  NS_ProxyRelease("DecodedStream::mOutputStreamManager", mAbstractMainThread,
-                  do_AddRef(mOutputStreamManager));
 }
 
 const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
   AssertOwnerThread();
   return mParams;
 }
 
 void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
@@ -387,89 +403,87 @@ RefPtr<DecodedStream::EndedPromise> Deco
   }
   return nullptr;
 }
 
 nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                               const MediaInfo& aInfo) {
   AssertOwnerThread();
   MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
+  MOZ_DIAGNOSTIC_ASSERT(!mOutputTracks.IsEmpty());
 
   mStartTime.emplace(aStartTime);
   mLastOutputTime = TimeUnit::Zero();
   mInfo = aInfo;
   mPlaying = true;
   ConnectListener();
 
   class R : public Runnable {
     typedef MozPromiseHolder<MediaSink::EndedPromise> Promise;
 
    public:
-    R(PlaybackInfoInit&& aInit, Promise&& aAudioEndedPromise,
-      Promise&& aVideoEndedPromise, OutputStreamManager* aManager,
-      AbstractThread* aMainThread)
+    R(PlaybackInfoInit&& aInit,
+      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
+      Promise&& aAudioEndedPromise, Promise&& aVideoEndedPromise)
         : Runnable("CreateDecodedStreamData"),
           mInit(std::move(aInit)),
+          mOutputTracks(std::move(aOutputTracks)),
           mAudioEndedPromise(std::move(aAudioEndedPromise)),
-          mVideoEndedPromise(std::move(aVideoEndedPromise)),
-          mOutputStreamManager(aManager),
-          mAbstractMainThread(aMainThread) {}
+          mVideoEndedPromise(std::move(aVideoEndedPromise)) {}
     NS_IMETHOD Run() override {
       MOZ_ASSERT(NS_IsMainThread());
-      // No need to create a source track when there are no output tracks.
-      // This happens when RemoveOutput() is called immediately after
-      // StartPlayback().
-      if (mOutputStreamManager->IsEmpty()) {
-        // Resolve the promise to indicate the end of playback.
-        mAudioEndedPromise.Resolve(true, __func__);
-        mVideoEndedPromise.Resolve(true, __func__);
+      RefPtr<ProcessedMediaTrack> audioOutputTrack;
+      RefPtr<ProcessedMediaTrack> videoOutputTrack;
+      for (const auto& track : mOutputTracks) {
+        if (track->mType == MediaSegment::AUDIO) {
+          MOZ_DIAGNOSTIC_ASSERT(
+              !audioOutputTrack,
+              "We only support capturing to one output track per kind");
+          audioOutputTrack = track;
+        } else if (track->mType == MediaSegment::VIDEO) {
+          MOZ_DIAGNOSTIC_ASSERT(
+              !videoOutputTrack,
+              "We only support capturing to one output track per kind");
+          videoOutputTrack = track;
+        } else {
+          MOZ_CRASH("Unknown media type");
+        }
+      }
+      if ((!audioOutputTrack && !videoOutputTrack) ||
+          (audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
+          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
+        // No output tracks yet, or they're going away. Halt playback by not
+        // creating DecodedStreamData. MDSM will try again with a new
+        // DecodedStream sink when tracks are available.
         return NS_OK;
       }
-      RefPtr<SourceMediaTrack> audioTrack =
-          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
-      if (mInit.mInfo.HasAudio() && !audioTrack) {
-        MOZ_DIAGNOSTIC_ASSERT(
-            !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
-        audioTrack = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
-      }
-      if (audioTrack) {
-        audioTrack->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
-      }
-      RefPtr<SourceMediaTrack> videoTrack =
-          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
-      if (mInit.mInfo.HasVideo() && !videoTrack) {
-        MOZ_DIAGNOSTIC_ASSERT(
-            !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
-        videoTrack = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
-      }
       mData = MakeUnique<DecodedStreamData>(
-          mOutputStreamManager, std::move(mInit), std::move(audioTrack),
-          std::move(videoTrack), std::move(mAudioEndedPromise),
-          std::move(mVideoEndedPromise), mAbstractMainThread);
+          std::move(mInit), mOutputTracks[0]->Graph(),
+          std::move(audioOutputTrack), std::move(videoOutputTrack),
+          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise));
       return NS_OK;
     }
     UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
 
    private:
     PlaybackInfoInit mInit;
+    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
     Promise mAudioEndedPromise;
     Promise mVideoEndedPromise;
-    RefPtr<OutputStreamManager> mOutputStreamManager;
     UniquePtr<DecodedStreamData> mData;
-    const RefPtr<AbstractThread> mAbstractMainThread;
   };
 
   MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
   mAudioEndedPromise = audioEndedHolder.Ensure(__func__);
   MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
   mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
   PlaybackInfoInit init{aStartTime, aInfo};
-  nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
-                                  std::move(videoEndedHolder),
-                                  mOutputStreamManager, mAbstractMainThread);
+  nsCOMPtr<nsIRunnable> r = new R(
+      std::move(init), nsTArray<RefPtr<ProcessedMediaTrack>>(mOutputTracks),
+      std::move(audioEndedHolder), std::move(videoEndedHolder));
   SyncRunnable::DispatchToThread(
       SystemGroup::EventTargetFor(TaskCategory::Other), r);
   mData = static_cast<R*>(r.get())->ReleaseData();
 
   if (mData) {
     mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                 &DecodedStream::NotifyOutput);
     SendData();
@@ -512,22 +526,19 @@ void DecodedStream::DestroyData(UniquePt
   AssertOwnerThread();
 
   if (!aData) {
     return;
   }
 
   mOutputListener.Disconnect();
 
-  NS_DispatchToMainThread(NS_NewRunnableFunction(
-      "DecodedStream::DestroyData",
-      [data = std::move(aData), manager = mOutputStreamManager]() {
-        data->Forget();
-        manager->RemoveTracks();
-      }));
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction("DecodedStream::DestroyData",
+                             [data = std::move(aData)]() { data->Forget(); }));
 }
 
 void DecodedStream::SetPlaying(bool aPlaying) {
   AssertOwnerThread();
 
   // Resume/pause matters only when playback started.
   if (mStartTime.isNothing()) {
     return;
@@ -887,20 +898,16 @@ void DecodedStream::NotifyOutput(int64_t
 
 void DecodedStream::PlayingChanged() {
   AssertOwnerThread();
 
   if (!mPlaying) {
     // On seek or pause we discard future frames.
     ResetVideo(mPrincipalHandle);
   }
-
-  mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
-      "OutputStreamManager::SetPlaying", mOutputStreamManager,
-      &OutputStreamManager::SetPlaying, mPlaying));
 }
 
 void DecodedStream::ConnectListener() {
   AssertOwnerThread();
 
   mAudioPushListener = mAudioQueue.PushEvent().Connect(
       mOwnerThread, this, &DecodedStream::SendData);
   mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
--- a/dom/media/mediasink/DecodedStream.h
+++ b/dom/media/mediasink/DecodedStream.h
@@ -17,34 +17,34 @@
 #include "mozilla/MozPromise.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/StateMirroring.h"
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 
 class DecodedStreamData;
+class MediaDecoderStateMachine;
 class AudioData;
 class VideoData;
-class OutputStreamManager;
 struct PlaybackInfoInit;
 class ProcessedMediaTrack;
 class TimeStamp;
 
 template <class T>
 class MediaQueue;
 
 class DecodedStream : public MediaSink {
   using MediaSink::PlaybackParams;
 
  public:
-  DecodedStream(AbstractThread* aOwnerThread, AbstractThread* aMainThread,
+  DecodedStream(MediaDecoderStateMachine* aStateMachine,
+                nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
                 MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue,
-                OutputStreamManager* aOutputStreamManager);
+                MediaQueue<VideoData>& aVideoQueue);
 
   // MediaSink functions.
   const PlaybackParams& GetPlaybackParams() const override;
   void SetPlaybackParams(const PlaybackParams& aParams) override;
 
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
   media::TimeUnit GetEndTime(TrackType aType) const override;
   media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
@@ -83,34 +83,27 @@ class DecodedStream : public MediaSink {
 
   void PlayingChanged();
 
   void ConnectListener();
   void DisconnectListener();
 
   const RefPtr<AbstractThread> mOwnerThread;
 
-  const RefPtr<AbstractThread> mAbstractMainThread;
-
-  /*
-   * Main thread only members.
-   */
-  // Data about MediaStreams that are being fed by the decoder.
-  const RefPtr<OutputStreamManager> mOutputStreamManager;
-
   /*
    * Worker thread only members.
    */
   WatchManager<DecodedStream> mWatchManager;
   UniquePtr<DecodedStreamData> mData;
   RefPtr<EndedPromise> mAudioEndedPromise;
   RefPtr<EndedPromise> mVideoEndedPromise;
 
   Watchable<bool> mPlaying;
   Mirror<PrincipalHandle> mPrincipalHandle;
+  const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
 
   PlaybackParams mParams;
 
   media::NullableTimeUnit mStartTime;
   media::TimeUnit mLastOutputTime;
   MediaInfo mInfo;
 
   MediaQueue<AudioData>& mAudioQueue;
deleted file mode 100644
--- a/dom/media/mediasink/OutputStreamManager.cpp
+++ /dev/null
@@ -1,357 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "OutputStreamManager.h"
-
-#include "DOMMediaStream.h"
-#include "../MediaTrackGraph.h"
-#include "mozilla/dom/MediaStreamTrack.h"
-#include "mozilla/dom/AudioStreamTrack.h"
-#include "mozilla/dom/VideoStreamTrack.h"
-#include "nsContentUtils.h"
-
-namespace mozilla {
-
-#define LOG(level, msg, ...) \
-  MOZ_LOG(gMediaDecoderLog, level, (msg, ##__VA_ARGS__))
-
-class DecodedStreamTrackSource : public dom::MediaStreamTrackSource {
- public:
-  NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DecodedStreamTrackSource,
-                                           dom::MediaStreamTrackSource)
-
-  explicit DecodedStreamTrackSource(SourceMediaTrack* aSourceStream,
-                                    nsIPrincipal* aPrincipal)
-      : dom::MediaStreamTrackSource(aPrincipal, nsString()),
-        mTrack(aSourceStream->Graph()->CreateForwardedInputTrack(
-            aSourceStream->mType)),
-        mPort(mTrack->AllocateInputPort(aSourceStream)) {
-    MOZ_ASSERT(NS_IsMainThread());
-  }
-
-  dom::MediaSourceEnum GetMediaSource() const override {
-    return dom::MediaSourceEnum::Other;
-  }
-
-  void Stop() override {
-    MOZ_ASSERT(NS_IsMainThread());
-
-    // We don't notify the source that a track was stopped since it will keep
-    // producing tracks until the element ends. The decoder also needs the
-    // tracks it created to be live at the source since the decoder's clock is
-    // based on MediaStreams during capture. We do however, disconnect this
-    // track's underlying track.
-    if (!mTrack->IsDestroyed()) {
-      mTrack->Destroy();
-      mPort->Destroy();
-    }
-  }
-
-  void Disable() override {}
-
-  void Enable() override {}
-
-  void SetPrincipal(nsIPrincipal* aPrincipal) {
-    MOZ_ASSERT(NS_IsMainThread());
-    mPrincipal = aPrincipal;
-    PrincipalChanged();
-  }
-
-  void ForceEnded() { OverrideEnded(); }
-
-  const RefPtr<ProcessedMediaTrack> mTrack;
-  const RefPtr<MediaInputPort> mPort;
-
- protected:
-  virtual ~DecodedStreamTrackSource() {
-    MOZ_ASSERT(NS_IsMainThread());
-    MOZ_ASSERT(mTrack->IsDestroyed());
-  }
-};
-
-NS_IMPL_ADDREF_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
-NS_IMPL_RELEASE_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
-NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DecodedStreamTrackSource)
-NS_INTERFACE_MAP_END_INHERITING(dom::MediaStreamTrackSource)
-NS_IMPL_CYCLE_COLLECTION_INHERITED(DecodedStreamTrackSource,
-                                   dom::MediaStreamTrackSource)
-
-OutputStreamData::OutputStreamData(OutputStreamManager* aManager,
-                                   AbstractThread* aAbstractMainThread,
-                                   DOMMediaStream* aDOMStream)
-    : mManager(aManager),
-      mAbstractMainThread(aAbstractMainThread),
-      mDOMStream(aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-}
-
-OutputStreamData::~OutputStreamData() = default;
-
-void OutputStreamData::AddTrack(SourceMediaTrack* aTrack,
-                                MediaSegment::Type aType,
-                                nsIPrincipal* aPrincipal, bool aAsyncAddTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-
-  LOG(LogLevel::Debug,
-      "Adding output %s track sourced from track %p to MediaStream %p%s",
-      aType == MediaSegment::AUDIO ? "audio" : "video", aTrack,
-      mDOMStream.get(), aAsyncAddTrack ? " (async)" : "");
-
-  auto source = MakeRefPtr<DecodedStreamTrackSource>(aTrack, aPrincipal);
-  RefPtr<dom::MediaStreamTrack> track;
-  if (aType == MediaSegment::AUDIO) {
-    track = new dom::AudioStreamTrack(mDOMStream->GetParentObject(),
-                                      source->mTrack, source);
-  } else {
-    MOZ_ASSERT(aType == MediaSegment::VIDEO);
-    track = new dom::VideoStreamTrack(mDOMStream->GetParentObject(),
-                                      source->mTrack, source);
-  }
-  mTracks.AppendElement(track.get());
-  if (aAsyncAddTrack) {
-    GetMainThreadEventTarget()->Dispatch(
-        NewRunnableMethod<RefPtr<dom::MediaStreamTrack>>(
-            "DOMMediaStream::AddTrackInternal", mDOMStream.get(),
-            &DOMMediaStream::AddTrackInternal, track));
-  } else {
-    mDOMStream->AddTrackInternal(track);
-  }
-}
-
-void OutputStreamData::RemoveTrack(SourceMediaTrack* aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-
-  LOG(LogLevel::Debug,
-      "Removing output track sourced by track %p from MediaStream %p", aTrack,
-      mDOMStream.get());
-
-  for (const auto& t : nsTArray<WeakPtr<dom::MediaStreamTrack>>(mTracks)) {
-    mTracks.RemoveElement(t);
-    if (!t || t->Ended()) {
-      continue;
-    }
-    DecodedStreamTrackSource& source =
-        static_cast<DecodedStreamTrackSource&>(t->GetSource());
-    GetMainThreadEventTarget()->Dispatch(
-        NewRunnableMethod("DecodedStreamTrackSource::ForceEnded", &source,
-                          &DecodedStreamTrackSource::ForceEnded));
-  }
-}
-
-void OutputStreamData::SetPrincipal(nsIPrincipal* aPrincipal) {
-  MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
-  for (const WeakPtr<dom::MediaStreamTrack>& track : mTracks) {
-    if (!track || track->Ended()) {
-      continue;
-    }
-    DecodedStreamTrackSource& source =
-        static_cast<DecodedStreamTrackSource&>(track->GetSource());
-    source.SetPrincipal(aPrincipal);
-  }
-}
-
-OutputStreamManager::OutputStreamManager(SharedDummyTrack* aDummyStream,
-                                         nsIPrincipal* aPrincipal,
-                                         AbstractThread* aAbstractMainThread)
-    : mAbstractMainThread(aAbstractMainThread),
-      mDummyStream(aDummyStream),
-      mPrincipalHandle(
-          aAbstractMainThread,
-          aPrincipal ? MakePrincipalHandle(aPrincipal) : PRINCIPAL_HANDLE_NONE,
-          "OutputStreamManager::mPrincipalHandle (Canonical)") {
-  MOZ_ASSERT(NS_IsMainThread());
-}
-
-void OutputStreamManager::Add(DOMMediaStream* aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  LOG(LogLevel::Info, "Adding MediaStream %p", aDOMStream);
-
-  OutputStreamData* p = mStreams
-                            .AppendElement(new OutputStreamData(
-                                this, mAbstractMainThread, aDOMStream))
-                            ->get();
-  for (const auto& lt : mLiveTracks) {
-    p->AddTrack(lt->mSourceTrack, lt->mType, mPrincipalHandle.Ref(), false);
-  }
-}
-
-void OutputStreamManager::Remove(DOMMediaStream* aDOMStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  LOG(LogLevel::Info, "Removing MediaStream %p", aDOMStream);
-
-  AutoRemoveDestroyedStreams();
-  mStreams.ApplyIf(
-      aDOMStream, 0, StreamComparator(),
-      [&](const UniquePtr<OutputStreamData>& aData) {
-        for (const auto& lt : mLiveTracks) {
-          aData->RemoveTrack(lt->mSourceTrack);
-        }
-      },
-      []() { MOZ_ASSERT_UNREACHABLE("Didn't exist"); });
-  DebugOnly<bool> rv = mStreams.RemoveElement(aDOMStream, StreamComparator());
-  MOZ_ASSERT(rv);
-}
-
-bool OutputStreamManager::HasTrackType(MediaSegment::Type aType) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  return mLiveTracks.Contains(aType, TrackTypeComparator());
-}
-
-bool OutputStreamManager::HasTracks(SourceMediaTrack* aAudioStream,
-                                    SourceMediaTrack* aVideoStream) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  size_t nrExpectedTracks = 0;
-  bool asExpected = true;
-  if (aAudioStream) {
-    Unused << ++nrExpectedTracks;
-    asExpected = asExpected && mLiveTracks.Contains(
-                                   MakePair(aAudioStream, MediaSegment::AUDIO),
-                                   TrackComparator());
-  }
-  if (aVideoStream) {
-    Unused << ++nrExpectedTracks;
-    asExpected = asExpected && mLiveTracks.Contains(
-                                   MakePair(aVideoStream, MediaSegment::VIDEO),
-                                   TrackComparator());
-  }
-  asExpected = asExpected && mLiveTracks.Length() == nrExpectedTracks;
-  return asExpected;
-}
-
-SourceMediaTrack* OutputStreamManager::GetPrecreatedTrackOfType(
-    MediaSegment::Type aType) const {
-  auto i = mLiveTracks.IndexOf(aType, 0, PrecreatedTrackTypeComparator());
-  return i == nsTArray<UniquePtr<LiveTrack>>::NoIndex
-             ? nullptr
-             : mLiveTracks[i]->mSourceTrack.get();
-}
-
-size_t OutputStreamManager::NumberOfTracks() {
-  MOZ_ASSERT(NS_IsMainThread());
-  return mLiveTracks.Length();
-}
-
-already_AddRefed<SourceMediaTrack> OutputStreamManager::AddTrack(
-    MediaSegment::Type aType) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(!HasTrackType(aType),
-             "Cannot have two tracks of the same type at the same time");
-
-  RefPtr<SourceMediaTrack> track =
-      mDummyStream->mTrack->Graph()->CreateSourceTrack(aType);
-  if (!mPlaying) {
-    track->Suspend();
-  }
-
-  LOG(LogLevel::Info, "Adding %s track sourced by track %p",
-      aType == MediaSegment::AUDIO ? "audio" : "video", track.get());
-
-  mLiveTracks.AppendElement(MakeUnique<LiveTrack>(track, aType));
-  AutoRemoveDestroyedStreams();
-  for (const auto& data : mStreams) {
-    data->AddTrack(track, aType, mPrincipalHandle.Ref(), true);
-  }
-
-  return track.forget();
-}
-
-OutputStreamManager::LiveTrack::LiveTrack(SourceMediaTrack* aSourceTrack,
-                                          MediaSegment::Type aType)
-    : mSourceTrack(aSourceTrack), mType(aType) {}
-
-OutputStreamManager::LiveTrack::~LiveTrack() { mSourceTrack->Destroy(); }
-
-void OutputStreamManager::AutoRemoveDestroyedStreams() {
-  MOZ_ASSERT(NS_IsMainThread());
-  for (size_t i = mStreams.Length(); i > 0; --i) {
-    const auto& data = mStreams[i - 1];
-    if (!data->mDOMStream) {
-      // If the mDOMStream WeakPtr is now null, mDOMStream has been destructed.
-      mStreams.RemoveElementAt(i - 1);
-    }
-  }
-}
-
-void OutputStreamManager::RemoveTrack(SourceMediaTrack* aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG(LogLevel::Info, "Removing track with source track %p", aTrack);
-  DebugOnly<bool> rv =
-      mLiveTracks.RemoveElement(aTrack, TrackStreamComparator());
-  MOZ_ASSERT(rv);
-  AutoRemoveDestroyedStreams();
-  for (const auto& data : mStreams) {
-    data->RemoveTrack(aTrack);
-  }
-}
-
-void OutputStreamManager::RemoveTracks() {
-  MOZ_ASSERT(NS_IsMainThread());
-  for (size_t i = mLiveTracks.Length(); i > 0; --i) {
-    RemoveTrack(mLiveTracks[i - 1]->mSourceTrack);
-  }
-}
-
-void OutputStreamManager::Disconnect() {
-  MOZ_ASSERT(NS_IsMainThread());
-  RemoveTracks();
-  MOZ_ASSERT(mLiveTracks.IsEmpty());
-  AutoRemoveDestroyedStreams();
-  nsTArray<RefPtr<DOMMediaStream>> domStreams(mStreams.Length());
-  for (const auto& data : mStreams) {
-    domStreams.AppendElement(data->mDOMStream);
-  }
-  for (auto& domStream : domStreams) {
-    Remove(domStream);
-  }
-  MOZ_ASSERT(mStreams.IsEmpty());
-}
-
-AbstractCanonical<PrincipalHandle>*
-OutputStreamManager::CanonicalPrincipalHandle() {
-  return &mPrincipalHandle;
-}
-
-void OutputStreamManager::SetPrincipal(nsIPrincipal* aPrincipal) {
-  MOZ_ASSERT(NS_IsMainThread());
-  nsCOMPtr<nsIPrincipal> principal = GetPrincipalFromHandle(mPrincipalHandle);
-  if (nsContentUtils::CombineResourcePrincipals(&principal, aPrincipal)) {
-    AutoRemoveDestroyedStreams();
-    for (const UniquePtr<OutputStreamData>& data : mStreams) {
-      data->SetPrincipal(principal);
-    }
-    mPrincipalHandle = MakePrincipalHandle(principal);
-  }
-}
-
-void OutputStreamManager::SetPlaying(bool aPlaying) {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mPlaying == aPlaying) {
-    return;
-  }
-
-  mPlaying = aPlaying;
-  for (auto& lt : mLiveTracks) {
-    if (mPlaying) {
-      lt->mSourceTrack->Resume();
-      lt->mEverPlayed = true;
-    } else {
-      lt->mSourceTrack->Suspend();
-    }
-  }
-}
-
-OutputStreamManager::~OutputStreamManager() = default;
-
-#undef LOG
-
-}  // namespace mozilla
deleted file mode 100644
--- a/dom/media/mediasink/OutputStreamManager.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef OutputStreamManager_h
-#define OutputStreamManager_h
-
-#include "mozilla/RefPtr.h"
-#include "mozilla/StateMirroring.h"
-#include "mozilla/WeakPtr.h"
-#include "nsTArray.h"
-
-namespace mozilla {
-
-class DOMMediaStream;
-class MediaInputPort;
-class OutputStreamManager;
-class ProcessedMediaTrack;
-class SourceMediaTrack;
-
-namespace dom {
-class MediaStreamTrack;
-}
-
-class OutputStreamData {
- public:
-  OutputStreamData(OutputStreamManager* aManager,
-                   AbstractThread* aAbstractMainThread,
-                   DOMMediaStream* aDOMStream);
-  OutputStreamData(const OutputStreamData& aOther) = delete;
-  OutputStreamData(OutputStreamData&& aOther) = delete;
-  ~OutputStreamData();
-
-  // Creates and adds a MediaStreamTrack to mDOMStream so that we can feed data
-  // to it. For a true aAsyncAddTrack we will dispatch a task to add the
-  // created track to mDOMStream, as is required by spec for the "addtrack"
-  // event.
-  void AddTrack(SourceMediaTrack* aTrack, MediaSegment::Type aType,
-                nsIPrincipal* aPrincipal, bool aAsyncAddTrack);
-  // Ends any MediaStreamTracks sourced from aTrack.
-  void RemoveTrack(SourceMediaTrack* aTrack);
-
-  void SetPrincipal(nsIPrincipal* aPrincipal);
-
-  const RefPtr<OutputStreamManager> mManager;
-  const RefPtr<AbstractThread> mAbstractMainThread;
-  // The DOMMediaStream we add tracks to and represent.
-  const WeakPtr<DOMMediaStream> mDOMStream;
-
- private:
-  // Tracks that have been added and not yet removed.
-  nsTArray<WeakPtr<dom::MediaStreamTrack>> mTracks;
-};
-
-class OutputStreamManager {
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
-
- public:
-  OutputStreamManager(SharedDummyTrack* aDummyStream, nsIPrincipal* aPrincipal,
-                      AbstractThread* aAbstractMainThread);
-  // Add the output stream to the collection.
-  void Add(DOMMediaStream* aDOMStream);
-  // Remove the output stream from the collection.
-  void Remove(DOMMediaStream* aDOMStream);
-  // Returns true if there's a live track of the given type.
-  bool HasTrackType(MediaSegment::Type aType);
-  // Returns true if the given tracks are sourcing all currently live tracks.
-  // Use nullptr to make it ignored for that type.
-  bool HasTracks(SourceMediaTrack* aAudioStream,
-                 SourceMediaTrack* aVideoStream);
-  // Gets the underlying track for the given type if it has never been played,
-  // or nullptr if there is none.
-  SourceMediaTrack* GetPrecreatedTrackOfType(MediaSegment::Type aType) const;
-  // Returns the number of live tracks.
-  size_t NumberOfTracks();
-  // Add a track sourced to all output tracks and return the MediaTrack that
-  // sources it.
-  already_AddRefed<SourceMediaTrack> AddTrack(MediaSegment::Type aType);
-  // Remove all currently live tracks.
-  void RemoveTracks();
-  // Remove all currently live tracks and all output streams.
-  void Disconnect();
-  // The principal handle for the underlying decoder.
-  AbstractCanonical<PrincipalHandle>* CanonicalPrincipalHandle();
-  // Called when the underlying decoder's principal has changed.
-  void SetPrincipal(nsIPrincipal* aPrincipal);
-  // Called by DecodedStream when its playing state changes. While not playing
-  // we suspend mSourceTrack.
-  void SetPlaying(bool aPlaying);
-  // Return true if the collection of output streams is empty.
-  bool IsEmpty() const {
-    MOZ_ASSERT(NS_IsMainThread());
-    return mStreams.IsEmpty();
-  }
-
-  const RefPtr<AbstractThread> mAbstractMainThread;
-
- private:
-  ~OutputStreamManager();
-
-  class LiveTrack {
-   public:
-    LiveTrack(SourceMediaTrack* aSourceTrack, MediaSegment::Type aType);
-    ~LiveTrack();
-
-    const RefPtr<SourceMediaTrack> mSourceTrack;
-    const MediaSegment::Type mType;
-    bool mEverPlayed = false;
-  };
-
-  struct StreamComparator {
-    static bool Equals(const UniquePtr<OutputStreamData>& aData,
-                       DOMMediaStream* aStream) {
-      return aData->mDOMStream == aStream;
-    }
-  };
-  struct TrackStreamComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       SourceMediaTrack* aTrack) {
-      return aLiveTrack->mSourceTrack == aTrack;
-    }
-  };
-  struct TrackTypeComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       MediaSegment::Type aType) {
-      return aLiveTrack->mType == aType;
-    }
-  };
-  struct PrecreatedTrackTypeComparator {
-    static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
-                       MediaSegment::Type aType) {
-      return !aLiveTrack->mEverPlayed && aLiveTrack->mType == aType;
-    }
-  };
-  struct TrackComparator {
-    static bool Equals(
-        const UniquePtr<LiveTrack>& aLiveTrack,
-        const Pair<SourceMediaTrack*, MediaSegment::Type>& aOther) {
-      return aLiveTrack->mSourceTrack == aOther.first() &&
-             aLiveTrack->mType == aOther.second();
-    }
-  };
-
-  // Goes through mStreams and removes any entries that have been destroyed.
-  void AutoRemoveDestroyedStreams();
-
-  // Remove tracks sourced from aTrack from all output tracks.
-  void RemoveTrack(SourceMediaTrack* aTrack);
-
-  const RefPtr<SharedDummyTrack> mDummyStream;
-  nsTArray<UniquePtr<OutputStreamData>> mStreams;
-  nsTArray<UniquePtr<LiveTrack>> mLiveTracks;
-  Canonical<PrincipalHandle> mPrincipalHandle;
-  bool mPlaying = false;
-};
-
-}  // namespace mozilla
-
-#endif  // OutputStreamManager_h
--- a/dom/media/mediasink/moz.build
+++ b/dom/media/mediasink/moz.build
@@ -3,17 +3,16 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     'AudioSink.cpp',
     'AudioSinkWrapper.cpp',
     'DecodedStream.cpp',
-    'OutputStreamManager.cpp',
     'VideoSink.cpp',
 ]
 
 EXPORTS += [
     'MediaSink.h'
 ]
 
 include('/ipc/chromium/chromium-config.mozbuild')